uuid
int64
541B
3,299B
dataset
stringclasses
1 value
text
stringlengths
1
4.29M
1,108,101,565,815
arxiv
\subsection{Progress-Insensitive} \label{app:pits} We need a collection of lemmas on the type system, which we prove correct in \paperTR{the extended version of our paper.} {appendix~\ref{app:auxlemmas}.} \begin{lemma}[Preserving Dependencies] \label{app:lemma:paper:presdep} The dependencies of program points only increase with sequential composition. That is, if $|- \{ c_1 \} \Gamma_1$ and $|- \{c_1;c_2\} \Gamma$ then $\Gamma_1(a_p) \subseteq \Gamma(a_p)$ for all $a_p$. \end{lemma} \begin{lemma}[Non-Branching Reduction] \label{app:lemma:paper:nbr} For all configurations $c$ and stores $\sigma, \rho$ such that $|- \{c\} \Gamma$ with $<c,\sigma> \overarrow{\alpha}{} <c',\sigma'>$ and $<c,\rho> \overarrow{\beta}{} <c',\rho'>$, i.e.\ to the same command $c'$, where $\alpha, \beta$ is either an output or $\epsilon$. Then $\sigma \Gequiv{a_p} \rho$ implies that $|- \{c'\} \Gamma'$ and $\sigma' \NGequiv{\Gamma'}{a_p} \rho'$. \end{lemma} \begin{lemma}[Branching Reduction] \label{app:lemma:paper:br} For all configurations $c$ and stores $\sigma, \rho$ such that $|- \{c\} \Gamma$ with $<c,\sigma> \overarrow{\epsilon}{} <c'_\sigma,\sigma'>$ and $<c,\rho> \overarrow{\epsilon}{} <c'_\rho,\rho'>$, where $c'_\sigma \mathop{\not=} c'_\rho$. For all $a_p$ such that $<c'_\sigma,\sigma'> \overarrowA{t(a,v,p)}{*}$, it holds that if $\sigma \Gequiv{a_p} \rho$ then either: \begin{itemize} \item There exists a joining command $c_j$ with $t_1 \mathop{\preceq} t$, such that $<c'_\sigma,\sigma'> \overarrowA{t_1}{*} <c_j,\sigma_j>$ and $<c'_\rho,\rho'> \overarrowA{t'_1}{*} <c_j,\rho_j>$ with $\length{t_1} \mathop{=} \length{t'_1}$ and $|- \{c_j\} \Gamma_j$ with $\sigma_j \NGequiv{\Gamma_j}{a_p} \rho_j$. I.e. both executions join again at equal command $c_j$ after an equal number of outputs with equivalent dependencies for $a_p$, or \item For all $t'$ such that $<c,\rho> \overarrowA{t'}{*}$, $\length{t} \geq \length{t'}$. 
\end{itemize} \end{lemma} \noindent We restate the semantic soundness property for the progress-insensitive type system: \begin{align} \label{app:eq:pits:ih} \begin{minipage}{24em} For all $n \geq 0$, for all commands $c$ with typing $|- \{c\} \Gamma$, we have for all stores $\sigma, \rho$, if $<c,\sigma> \overarrowA{t}{n} <c_{\sigma},\sigma'> \overarrow{(a,v,p)}{}$ and $<c,\rho> \overarrowA{t'}{*} <c_{\rho},\rho'> \overarrowA{v'}{}$ and $\length{t} = \length{t'}$ then $\sigma \Gequiv{a_p} \rho \Rightarrow v \mathop{=} v'$. \end{minipage} \end{align} \noindent We show this by complete (strong) induction on $n$. \paragraph{$(n \mathop{=} 0)$} When $c$ produces an output in a single step for some store, it does so for any store and at the same program point (Fig.~\ref{fig:semantics}). We need to show $$ <c,\sigma> \overarrow{(a,v,p)}{} \and \sigma \Gequiv{a_p} \rho \and <c,\rho> \overarrow{(a,v',p)}{} ==> v = v' $$ By induction on the step taken from $<c,\sigma>$. Due to the output, there are only two cases that do not lead to immediate contradiction: \begin{itemize} \item ${\tt out}\ e\ {\tt on}\ a\ {\tt @}\ p$ -- By \ruleName{TS-Output}, ${\it fv}(e) \subseteq \Gamma(l,p)$. Therefore, $\sigma(e) = \rho(e)$ and thus $v = v'$. \item $c_1; c_2$ -- By Lemma~\ref{app:lemma:paper:presdep}, $\Gamma_1(l,p) \subseteq \Gamma(l,p)$ for $|- \{c_1\}\Gamma_1$. Therefore $\sigma \NGequiv{\Gamma_1}{a_p} \rho$ and the property follows by induction on $<c_1,\sigma>$. \end{itemize} \paragraph{\mathligsoff$(n + 1 > 0)$\mathligson} We have $<c,\sigma> \overarrowA{\alpha}{} <c'_\sigma,\sigma'> \overarrowA{t}{n} <c''_{\sigma},\sigma''> \overarrow{(a,v,p)}{}$ and $<c,\rho> \overarrowA{\beta}{} <c'_\rho,\rho'>$ $\overarrowA{t'}{*} < c''_{\rho},\rho''> \overarrowA{v'}{}$ with the induction hypothesis \eqref{app:eq:pits:ih} for evaluations of length $\leq n$. Here $\alpha$ could be either silent or an output. 
We only consider the case where $\alpha$ is silent; for the case where $\alpha$ is an output the proof is similar except in the induction step we have that traces produced by $\sigma'$ and $\rho'$ are both 1 shorter in length. When $c$ produces no output in a single step for some store, it does so for any store (Fig.~\ref{fig:semantics}). We case split on $c'_\sigma = c'_\rho$: \hfill\begin{minipage}{\dimexpr\columnwidth-3em} \begin{itemize} \item[$c'_\sigma = c'_\rho$] By Lemma~$\ref{app:lemma:paper:nbr}$, $|- \{c'_\sigma\} \Gamma'$ and $\sigma' \NGequiv{\Gamma'}{a_p} \rho'$. The case follows by induction on $n$. \item[$c'_\sigma \not= c'_\rho$] Since $<c,\rho>$ produces a trace longer than $\length{t}$, by Lemma~$\ref{app:lemma:paper:br}$ there exists a $t_1 \mathop{\preceq} t$ such that $<c'_\sigma,\sigma'> \overarrowA{t_1}{n_1} <c_j,\sigma_j> \overarrowA{t_2}{n_2} <c''_{\sigma},\sigma''> \overarrow{(a,v,p)}{}$ and there is a $t'_1 \mathop{\preceq} t'$ such that $<c'_\rho,\rho'> \overarrowA{t'_1}{*} <c_j,\rho_j> \overarrowA{t'_2v'}{*}$ with $\length{t'_1} = \length{t_1}$ and $|- \{c_j\} \Gamma_j$ with $\sigma_j \NGequiv{\Gamma_j}{a_p} \rho_j$. Since $\length{t} = \length{t'}$ and $\length{t_1} = \length{t'_1}$ we have $\length{t_2} = \length{t'_2}$. Then the case follows by induction on some $n_2 \leq n$. \end{itemize} \xdef\tpd{\the\prevdepth} \end{minipage} \subsection{Dynamic policies} \label{subsec:intro:dynpol} Since the pioneering work of Denning and Denning \cite{Denning:Denning:Certification}, a wide variety of information-flow policies and corresponding enforcement mechanisms have been proposed. Much recent work on information-flow properties goes beyond the static, multi-level security policies of earlier work, considering instead more sophisticated, dynamic forms of policy which permit different flows at different points during the execution of a program. 
Indeed, this shift of focus better reflects real-world requirements for security policies which are naturally dynamic. \begin{wrapfigure}{r}{0.22\textwidth} \vspace{-1em} \begin{lstlisting} // /*@$ {\tt x} \rightarrow a $@*/; out x on /*@$a$@*/; // /*@$ {\tt x} \not\rightarrow a $@*/; out 2 on /*@$a$@*/; \end{lstlisting} \caption{} \vspace{-2.5em} \label{fig:depex1} \end{wrapfigure} For example, consider a request for sensitive employee information made to an employer by a regulatory authority. In order to satisfy this request it may be necessary to temporarily allow the sensitive information to flow to a specific user in the Human Resources department. In simplified form, the essence of this example is captured in Figure~\ref{fig:depex1}. Here ${\tt x}$ contains the sensitive information, channel $a$ represents the HR user, and the policy is expressed by the annotations $ {\tt x} \rightarrow a $ (${\tt x}$ \emph{may} flow to $a$) and $ {\tt x} \not\rightarrow a $ (${\tt x}$ \emph{must not} flow to $a$). It is intuitively clear that this program complies with the policy. Consider two slightly more subtle examples, in each of which revocation of a permitted flow depends on run-time data: \begin{wrapfigure}{l}{0.55\textwidth} \vspace{-1em} \begin{lstlisting}[numbers=left] /*Program A*/ /*Program B*/ // /*@$ {\tt x, y} \rightarrow a $@*/; // /*@$ {\tt x} \rightarrow a $@*/; out x on /*@$a$@*/; out x on /*@$a$@*/; if (y > 0) { if (x > 0) { out 1 on /*@$a$@*/; out 1 on /*@$a$@*/; // /*@$ {\tt x} \not\rightarrow a $@*/; /*@$\ \!$@*/ // /*@$ {\tt x} \not\rightarrow a $@*/; } } out 2 on /*@$a$@*/; out 2 on /*@$a$@*/; out 3 on /*@$a$@*/; out 3 on /*@$a$@*/; \end{lstlisting} \end{wrapfigure} In program A, the revocation of ${\tt x} \rightarrow a $ is controlled by the value of ${\tt y}$, whereas in program B it is controlled by the value of ${\tt x}$ itself. 
Note that the policy for A explicitly allows ${\tt y} \rightarrow a $ so the conditional output (which reveals information about ${\tt y}$) appears to be permissible. In program B the conditional output reveals information about ${\tt x}$ itself, but this happens \emph{before} the revocation. So should program B be regarded as compliant? We argue that it should not, as follows. Consider ``the third output'' of program B as observed on channel $a$. Depending on the initial value of \texttt{x}, the observed value may be either 2 (line 8) or 3 (line 9). Thus this observation reveals information about ${\tt x}$ and, in the cases where revocation occurs, the observation happens \emph{after} the revocation. Unsurprisingly, increasing the sophistication of policies also increases the challenge of formulating good semantic definitions, which is to say, definitions which both match our intuitions about what the policies mean and can form the basis of formal reasoning about correctness. At first sight it might seem that increasing semantic sophistication should also require increasingly intricate enforcement mechanisms. However, all such mechanisms must somehow solve the same two distinct problems: \begin{enumerate} \item Determine what data dependencies exist between the various data sources and sinks manipulated by the program. \item Determine whether those dependencies are consistent with the flows permitted by the policy. \end{enumerate} Ideally, the first of these problems would be solved independently of the second, since dependencies are a property of the code, not the policy. This would allow reuse at two levels: a) reuse of the same dependency analysis mechanisms and proof techniques for different \emph{types} of policy; b) reuse of the dependency properties for a given program across verification of multiple \emph{alternative} policies (whether of the same type or not). 
In practice, enforcement mechanisms are typically not presented in a way which cleanly separates the two concerns. Not only does this hamper the reuse of analysis mechanisms and proof techniques, it also makes it harder to identify the \emph{essential} differences between different approaches. \paragraph{Central Contribution} We take a well-understood dependency type system for a simple while-language, originally designed to support enforcement of static policies, and extend it in a straightforward way to a language with output channels (\S~\ref{sec:typesystem}). We demonstrate the advantages of a clean separation between dependency analysis and policy enforcement, by establishing a generic soundness result (\S~\ref{sec:semsoundness}) for the type system which characterises the meaning of types as dependency properties. We then show how the dependency information derived by the type system can be used to verify compliance with dynamic policies. Note that this means that the core analysis for enforcement can be done even before the policy is known: we dub this \emph{very static} enforcement. More significantly, it opens the way to reuse of dependency analyses across verification of multiple types of information flow policy (for example, it might be possible to use the dependency analyses performed by advanced slicing tools such as Joana and Indus). \paragraph{Foundations of Dynamic Flow Policies} Although it was not our original aim and focus, we also make some contributions of a more foundational nature, and our paper opens with these (\S\ref{sec:policymodel}--\S\ref{sec:secproperty}). The semantic definition of security which we use is based on work of Askarov and Chong \cite{askarov2012}, and we begin with their abstract formulation of dynamic policies (\S\ref{sec:policymodel}). 
In defining security for dynamic policies, they made a convincing case for using a family of attackers of various strengths, following an observation that the intuitively strongest attacker (who never forgets anything that has been observed) actually places weaker security demands on the system than we would want. On the other hand they observe that the family of \emph{all} attackers contains pathological attacker behaviours which one certainly does not wish to consider. Due to this they do not give a characterisation of the set of all \emph{reasonable} attackers against which one should protect. We make the following two foundational contributions: \paragraph{Foundational Contribution 1} We focus (\S\ref{subsec:pi}) on the pragmatic case of \emph{progress insensitive} security (where slow information leakage is allowed through observation of computational progress \cite{Askarov+:ESORICS08}). We argue for a new definition of progress insensitive security (Def \ref{def:pikbsec}), which unconditionally grants all attackers knowledge of computational progress. With this modification to the definition from \cite{askarov2012}, the problematic examples of pathological attackers are eliminated, and we have a more complete definition of security. Consequently, we are able to prove security in the central contribution of the paper \emph{for all attackers}. \paragraph{Foundational Contribution 2} The definitions of security are based on characterising attacker knowledge and how it changes over time relative to the changing policy. As argued previously e.g., \cite{Broberg:Sands:PLAS09}, this style of definition forms a much more intuitive basis for a semantics of dynamic policies than using two-run characterisations. However, two-run formulations have the advantage of being easier to use in proofs. We show (\S\ref{sec:secproperty}) that our new knowledge-based progress-insensitive security definition enjoys a simple two-run characterisation. 
We make good use of this in our proof of correctness of our central contribution. \subsection{Change in Knowledge} Firstly, recognising that policy changes should not apply retrospectively, we can relax (\ref{eqn:K-bound}) to constrain only how an attacker's knowledge should be allowed to \emph{increase}, rather than its absolute value. The increase in attacker knowledge going from $t$ to $t\cdot v$ is given by the set difference ${\ek(t\cdot v)} - {\ek(t)}$. So, instead of (\ref{eqn:K-bound}), we require: \begin{equation} \label{eqn:K-increase-bound} {\ek(t\cdot v)} - {\ek(t)} \subseteq \compeclass{\sigma}{\equiv} \end{equation} where $\equiv$ is the policy in effect immediately before the output $v$. (Some minor set-theoretic rearrangement gives the equivalent \[ k(t\cdot v) \supseteq k(t) \cap \eclass{\sigma}{\equiv} \] which is the form of the original presentation in \cite{askarov2012}.) \subsection{Forgetful attackers} \label{sec:knowledgebased:forgetful} Focussing on change in knowledge addresses the problem of retrospective revocation but it creates a new issue. Consider the following example. \begin{wrapfigure}{r}{0.28\textwidth} \begin{lstlisting} /*@$ {\tt x} \rightarrow a $@*/; out x on a; /*@$ {\tt x} \not\rightarrow a $@*/; while (true) out x on a; \end{lstlisting} \caption{} \vspace{-1em} \label{fig:kbex1} \end{wrapfigure} \begin{example} \label{ex:gradualrelease} The program in Figure~\ref{fig:kbex1} produces the same output many times, but only the first output is permitted by the policy. Assume that the value of {\tt x} is $5$. Before the first output, the knowledge set of an observer on channel $a$ contains every possible store. After the first output the observer's knowledge set is reduced to include only those stores $\sigma$ where $\sigma({\tt x}) = 5$. This is allowed by the policy at that point. By the time the second output occurs, the policy prohibits any further flow from {\tt x}. 
However, since the attacker's knowledge set \emph{already} includes complete knowledge of {\tt x}, the second output does not actually change the attacker's knowledge at all, so (\ref{eqn:K-increase-bound}) is satisfied (since $k(t\cdot v) = k(t)$). Thus a policy semantics based on (\ref{eqn:K-increase-bound}) would accept this program even though it continues to leak the value of {\tt x} \emph{long} after the flow has been revoked. \end{example} Askarov and Chong address this by revisiting the assumption that an attacker's knowledge is necessarily determined by the simple function of traces (\ref{eqn:K-proto}) above. Consider an attacker which \emph{forgets} the value of the first output in example~\ref{ex:gradualrelease}. For this attacker, the second output would come as a revelation, revealing the value of {\tt x} all over again, in violation of the policy. Askarov and Chong thus arrive at the intriguing observation that security against a more powerful attacker, one who remembers everything that happens, does not imply security against a less resourceful attacker, who might forget parts of the observations made. Forgetful attackers are modelled as deterministic automata. \begin{definition}[Forgetful Attacker $\rhd$ \S\ III.A~\cite{askarov2012}] A forgetful attacker is a tuple $A\mathop{=}(S_A,s_0,\delta_A)$ where $S_A$ is the set of attacker states; $s_0 \in S_A$ is the initial state; and $\delta_A : S_A \times {\it Val} \rightarrow S_A$ the (deterministic) transition function describing how the attacker's state changes due to the values that the attacker observes. 
\end{definition} We write $A(t)$ for the attacker's state after observing trace $t$: \begin{align*} A(\epsilon) =&\ s_0 \\ A(t \cdot v) =&\ \delta_A(A(t),v) \end{align*} A forgetful attacker's knowledge after trace $t$ is defined as the set of all initial stores that produce a trace which would result in the same attacker state $A(t)$: \begin{definition}[Forgetful Attacker Knowledge $\rhd$ \S\ III.A~\cite{askarov2012}] $$k(A,c,a,t) = \{ \rho \mid <c,\rho> \overarrowA{t'}{} \wedge A(t') = A(t)\} $$ \end{definition} (Note that, in preparation for the formal definition of the security condition, program $c$ and channel $a$ now appear as explicit parameters.) The proposed security condition is still essentially as given by (\ref{eqn:K-increase-bound}), but now relative to a specific choice of attacker. Stated in the notation and style of the current paper, the formal definition is as follows. \begin{definition}[Knowledge-Based Security $\rhd$ Def. 1~\cite{askarov2012}] \label{def:pskbsec} Command $c$ is secure for policy $D$ against an attacker $A$ on channel $a$ for initial store $\sigma$ if for all traces $t$ and values $v$ such that $<c,\sigma>\overarrowA{t}{n}<c',\sigma'>\overarrowA{v}{1}$ we have $$ {\ek(A,c,a,t\cdot v)} - {\ek(A,c,a,t)} \subseteq \compeclass{\sigma}{\equiv} $$ where ${\equiv} = D_a(c,\sigma,n)$. \end{definition} Having relativized security to the power of an attacker's memory, it is natural to consider the strong notion of security that would be obtained by requiring Def.~\ref{def:pskbsec} to hold for all choices of $A$. However, as shown in \cite{askarov2012}, this exposes a problem with the model: there are attackers for which even well-behaved programs are insecure according to Def.~\ref{def:pskbsec}. \begin{example} Consider again the first example from the Introduction (Section~\ref{subsec:intro:dynpol}). Here, for simplicity, we assume that the variable {\tt x} is boolean, taking value 0 or 1. 
\begin{figure}[h] \centering \begin{subfigure}[b]{0.3\textwidth} \centering \begin{lstlisting} // /*@$ {\tt x} \rightarrow a $@*/ out x on /*@$a$@*/; // /*@$ {\tt x} \not\rightarrow a $@*/ out 2 on /*@$a$@*/; \end{lstlisting} \end{subfigure} \begin{subfigure}[b]{0.5\textwidth} \centering \begin{tikzpicture}[->,auto,node distance=2cm,semithick] \tikzstyle{every state}=[fill=none,draw=black,text=black] \node[initial,state] (A) {$q_0$}; \node[state] (B) [above right=0cm and 1cm of A] {$q_1$}; \node[state] (C) [below right=0cm and 1cm of A] {$q_2$}; \path (A) edge [loop above] node {$0$} (A) edge [above, pos=0.4] node {$1$} (B) edge [below, pos=0.4] node {$2$} (C) (B) edge [loop right] node {$2$} (B); \end{tikzpicture} \end{subfigure} \end{figure} It is intuitively clear that this program complies with the policy. However, as observed in \cite{askarov2012}, if we instantiate Def.~\ref{def:pskbsec} with the forgetful attacker displayed, the attacker's knowledge increases with the second output when ${\tt x} \mathop{=} 0$. After observing the value $0$, the attacker's state is $A(0)\mathop{=}q_0$. Since $A(\epsilon) \mathop{=} q_0$, the knowledge set still holds every store possible. After the second observation, only stores where ${\tt x} \mathop{=} 0$ could have led to state $q_2$, so the knowledge set shrinks (ie, the attacker's knowledge increases) at a point where the policy does not allow it. \end{example} This example poses a question which (so far as we are aware) remains unanswered: if we base a dynamic policy semantics on Def.\ref{def:pskbsec}, for \emph{which set} of attackers should we require it to hold? In the next section we define a progress-insensitive variant of Def.\ref{def:pskbsec}. For this variant it seems that security against all attackers \emph{is} a reasonable requirement and in Section~\ref{sec:semsoundness} we show that progress-insensitive security against all attackers is indeed enforced by our type system. 
\subsection{Progress Insensitive Security} \label{subsec:pi} Since \cite{Volpano:Smith:Irvine:Sound}, work on the formalisation and enforcement of information-flow policies has generally distinguished between two flavours of security: \emph{termination-sensitive} and \emph{termination-insensitive}. Termination-sensitive properties guarantee that protected information is neither revealed by its influence on input-output behaviour nor by its influence on termination behaviour. Termination-insensitive properties allow the latter flows and thus provide weaker guarantees. For systems with incremental output (as opposed to batch-processing systems) it is more appropriate to distinguish between \emph{progress-sensitive} and \emph{progress-insensitive} security. Progress-insensitive security ignores progress-flows, where a flow is regarded as a progress-flow if the information that it reveals can be inferred solely by observing \emph{how many} outputs the system produces. Two examples of programs with progress-flows are as follows: \begin{example} \label{ex:progressleaks} Programs containing progress-flows: \begin{lstlisting} // Program A // Program B out 1 on a; out 1 on a; while (x == 8) skip; if (x != 8) out 2 on a; out 2 on a; \end{lstlisting} Let $\sigma$ and $\rho$ differ only on the value of \texttt{x}: $\sigma({\tt x}) = 4$ and $\rho({\tt x}) = 8$. Note that, if started in $\sigma$, both programs produce a trace of length 2 (namely, the trace $1\mathop{\cdot}2$) whereas, if started in $\rho$, the maximum trace length is 1. Thus, for both programs, observing just the length of the trace produced can reveal information about \texttt{x}. Note that, since termination is not an observable event in the semantics, A and B are actually observably equivalent; we give the two variants to emphasise that progress-flows may occur even in the absence of loops. \end{example} In practice, most enforcement mechanisms only enforce progress-insensitive security. 
This is a pragmatic choice since (a) it is hard to enforce progress-sensitive security without being overly restrictive (typically, all programs which loop on protected data will be rejected), and (b) programs which leak solely via progress-flows, leak slowly \cite{Askarov+:ESORICS08}. Recall that Knowledge-Based Security (Def.~\ref{def:pskbsec}) places a bound on the increase in an attacker's knowledge which is allowed to arise from observation of the next output event. Askarov and Chong show how this can be weakened in a natural way to provide a progress-insensitive property, by artificially strengthening the supposed previous knowledge to already include progress knowledge. Their definition of progress knowledge is as follows: \begin{definition}[AC Progress Knowledge $\rhd$ \S\ III.A~\cite{askarov2012}] $$k^{+}(A,c,a,t) = \{ \rho \mid <c,\rho> \overarrowA{t'\cdot v}{} \wedge A(t') = A(t) \} $$ \end{definition} Substituting this (actually, its complement) in the ``previous knowledge'' position in Def.~\ref{def:pskbsec} provides Askarov and Chong's notion of progress-insensitive security: \begin{definition}[AC Progress-Insensitive (ACPI) Security $\rhd$ Def. 2~\cite{askarov2012}] \label{def:akpisec} Command $c$ is AC Progress-Insensitive secure for policy $D$ against an attacker $A$ on channel $a$ for initial store $\sigma$ if for all traces $t$ and values $v$ such that $<c,\sigma>\overarrowA{t}{n}<c',\sigma'>\overarrowA{v}{1}$ we have $$ {\ek(A,c,a,t\cdot v)} - {\ek^{+}(A,c,a,t)} \subseteq \compeclass{\sigma}{\equiv} $$ where ${\equiv} = D_a(c,\sigma,n)$. \end{definition} Now consider again programs A and B above. These are examples of programs where the \emph{only} flows are progress-flows. In general, we say that a program is \emph{quasi-constant} if there is some fixed (possibly infinite) trace $t$ such that every trace produced by the program is a prefix of $t$, regardless of the choice of initial store. 
Thus, for a quasi-constant program, the only possible observable variation in observed behaviour is trace length, so all flows are progress-flows. Since PI security is intended explicitly to allow progress-flows, we should expect all quasi-constant programs to satisfy PI security, regardless of the choice of policy and for all possible attackers. But, for Def.~\ref{def:akpisec}, this fails to hold, as shown by the following counterexample. \begin{example} \label{ex:piflawed} Consider the program and attacker below. The attacker is a very simple bounded-memory attacker which remembers just the last output seen and nothing else (not even whether it has seen any previous outputs). \begin{figure}[h] \centering \begin{subfigure}[b]{0.3\textwidth} \centering \begin{lstlisting} // /*@${\tt x} \not\rightarrow a$@*/ out 1 on /*@$a$@*/; out 1 on /*@$a$@*/; while (x) skip; out 1 on /*@$a$@*/; out 2 on /*@$a$@*/; \end{lstlisting} \end{subfigure} \begin{subfigure}[b]{0.5\textwidth} \centering \begin{tikzpicture}[->,auto,node distance=2cm,semithick] \tikzstyle{every state}=[fill=none,draw=black,text=black] \node[initial,state] (A) {$q_0$}; \node[state] (B) [above right=0cm and 1cm of A] {$q_1$}; \node[state] (C) [below right=0cm and 1cm of A] {$q_2$}; \path (A) edge [above, pos=0.4] node {1} (B) edge [below, pos=0.4] node {2} (C) (B) edge [loop right] node {1} (B) edge node {2} (C) (C) edge [loop right] node {2} (C) edge node {1} (B); \end{tikzpicture} \end{subfigure} \end{figure} Clearly, the program is quasi-constant. However, it is \emph{not} ACPI secure for the given attacker. To see this, suppose that {\tt x} = 0 and consider the trace $t = 1 \cdot 1 \cdot 1$. The attacker has no knowledge at this point ($k(t)$ is the set of all stores) since it does not know whether it has seen one, two or three 1's. 
It is easily verified that $k^{+}(t)$ is also the set of all stores for this attacker (intuitively, giving this attacker progress knowledge in the form $k^{+}$ doesn't help it, since it still does not know which side of the loop has been reached). But $k(t\cdot 2)$ is \emph{not} the set of all stores, since in state $q_2$ the attacker is able to exclude all stores for which {\tt x} = 1, thus ACPI security is violated. \end{example} What has gone wrong here? The attacker itself seems reasonable. We argue that the real problem lies in the definition of $k^{+}(A,c,a,t)$. As defined, this is the knowledge that $A$ would have in state $A(t)$ if given just the additional information that $c$ can produce at least one more output. But this takes no account of any \emph{previous} progress knowledge which might have been forgotten by $A$. (Indeed, the above attacker forgets nearly all such previous progress knowledge.) As a consequence, the resulting definition of PI security mistakenly treats some increases in knowledge as significant, even though they arise purely because the attacker has forgotten previously available progress knowledge. Our solution will be to re-define progress knowledge to include what the attacker would know \emph{if it had been keeping count}. To this end, for any attacker $A = (S,s_0,\delta)$ we define a counting variant $A^{\omega} = (S^{\omega},s^{\omega}_0,\delta^{\omega})$, such that $S^{\omega} \subseteq S \times N$, $s^{\omega}_0 = (s_0, 0)$ and $\delta^{\omega}((s,n),v) = (\delta(s,v), n+1)$. In general, $A^\omega$ will be at least as strong an attacker as $A$: \begin{lemma} \label{lemma:omegastrengthens} For all $A$, $c$, $a$, $t$: \begin{enumerate} \item $k(A^\omega,c,a,t) \subseteq k(A,c,a,t)$ \item $ek(A,c,a,t) \subseteq ek(A^\omega,c,a,t)$ \end{enumerate} \end{lemma} \begin{proof} It is easily seen that $A^\omega(t) = (q,n) \Rightarrow A(t) = q$. Thus $A^\omega(t') = A^\omega(t) \Rightarrow A(t') = A(t)$, which establishes part 1. 
Part 2 is just the contrapositive of part 1. \end{proof} Our alternative definition of progress knowledge is then: \begin{definition}[Full Progress Knowledge] $$k^{\#}(A,c,a,t) = \{ \rho \mid <c,\rho> \overarrowA{t'\cdot v}{} \wedge A^\omega(t') = A^\omega(t) \} $$ \end{definition} Our corresponding PI security property is: \begin{definition}[Progress-Insensitive (PI) Security] \label{def:pikbsec} Command $c$ is progress-insensitive secure for policy $D$ against an attacker $A$ on channel $a$ for initial store $\sigma$ if for all traces $t$ and values $v$ such that $<c,\sigma>\overarrowA{t}{n}<c',\sigma'>\overarrowA{v}{1}$ we have $$ {\ek(A,c,a,t\cdot v)} - {\ek^{\#}(A,c,a,t)} \subseteq \compeclass{\sigma}{\equiv} $$ where ${\equiv} = D_a(c,\sigma,n)$. \end{definition} This definition behaves as expected for quasi-constant programs: \begin{lemma} \label{lemma:progress-only} Let $c$ be a quasi-constant program. Then $c$ is PI secure for all policies $D$ against all attackers $A$ on all channels $a$ for all initial stores $\sigma$. \end{lemma} \begin{proof} It suffices to note that, from the definitions, if $t\cdot v$ is a possible trace for $c$ and $c$ is quasi-constant, then $k^{\#}(A,c,a,t) = k(A^\omega,c,a,t\cdot v)$. The result follows by Lemma~\ref{lemma:omegastrengthens}. \end{proof} As a final remark in this section, we note that there is a class of attackers for which ACPI and PI security coincide. Say that $A$ is \emph{counting} if it always remembers at least how many outputs it has observed. Formally: \begin{definition}[Counting Attacker] $A$ is counting if $A(t) = A(t') \Rightarrow \length{t} = \length{t'}$. \end{definition} Now say that attackers $A$ and $A'$ are isomorphic (written $A \cong A'$) if $A(t_1) = A(t_2) \Leftrightarrow A'(t_1) = A'(t_2)$ and note that none of the attacker-parametric security conditions distinguish between isomorphic attackers (in particular, knowledge sets are always equal for isomorphic attackers). 
It is easily verified that $A \cong A^{\omega}$ for all counting attackers. It is then immediate from the definitions that ACPI security and PI security coincide for counting attackers. \section{Introduction} \label{sec:dependency} \input{dependency} \section{The Dynamic Policy Model} \label{sec:policymodel} \input{policymodel} \section{Knowledge-Based Security Conditions} \label{sec:knowledgebased} \input{knowledgebased} \section{Progress-Insensitive Security as a Two-Run Property} \label{sec:secproperty} \input{secproperty} \section{A Dependency Type System} \label{sec:typesystem} \input{typesystem} \section{Semantic Soundness and Policy Compliance} \label{sec:semsoundness} \input{semsoundness} \section{Related Work} \label{sec:related} \input{related} \section{Conclusions} \label{sec:conclusions} \input{conclusions} \bibliographystyle{alpha} \subsection{Computation and Observation Model} \label{sec:computation} \paragraph{Computation Model} The computation model is given by a labelled transition system over \emph{configurations}. We write $<c,\sigma> \overarrow{\alpha}{} <c',\sigma'>$ to mean that configuration $<c,\sigma>$ evaluates in one step to configuration $<c', \sigma'>$ with label $\alpha$. Here $c$ is a \emph{command} and $\sigma \in \Sigma$ is a \emph{store}. In examples and when we instantiate this model the store will be a mapping from program variables to values. The label $\alpha$ records any output that happens during that step, and we have a distinguished label value $\epsilon$ to denote a silent step which produces no output. Every non-silent label $\alpha$ has an associated channel $\textsf{\small channel}(\alpha) \in {\it Chan}$ and a value $\textsf{\small value}(\alpha)$. Channels are ranged over by $a$ and values by $v$. We abbreviate a sequence of evaluation steps \[ <c_0,\sigma_0> \overarrow{\alpha_1}{} <c_1,\sigma_1> \overarrow{\alpha_2}{} \dots \overarrow{\alpha_n}{} <c_n,\sigma_n> \] as $<c_0,\sigma_0> \overarrow{}{n} <c_n,\sigma_n>$. 
We write $<c_0,\sigma_0> \overarrow{}{*} <c',\sigma'>$ if $<c_0,\sigma_0> \overarrow{}{n} <c',\sigma'>$ for some $n \mathop{\geq} 0$. We write the projection of a single step $<c,\sigma> \overarrow{\alpha}{} <c',\sigma'>$ to some channel~$a$ as $<c,\sigma> \overarrowA{\beta}{} <c',\sigma'>$ where $\beta \mathop{=} v$ if $\textsf{\small channel}(\alpha) = a$ and $\textsf{\small value}(\alpha) = v$, and $\beta \mathop{=} \epsilon$ otherwise, that is, when $\alpha$ is silent or an output on a channel different from $a$. We abbreviate a sequence of evaluation steps \[ <c_0,\sigma_0> \overarrowA{\beta_1}{} <c_1,\sigma_1> \overarrowA{\beta_2}{} \dots \overarrowA{\beta_n}{} <c_n,\sigma_n> \] as $<c_0,\sigma_0> \overarrowA{t}{n} <c_n,\sigma_n>$ where $t$ is the trace of values produced on channel $a$ with every silent $\epsilon$ filtered out. We write $<c_0,\sigma_0> \overarrowA{t}{*} <c',\sigma'>$ if $<c_0,\sigma_0> \overarrowA{t}{n} <c',\sigma'>$ for some $n \mathop{\geq} 0$. We use $\length{t}$ to denote the length of trace $t$ and $t_1 \preceq t_2$ to denote that trace~$t_1$ is a prefix of trace~$t_2$. \paragraph{Attacker's Observation Model} We follow the standard assumption that the command $c$ is known to the attacker. We assume a passive attacker which aims to extract information about an input store $\sigma$ by observing outputs. As in \cite{askarov2012}, the attacker is able only to observe a \emph{single} channel. A generalisation to multi-channel attackers (which would also allow colluding attackers to be modelled) is left for future work. \subsection{Dynamic Policies} \label{sec:dynpolicies} A flow policy specifies a limit on how much information an attacker may learn. A very general way to specify such a limit is as an equivalence relation on input stores. \begin{example} Consider a store with variables {\tt x} and {\tt y}. A simple policy might state that the attacker should only be able to learn the value of {\tt x}. 
It follows that all stores which agree on the value of {\tt x} should look the same to the attacker. This is expressed as the equivalence relation $\sigma \mathop{\equiv} \rho$ iff $\sigma({\tt x}) \mathop{=} \rho({\tt x})$. A more complicated policy might allow the attacker to learn the value of some arbitrary expression $e$ on the initial store, e.g.\ ${\tt x} \mathop{=} {\tt y}$. This is expressed as the equivalence relation $\sigma \mathop{\equiv} \rho$ iff $\sigma(e) \mathop{=} \rho(e)$. \end{example} \begin{definition} [Policy] A policy $P$ maps each channel to an equivalence relation $\equiv$ on stores. We write $P_a$ for the equivalence relation that $P$ defines for channel $a$. \end{definition} As defined, policies are static. A dynamic policy changes while the program is running and may dictate a different $P$ for each point in the execution. Here we assume that the policy changes \emph{synchronously} with the execution of the program. That is, the active policy can be deterministically derived from the execution history so far. \begin{definition} [Execution History] An execution history $\mathcal{H}$ of length $n$ is a transition sequence $<c_0,\sigma_0> \overarrow{\alpha_1}{} <c_1,\sigma_1> \overarrow{\alpha_2}{} \dots \overarrow{\alpha_n}{} <c_n,\sigma_n>$. \end{definition} \begin{definition} [Dynamic Policy] \label{def:dynamicpol} A dynamic policy $D$ maps every execution history $\mathcal{H}$ to a policy $D(\mathcal{H})$. We write $D_a(\mathcal{H})$ for the equivalence relation that is defined by $D(\mathcal{H})$ for channel $a$, that is to say, $D_a(\mathcal{H}) = P_a$ where $P = D(\mathcal{H})$. \end{definition} Most synchronous dynamic policy languages in the literature determine the current policy based solely on the store $\sigma_n$ in the final configuration of the execution history \cite{askarov2012,Paragon}. 
Definition~\ref{def:dynamicpol} allows in principle for more flexible notions of dynamic policies, as they can incorporate the full execution history to determine the policy at each stage of an execution (similar to the notion of conditional noninterference used by \cite{Goguen:Meseguer:Unwinding,Zhang2012}). However, our enforcement does assume that the dynamic policy can be statically approximated per program point, which arguably is only feasible for policies in the style of \cite{askarov2012,Paragon}. Such approximations can typically be improved by allowing the program to branch on policy-related queries. Since programs are deterministic, an execution history of length $n$ is uniquely determined by its initial configuration $<c_0,\sigma_0>$. We use this fact to simplify our definitions and proofs: \begin{definition} [Execution Point] An execution point is a triple $(c_0,\sigma_0,n)$ identifying the point in execution reached after $n$ evaluation steps starting from configuration $<c_0,\sigma_0>$. Such an execution point is considered well-defined iff there exists $<c_n,\sigma_n>$ such that $<c_0,\sigma_0> \overarrow{}{n} <c_n,\sigma_n>$. \end{definition} \begin{lemma} Each well-defined execution point $(c_0,\sigma_0,n)$ uniquely determines an execution history $\mathcal{H}(c_0,\sigma_0,n)$ of length $n$ starting in configuration $<c_0,\sigma_0>$. \end{lemma} In the rest of the paper we rely on this fact to justify a convenient abuse of notation, writing $D(c_0,\sigma_0,n)$ to mean $D(\mathcal{H}(c_0,\sigma_0,n))$. \subsection{Language} We instantiate the abstract computation model of Section~\ref{sec:computation} with a simple while-language with output channels, shown in Figure~\ref{fig:semantics}. We let ${\tt x} \in {\it PVar}$ range over program variables, $a \in {\it Chan}$ range over channels (as before) and $p \in {\it PPoint}$ range over program points. 
Here non-silent output labels have the form $(a,v,p)$, $\textsf{\small channel}(a,v,p) = a$, and $\textsf{\small value}(a,v,p) = v$. The language is similar to the one considered in~\cite{askarov2012}, except for the absence of input channels. Outputs have to be annotated with a program point $p$ to bridge between the dependency analysis and the policy analysis, described in Section~\ref{sec:semsoundness}. \begin{figure}[t] \begin{tabular}{lll} \text{Values} & $v ::=$ & $n$ \qquad\qquad \text{Expressions} $e ::=$ $v \mid {\tt x}$ \\ \text{Commands} & $c ::=$ & ${\tt skip} \mid c_1;c_2 \mid {\tt x} := e \mid {\tt if}\ e\ c_1\ c_2$ $\mid {\tt while}\ e\ c \mid {\tt out}\ e\ {\tt on}\ a\ {\tt @}\ p$ \end{tabular} \[ \inferrule{} {<{\tt skip};c,\sigma> \overarrow{\epsilon}{} <c,\sigma>} \qquad \inferrule{<c_1,\sigma> \overarrow{\alpha}{} <c'_1,\sigma'>} {<c_1;c_2,\sigma> \overarrow{\alpha}{} <c_1';c_2,\sigma'>} \qquad \inferrule{\sigma(e) = v} {<{\tt x} := e,\sigma> \overarrow{\epsilon}{} <{\tt skip},\sigma[{\tt x} \mapsto v]>} \] \[ \inferrule{\sigma(e) = v} {<{\tt out}\ e\ {\tt on}\ a\ {\tt @}\ p,\sigma> \overarrow{(a,v,p)}{} <{\tt skip},\sigma>} \qquad \inferrule{} {<{\tt while}\ e\ c,\sigma> \overarrow{\epsilon}{} <{\tt if}\ e\ (c; {\tt while}\ e\ c)\ {\tt skip}, \sigma>} \] \[ \inferrule{\sigma(e) \not= 0} {<{\tt if}\ e\ c_1\ c_2,\sigma> \overarrow{\epsilon}{} <c_1,\sigma>} \qquad \inferrule{\sigma(e) = 0} {<{\tt if}\ e\ c_1\ c_2,\sigma> \overarrow{\epsilon}{} <c_2,\sigma>} \] \caption{Language and semantics.} \label{fig:semantics} \end{figure}
The type system then ensures no information of a (combined) level $l_1$ can be written to a program variable with level $l_2$ unless $l_1 \sqsubseteq l_2$. The system FST from Hunt and Sands~\cite{Hunt:Sands:ESOP11} differs from these type systems in two ways. Firstly, it does not require intermediate assignments to respect the security lattice ordering. As an observer is assumed to only see the final state of the program, only the final value of a variable must not depend on any information which is forbidden by the lattice ordering. For example, suppose ${\it level}({\tt y}) \sqsubseteq {\it level}({\tt z}) \sqsubseteq {\it level}({\tt x})$ but ${\it level}({\tt x}) \not\sqsubseteq {\it level}({\tt z})$ and consider the first two assignments in the example from Fig.~\ref{fig:depex2}. \[ \verb!x = z + 1; z = x;! \] A traditional type system would label this command as insecure because of the assignment {\tt z = x} and the fact that ${\it level}({\tt x}) \not\sqsubseteq {\it level}({\tt z})$, even though the value of {\tt z} after this assignment does not depend on the initial value of {\tt x} at all. FST however is \emph{flow-sensitive} and allows the security label on {\tt x} to vary through the code. Secondly, and more significantly, by using the powerset of program variables as security lattice, FST provides a \emph{principal typing} from which all other possible typings can be inferred. Thus the typing by FST is generic: a command needs to be typed only once and can then be verified against any static information-flow policy. Since the ordering among labels is not relevant while deriving the typing, FST is also able to verify policies which are not presented in the shape of a security lattice, but any relational {\it `may-flow'} predicate between security labels can be verified. \subsection{Generic typing for dynamic policies} We now present an extended version of FST which includes an additional typing rule for outputs. 
All the original typing rules of FST remain unchanged. Intuitively, an output on a channel is like the final assignment to a variable in the original FST, that is, its value can be observed. Since types are sets of dependencies, we could simply type an output channel as the union of all dependencies resulting from all output statements for that channel. This would be sound but unduly imprecise: the only flows permitted would be those permitted by the policy \emph{at all times}, in effect requiring us to conservatively approximate each dynamic policy by a static one. But we can do better than this. The flow-sensitivity of FST means that a type derivation infers types at intermediate program points which will, in general, be different from the top-level type inferred for the program. These intermediate types are not relevant for variables, since their intermediate values are not observable. But the outputs on channels at intermediate points \emph{are} observable, and so intermediate channel types \emph{are} relevant. Therefore, for each channel we record in $\Gamma$ distinct dependency sets for each program point at which an output statement on that channel occurs. Of course, this is still a static approximation of runtime behaviour. While our simple examples of dynamic policies explicitly associate policy changes to program points, for real-world use more expressive dynamic policy languages may be needed. In Section~\ref{sec:dynpolicies} we formally define the semantics of a dynamic policy as an arbitrary function of a program's execution history, which provides a high degree of generality. However, in order to apply a typing to the verification of such a policy, it is first necessary to conservatively approximate the flows permitted by the policy at each program point of interest (Definition~\ref{def:policyapproximation}). Let $X$ be the dependency set for the channel-$a$ output statement at program point $p$. 
The meaning% \footnote{This is progress-insensitive dependency (see Section~\ref{sec:knowledgebased}). A progress-sensitive version can be defined in a similar way.} of $X$ is as follows: \begin{quote} Let $\sigma$ be a store such that execution starting in $\sigma$ arrives at $p$, producing the $i$'th output on $a$. Let $\rho$ be any store which agrees with $\sigma$ on all variables in $X$ and also eventually produces an $i$'th output on $a$ (not necessarily at the same program point). Then these two outputs will be equal. \end{quote} Two key aspects of our use of program points should be highlighted: \begin{enumerate} \item While the intended semantics of $X$ as outlined above does not require corresponding outputs on different runs to be produced at the same program point, the $X$ that is inferred by the type system \emph{does} guarantee this stronger property. Essentially this is because (in common with all similar analyses) the type system uses control-flow dependency as a conservative proxy for the semantic dependency property of interest. \item Our choice of program point to distinguish between different outputs on the same channel is not arbitrary; it is essentially forced by the structure of the original type system. As noted, program point annotations simply allow us to record in the final typing exactly those intermediate dependency sets which are already inferred by the underlying flow-sensitive system. While it would be possible in principle to make even finer distinctions (for example, aiming for path-sensitivity rather than just flow-sensitivity) this would require fundamental changes to the type system. 
\end{enumerate} \begin{figure} \begin{align*} \ruleName{TS-Skip} &\quad \inferrule {} {\vdash \{ {\tt skip} \}\ {\Gamma_{\mathit{id}}}} \\ \ruleName{TS-Assign} &\quad \inferrule {} { \vdash \{{\tt x :=}\ e\}\ {\Gamma_{\mathit{id}}}\ [ {\tt x} \mapsto {\it fv}(e) \cup \{ {\tt pc} \} ]} \\ \ruleName{TS-Seq} &\quad\hspace{-0.3ex} \inferrule {\vdash \{ c_1 \} \Gamma_1 \\ \vdash \{ c_2 \} \Gamma_2} {\vdash \{c_1\ {\tt ;}\ c_2\}\ \Gamma_2 ; \Gamma_1} \end{align*} \vspace{-3ex} \begin{align*} \ruleName{TS-IfElse} & \\ \span\inferrule{\vdash \{ c_i \} \Gamma_i \\ \vdash \Gamma'_i = \Gamma_i ; {\Gamma_{\mathit{id}}}[{\tt pc} \mapsto \{{\tt pc}\} \cup {\it fv}(e)] \\ i = 1, 2 } {\vdash \{{\tt if}\ e\ c_1\ c_2 \}\ (\Gamma'_1 \cup \Gamma'_2) [ {\tt pc} \mapsto \{ {\tt pc} \} ]} \\ \ruleName{TS-While} & \\ \span\inferrule{ \vdash \{ c \} \Gamma_c \hspace{1cm} \Gamma_f = (\Gamma_c ; {\Gamma_{\mathit{id}}}[{\tt pc} \mapsto \{{\tt pc}\} \cup \mathit{fv}(e)])^{*} } {\vdash \{{\tt while}\ e\ c\}\ \Gamma_f\ [ {\tt pc} \mapsto \{ {\tt pc} \} ]} \\ \ruleName{TS-Output} & \\ \span\inferrule {} { \vdash \{{\tt out}\ e\ \mathtt{on}\ a\ \mathtt{@}\ p\} {\Gamma_{\mathit{id}}} [ a_p \mapsto {\it fv}(e) \cup \{{\tt pc}, a, a_p \} ; a \mapsto \{{\tt pc}, a\} ]} \end{align*} \caption{Type System.} \label{fig:typesystem} \end{figure} The resulting type system is shown in Figure~\ref{fig:typesystem}. We now proceed informally to motivate its rules. Definitions and proofs of formal soundness are presented in Section~\ref{sec:semsoundness}. The type system derives judgements of the form $|- \{c\} \Gamma$, where $\Gamma : {\it Var} \rightarrow 2^{{\it Var}}$ is an environment mapping variables to a set of dependencies. The variables we consider are ${\it Var} = {\it PVar} \cup {\it CPoint} \cup \{{\tt pc}\} \cup {\it Chan}$ with ${\it CPoint} = {\it Chan} \times {\it PPoint}$. We consider the relevance of each kind of variable in turn. 
\begin{itemize} \item As program variables ${\it PVar}$ form the inputs to the command, these are the dependencies of interest in the typing of a command. For program variables themselves, $\Gamma({\tt x})$ are the dependencies for which a different initial value might result in a different final value of {\tt x}. \item Pairs of channels and program points $(a,p) \in {\it CPoint}$ are denoted as $a_p$. The dependencies $\Gamma(a_p)$ are those program variables for which a difference in initial value might cause a difference in the value of any observation that can result from an output statement for channel $a$ with annotation $p$. \item Whenever the program counter ${\tt pc} \mathop{\in} \Gamma({\tt x})$ this indicates that this command potentially changes the value of program variable {\tt x}. Similarly, if ${\tt pc} \mathop{\in} \Gamma(a)$ then $c$ might produce an output on channel $a$ and if ${\tt pc} \mathop{\in} \Gamma(a_p)$ then $c$ might produce an output on $a$ caused by a statement annotated with $p$. We use the program counter to catch implicit flows that may manifest in these ways. \item We use ${\it Chan}$ to capture the latent flows described in example program B in the introduction. The dependencies $\Gamma(a)$ are those program variables for which a difference in initial value might result in a different number of outputs produced on channel $a$ by this command. This approach to address latent flows was first introduced in~\cite{askarov2012} as \emph{channel context bounds}. \end{itemize} We first explain the notation used in the unchanged rules from FST before turning our attention to the new \ruleName{TS-Output} rule. All concepts have been previously introduced in~\cite{Hunt:Sands:ESOP11}. The function ${\it fv}(e)$ returns the free variables in expression $e$. The identity environment ${\Gamma_{\mathit{id}}}$ maps each variable to the singleton set of itself, that is ${\Gamma_{\mathit{id}}}(x) \mathop{=} \{x\}$ for all $x \mathop{\in} {\it Var}$. 
Sequential composition of environments is defined as: \[\Gamma_2;\Gamma_1(x) = \bigcup_{y \in \Gamma_2(x)}\Gamma_1(y)\] Intuitively, $\Gamma_2;\Gamma_1$ is as $\Gamma_2$ but substituting the dependency relations already established in $\Gamma_1$. We overload the union operator for environments: $(\Gamma_1 \cup \Gamma_2)(x) = \Gamma_1(x) \cup \Gamma_2(x)$. We write $\Gamma^{*}$ for the fixed-point of $\Gamma$, used in \ruleName{TS-While}: \[ \Gamma^{*} = \bigcup_{n \geq 0} \Gamma^n \qquad \textnormal{ where } \Gamma^0 = {\Gamma_{\mathit{id}}} \textnormal{ and } \Gamma^{n+1} = \Gamma^n; \Gamma \] It is only in the typing \ruleName{TS-Output} of the output command that the additional channel and program point dependencies are mentioned; this underlines our statement that extending FST to target dynamic policies is straightforward. We explain the changes to ${\Gamma_{\mathit{id}}}$ in \ruleName{TS-Output} in turn. For $a_p$, clearly the value of the output and thus the observation is affected by the program variables occurring in the expression $e$. We also include the program counter ${\tt pc}$ to catch implicit flows; if we have a command of the form ${\tt if}\ e\ ({\tt out}\ 1\ {\tt on}\ a\ @\ p)\ ({\tt out}\ 2\ {\tt on}\ a\ @\ q)$ the value of the observation caused by output $a_p$ is affected by the branching decision, which is caught in \ruleName{TS-IfElse}. We include the channel context bounds $a$ for the channel on which this output occurs to capture the latent flows of earlier conditional outputs, as demonstrated in the introduction. Observe that by the definition of sequential composition of environments, we only add those dependencies for conditional outputs that happened \emph{before} this output. That is, not the ones that follow this output, since it cannot leak information about the absence of future observations. Finally, we include the dependencies of output point $a_p$ itself. 
By doing so the dependency set of $a_p$ becomes \emph{cumulative}: with every sequential composition (including those used in $\Gamma^{*}$) the dependency set of $a_p$ only grows, as opposed to the dependencies of program variables. This makes us sum the dependencies of all outputs on channel $a$ annotated with the same program point, as we argued earlier. The mapping for channel context bounds $a$ is motivated in a similar manner. The ${\tt pc}$ is included since the variables affecting whether this output occurs on channel $a$ are the same as those that affect whether this statement is reached. Note that we are over-approximating here, as the type system adds the dependencies of $e$ in \[ {\tt if}\ e\ ({\tt out}\ 1\ {\tt on}\ a\ @\ p_1)\ ({\tt out}\ 2\ {\tt on}\ a\ @\ p_2) \] to context bounds $a$, even though the number of outputs is always one. Like for $a_p$, we make $a$ depend on itself, thus accumulating all the dependencies that affect the number of outputs on channel $a$. As the \ruleName{TS-Output} rule does not introduce more complex operations than already present, the type system has the same complexity as FST. That is, the type system can be used to construct a generic type in $O(nv^3)$ where $n$ is the size of the program and $v$ the number of variables in {\it Var}.
1,108,101,565,816
arxiv
\subsection{Choosing the scaling functions~$g(t)$ and~$b(t)$} \label{sec:practical} It follows from~\cref{thm:main} that \textsc{A-GP-UCB} achieves no-regret for any functions~$g(t)$ and~$b(t)$ that increase without bound and render~\cref{eq:thm:regret} sublinear in~$t$. Thus, the corresponding BO routine converges to the optimal value eventually. For example,~$b(t)=g(t)=\log(t)$ satisfy this condition. However, the convergence guarantees in~\cref{thm:main} are only meaningful once $t$ has grown sufficiently so that the true function is contained in the confidence intervals. In practice, BO is often used with objective functions~$f$ that are expensive to evaluate, which imposes a hard constraint on the number of evaluations. For the regret bounds to be meaningful in this setting, we must choose functions~$g$ and~$b$ that grow fast enough to ensure that the constant regret period in~\cref{eq:thm:regret} is small, yet slow enough that the effect of the sublinear regret is visible for small enough~$t$. In the following, we propose two methods to choose~$g(t)$ and~$b(t)$ \emph{adaptively}, based on the observations seen so far. For convenience, we fix the relative magnitude of~$g(t)$ and~$b(t)$. In particular, we define $b(t) = 1 + \epsilon_b(t)$ and $g(t)^d = 1 + \epsilon_g(t)$ together with a weighting factor~$\lambda = \epsilon_b(t) / \epsilon_g(t)$ that encodes whether we prefer to scale up the norm bound using~$b(t)$ or decrease the lengthscales using~$g(t)$. This allows us to reason about the overall magnitude of the scaling~$h(t) = (1 + \epsilon_g(t))(1 + \epsilon_b(t)) \geq 1$, which can be uniquely decomposed into~$g(t)$ and~$b(t)$ given~$\lambda$. For~$\lambda=0$ we have~$g(t) = h(t)$, $b(t)=1$ and the algorithm prefers to attribute an increase in~$h(t)$ to~$g(t)$ and shorten the lengthscales, while for~$\lambda \to \infty$ the algorithm prefers to scale up the RKHS norm. 
The assumptions in Corollary~\ref{cor:concrete_regret_bounds} hold for any~$\lambda \in (0, \infty)$ if~$h(t)$ grows unbounded. Moreover, we have that~$g(t)^d \leq h(t)$ and~$b(t) \leq h(t)$. \paragraph{Reference regret} While any function~$h(t)$ that grows unbounded and renders the cumulative regret in~\cref{thm:main} sublinear makes our method converge to the optimum eventually, we want to ensure that our method performs well in finite time too. For fixed hyperparameters with~$h(t)=1$, which implies $g(t)=b(t)=1$, our algorithm reduces to~\textsc{GP-UCB} with hyperparameters~$\theta_0$ and $B_0$ and the regret bound term~$\sqrt{C_1 \beta_t \gamma_t(\theta_0)}$ is sublinear, which is illustrated by the bottom curve in~\cref{fig:cumulative_regret}. However, this does not imply no-regret if hyperparameters are misspecified as in~\cref{fig:bo_example_1}, since the first term in~\cref{thm:main} is unbounded in this case. To avoid this, we must increase the scaling factor~$h(t)$ to consider larger function classes. We propose to define a sublinear reference regret~$p(t)$, see~\cref{fig:cumulative_regret}, and to scale~$h(t)$ to match an estimate of the regret with respect to the current hyperparameters to this reference. As \textsc{GP-UCB} converges, the regret estimate with respect to the current hyperparameters levels off and drops below the reference~$p(t)$. In these cases, we increase~$h(t)$ to consider larger function classes and explore further. The choice of~$p(t)$ thus directly specifies the amount of additional regret one is willing to incur for exploration. Specifically, given a regret estimate~$\bar{R}_t(h)$ that depends on the data collected so far and the selected scaling~$h$, we obtain~$h(t)$ from matching the reference, $\bar{R}_t(h) = p(t)$, as \begin{equation} h^*(t) = \bar{R}_t^{-1}(p(t)), \qquad h(t) = \max (h^*(t), \, h(t-1)) . \label{eq:h_opt_prob} \end{equation} Here we explicitly enforce that~$h(t)$ must be an increasing function. 
In the following, we consider estimators~$\bar{R}_{t}$ that are increasing functions of~$h$, so that~\cref{eq:h_opt_prob} can be solved efficiently via a line search. Whether choosing~$h(t)$ according to~\cref{eq:h_opt_prob} leads to a sublinear function depends on the regret estimator~$\bar{R}_t$. However, it is always possible to upper bound the~$h(t)$ obtained from~\cref{eq:h_opt_prob} by a fixed sublinear function. This guarantees sublinear regret eventually. In the following, we consider two estimators that upper bound the cumulative regret experienced so far with respect to the hyperparameters suggested by~$h(t)$. \paragraph{Regret bound} As a first estimator for the cumulative regret, we consider the regret bound on~$R_t$ in~\cref{eq:thm:regret}. We focus on the Gaussian kernel, but the arguments transfer directly to the case of the Mat\'ern kernel. The term~$\sqrt{C_1 t \,\beta_t\, I_{\theta_{t}}(\mb{y}_t; f) }$ bounds the regret with respect to the current function class specified by~$\theta_t$. In addition to the direct dependence on~$b(t)g(t)^d$ in~$\beta_t$, the regret bound also depends on~$g(t)$ implicitly through the mutual information~$I_{\theta_t}(\mb{y}_t; f)$, where $\theta_t = \theta_0 / g(t)$. To make the dependence on~$g(t)$ more explicit, we use~\cref{thm:gamma_t_lengthscale_bounds} and rewrite the mutual information as~$(g(t) / g(t-1))^d I_{\theta_{t-1}}(\mb{y}_t; f)$ instead. Note that the scaling factor was derived for~$\gamma_t$, but remains a good indicator of increase in mutual information in practice. With this replacement we use \begin{equation} \bar{R}_t(h) = \sqrt{C_1 t \,\beta_t\left(b(t), g(t)\right)\, g(t)^d I_{\theta_{t-1}}(\mb{y}_t; f) } \label{eq:gt_scaleinfo} \end{equation} to estimate the regret, where the term~$\beta_t(b, g)$ is as in~\cref{thm:main}, but with the mutual information similarly replaced with the explicit dependence on~$g(t)$. 
Solving~\cref{eq:h_opt_prob} together with~\cref{eq:gt_scaleinfo} is computationally efficient, since computing~$\bar{R}_t$ does not require inverting the kernel matrix. \added{ \paragraph{One step predictions} While~\cref{eq:gt_scaleinfo} is fast to compute, it requires us to know the dependence of~$\gamma_t(\theta_t)$ on~$h(t)$. Deriving analytic bounds can be infeasible for many kernels. As an alternative, we estimate the regret one-step ahead directly. In particular, if the considered function class is sufficiently large and our confidence intervals hold at all time steps~$t > 0$, then the one-step ahead cumulative regret~$R_{t+1}$ for our algorithm at iteration~$t$ is bounded from above by \begin{equation} \bar{R}_{t} = 2 \sum_{j=1}^{t} \beta_j^{1/2} \sigma_j(\mb{x}_{j + 1}), \label{eq:hyp_opt} \end{equation} where each~$\beta_t$ and~$\sigma_t$ is based on the corresponding hyperparameters~$\theta_t$. In \cref{thm:main}, $R_{t+1}$ is further upper-bounded by~\cref{eq:thm:regret}. The regret estimate in~\cref{eq:hyp_opt} depends on~$\mb{x}_{t+1}$, which is the next input that would be evaluated based on the UCB criterion with GP hyperparameters scaled according to~$h(t)$. As the hyperparameters for previous iterations are fixed, the only term that depends on~$h(t)$ is the bound on the instantaneous regret,~$r_t \leq 2 \beta_t \sigma_t(\mb{x}_{t+1})$. Unlike~\cref{eq:gt_scaleinfo}, \cref{eq:hyp_opt} is not able to exploit the known dependence of~$\gamma_t$ on~$h(t)$, so that it cannot reason about the long-term effects of changing~$h(t)$. This means that, empirically, the cumulative regret may overshoot the reference regret, only to settle below it later. Scaling~$h(t)$ according to~\cref{eq:hyp_opt} provides an interesting perspective on the method by~\citet{Wang2014Theoretical}. They decrease the kernel lengthscales whenever~$\sigma_t(\mb{x}_{t+1}) \leq \kappa$. 
In our framework, this corresponds to~$p(t) = \sum_{j=1}^t 2 \beta_j \max(\kappa, \sigma_j(\mb{x}_{j+1})) \geq \kappa t$, which is not sublinear. As a consequence, while they ultimately bound the cumulative regret using the smallest possible lengthscale, the choice for~$p(t)$ forces too much exploration to achieve sublinear regret before the lower bound is reached. In contrast, if we choose~$p(t)$ to be sublinear, then the function class grows slowly enough to ensure more careful exploration. This allows us to achieve sublinear regret in the case when a lower bound on the hyperparameters is not known. } \subsection{Practical Considerations and Discussion} \label{sec:exploration} \label{sec:discussion} In this section, we discuss additional practical considerations and show how to combine the theoretical results with online inference of the hyperparameters. \paragraph{Online inference and exploration strategies} The theoretical results presented in the previous sections extend to the case where the initial guess~$\theta_0$ of the GP's lengthscale is improved online using any estimator, e.g., with MAP estimation to obtain~$\theta_t^\mathrm{MAP}$. Theoretically, as long as the change in~$\theta_0$ is bounded, the cumulative regret increases by at most a constant factor. In practice, this bound can always be enforced by truncating the estimated hyperparameters. Moreover, the scaling induced by online inference can be considered to be part of~$g(t)$ according to~\cref{thm:rkhs_norm_change}, in which case the norm bound can be adapted accordingly. In practice, online inference improves performance drastically, as it is often difficult to specify an appropriate relative initial scaling of the lengthscales~$\theta_0$. In more than one dimension,~$d>1$, there are multiple ways that MAP estimation can be combined with the theoretical results of the paper. 
The simplest one is to enforce an upper bound on the lengthscales based on~$g(t)$, \begin{equation} \theta_t = \min(\theta_t^{\mathrm{MAP}},\, \theta_0 \,/\, g(t)), \label{eq:exploration_max} \end{equation} \added{where the min is taken elementwise}. This choice is similar to the one by~\citet{Wang2016Bayesian}. If all entries of~$\theta_0$ have the same magnitude, this scaling can be understood as encouraging additional exploration in the smoothest direction of the input space first. This often makes sense, since MAP estimates tend to assume functions that are too smooth, see~\cref{fig:mcmc_example}. However, it can be undesirable in the case when the true function only depends on a subset of the inputs. In these cases, the MAP estimate would correctly eliminate these inputs from the input space by assigning long lengthscales, but the scaling in~\cref{eq:exploration_max} would encourage additional exploration in these directions first. \added{Note that eventually exploring the entire input space is unavoidable to avoid getting stuck in local optima~\citep{Bull2011Convergence}.} An alternative approach is to instead scale down the MAP estimate directly, \begin{equation} \theta_t = \theta_t^{\mathrm{MAP}} \, / \, \max(g(t), \, 1). \label{eq:exploration_scale} \end{equation} This scaling can be understood as evenly encouraging additional exploration in all directions. While~\cref{eq:exploration_scale} also explores in directions that have been eliminated by the MAP estimate, unlike~\cref{eq:exploration_max} it simultaneously explores all directions relative to the MAP estimate. From a theoretical point of view, the choice of exploration strategy does not matter, as in the limit as~$t \to \infty$ all lengthscales approach zero. In the one-dimensional case, the two strategies are equivalent. Both strategies use the MAP lengthscales for BO in the nominal case, but the~$g(t)$ factor eventually scales down the lengthscales further. 
This ensures that our method only improves on the empirical performance of BO with MAP estimation. In practice, maximum likelihood estimates for the hyperparameters are often good enough when the underlying function resembles a sample from a GP. Thus, the approach presented in this paper is most relevant when the underlying function has some `nonstationarity'. In the literature, other approaches to deal with nonstationarity have been proposed. For example,~\cite{Snoek2013Input} scale the inputs through a beta function and infer its hyperparameters online. Our approach can easily be combined with any such method, as it works on top of any estimate provided by the underlying inference scheme. Moreover, in high-dimensional spaces one can combine our algorithm with methods to automatically identify a low-dimensional subspace of~$\mathcal{D}$~\citep{Djolonga2013HighDimensional,Wang2016Bayesian}. In this paper, we have considered the kernel to be fixed, and only adapted the lengthscales and norm bound. However, often the kernel structure itself is a critical hyperparameter~\citep{Duvenaud2011Additive}. The strategy presented in this paper could be used to add rougher kernels over time or, for example, to adapt the~$\nu$ parameter of the Mat\'ern kernel, which determines its roughness. \paragraph{Confidence intervals} Empirically, $\beta_t$ is often set to a constant rather than using the theoretical bounds in~\cref{thm:confidence_interval}, which leads to (point-wise) confidence intervals when~$f$ is sampled from a GP model. In particular, typically measurement data is standardized to be zero mean and unit variance and~$\beta_t$ is set to two or three. This often works well in practice, but does not provide any guarantees. However, if one were to believe the resulting confidence bounds, our method can be used to avoid getting stuck in local optima, too. In this case one can set~$h(t) = g(t)$ and apply our method as before. 
\paragraph{General discussion} Knowing how the sample complexity of the underlying BO algorithm depends on the lengthscales also has implications in practice. For example,~\cite{Wang2016Bayesian} and~\cite{Wabersich2016Advancing} suggest to scale down the lengthscales by a factor of~$2$ and roughly~$1.1$, respectively, although not at every iteration. As shown in~\cref{sec:theory}, this scales the regret bound by a factor of~$g^d$, which quickly grows with the number of dimensions. Exponentiating their factors with~$1/d$ is likely to make their approaches more robust when BO is used in high-dimensional input spaces~$\mathcal{D}$. Lastly, in a comparison of multiple BO algorithms (acquisition functions) on a robotic platform,~\cite{Calandra2014Experimental} conclude that the~\textsc{GP-UCB} algorithm shows the best empirical performance for their problem. They use the theoretical version of the algorithm by~\citet{Srinivas2012Gaussian}, in which~$\beta_t$ grows with an additional factor of~$\mathcal{O}(\sqrt{\log(t^2)})$ relative to~\cref{thm:confidence_interval}. In our framework with the bounds in~\cref{thm:confidence_interval}, this is equivalent to scaling up the initial guess for the RKHS norm bound for~$f$ by the same factor at every iteration, which increases the function class considered by the algorithm over time. We conjecture that this increase of the function class over time is probably responsible for pushing the MAP estimate of the lengthscales out of the local minima, which in turn led to better empirical performance. \section{Experiments} \label{sec:experiments} In this section, we evaluate our proposed method on several benchmark problems. 
\added{As baselines, we consider algorithms based on the \textsc{UCB} acquisition function.} We specify a strong gamma prior that encourages short lengthscales, and consider both maximum a posteriori (MAP) point-estimates of the hyperparameters and a Hamiltonian Monte Carlo (HMC) approach that samples from the posterior distribution of the hyperparameters and marginalizes them out. Unless otherwise specified, the initial lengthscales are set to~$\theta_0 = \mb{1}$, the initial norm bound is~$B_0=2$, the confidence bounds hold with probability at least~$\delta=0.9$, and the tradeoff factor between~$b(t)$ and~$g(t)$ is~$\lambda = 0.1$. We follow several best-practices in BO to ensure a fair comparison with the baselines. We rescale the input space~$\mathcal{D}$ to the unit hypercube in order to ensure that both the initial lengthscales and the prior over lengthscales are reasonable for different problems. As is common in practice, the comparison baselines use the empirical confidence intervals suggested in~\cref{sec:discussion}, instead of the theoretical bounds in~\cref{thm:confidence_interval} that are used for our method. Lastly, we initialize all GPs with~$2^d$ measurements that are collected uniformly at random within the domain~$\mathcal{D}$. To measure performance, we use the cumulative regret that has been the main focus of this paper. In addition, we evaluate the different methods in terms of simple regret, which is the regret of the best inputs evaluated so far, $\max_{x\in\mathcal{D}} f(x) - \max_{t' \leq t} f(\mb{x}_{t'})$. This metric is relevant when costs during experiments do not matter and BO is only used to determine high-quality inputs by the end of the optimization procedure. \subsection{Synthetic Experiments} \paragraph{Example function} \begin{figure*}[t] \centering \subcaptionbox{Simple regret. \label{fig:bumplinear_simregret}} {\includegraphics{figures/bumplinear_simregret.pdf}} \subcaptionbox{Cumulative regret. 
\label{fig:bumplinear_cumregret}} {\includegraphics{figures/bumplinear_cumregret.pdf}} \subcaptionbox{Scaling~$g(t)$. \label{fig:bumplinear_funclass}} {\includegraphics{figures/bumplinear_functionclass.pdf}} \caption{Mean and standard deviation of the empirical simple and cumulative regret over ten different random initializations for the function in~\cref{fig:bo_example}. The HMC baseline (red) gets stuck in a local optimum and obtains constant regret in~\cref{fig:bumplinear_simregret}. \textsc{GP-UCB} with the true hyperparameters (gray dashed) obtains the lowest cumulative regret in~\cref{fig:bumplinear_cumregret}. However, our methods (orange/blue) increase the function class over time, see~\cref{fig:bumplinear_funclass}, and thus obtain sublinear regret without knowing the true hyperparameters.} \label{fig:bumplinear_results} \end{figure*} We first evaluate all proposed methods on the example function in~\cref{fig:bo_example}, which lives inside the~RKHS associated with a Gaussian kernel with~$\theta=0.1$ and has norm~$\|f\|_{k_\theta}=2$. We evaluate our proposed method for the sublinear reference function~$p(t) = t^{0.9}$ together with maximum a posteriori hyperparameter estimation. We compare against both \textsc{GP-UCB} with the fixed, correct hyperparameters and HMC hyperparameter estimation. Additionally, we consider a modified variant of the method suggested by~\cite{Wang2014Theoretical}, see~\cref{sec:practical}. Rather than scaling the lengthscales by a fixed constant, we conduct a line search to find the smallest possible scaling factor that renders~$\sigma_t(\mb{x}_{t+1}) \geq \kappa = 0.1$. This is the most conservative variant of the algorithm. Note that we do not know a lower bound on the hyperparameters and therefore do not enforce it. The results of the experiments are shown in~\cref{fig:bumplinear_results}. 
The simple regret plot in~\cref{fig:bumplinear_simregret} shows that all methods based on hyperparameter adaptation evaluate close-to-optimal inputs eventually, and do so almost as quickly as~\textsc{GP-UCB} based on the true hyperparameters (black, dashed). However, the method based on HMC hyperparameter estimation (red) considers functions that are too smooth and gets stuck in local optima, as in~\cref{fig:bo_example}. This can also be seen in~\cref{fig:bumplinear_funclass}, which plots the effective scaling~$g(t)$ based on the combination of Bayesian hyperparameter estimation and hyperparameter adaptation through~$h(t)$. The HMC hyperparameters consistently over-estimate the lengthscales by a factor of roughly two. In contrast, while the MAP estimation leads to the wrong hyperparameters initially, the adaptation methods in~\cref{eq:gt_scaleinfo,eq:hyp_opt} slowly increase the function class until the true lengthscales are found eventually. It can be seen that the one step estimate~\cref{eq:hyp_opt} (orange) is more noisy than the upper bound in~\cref{eq:gt_scaleinfo} (blue). \begin{figure*}[t] \centering \subcaptionbox{Simple regret. \label{fig:sample_simregret}} {\includegraphics{figures/sample_simregret.pdf}} % \subcaptionbox{Cumulative regret. \label{fig:sample_cumregret}} {\includegraphics{figures/sample_cumregret.pdf}} % \caption{Simple and cumulative regret over 10 random seeds for samples from a GP with bounded RKHS norm. The \textsc{GP-UCB} algorithm with misspecified hyperparameters (magenta) fails to converge given only a wrong choice of~$B_0$. In contrast, our methods (blue/orange) converge even though~$\theta_0$ is misspecified in addition.} \label{fig:sample_results} \end{figure*} While all adaptation methods determine good inputs quickly according to the simple regret, they perform differently in terms of the cumulative regret in~\cref{fig:bumplinear_cumregret}. 
As expected, the HMC method (red line) converges to a local optimum and experiences constant regret increase equal to the simple regret at every time step. The modified method of~\cite{Wang2014Theoretical} (green line) expands the function class too aggressively and also achieves constant regret. Empirically, their method always explores and never repeatedly evaluates close-to-optimal inputs that would decrease cumulative regret. While the method works well in terms of simple regret, without a lower bound on the hyperparameters it never converges to sublinear regret. As expected from~\cref{thm:main}, GP-UCB based on the optimal hyperparameters achieves the lowest cumulative regret. Our two methods expand the function class over time, which allows them to converge to close-to-optimal inputs, even though MAP estimation estimates the hyperparameters wrongly initially. While the regret is sublinear, the additional exploration caused by~$g(t)$ means that the cumulative regret is larger. This is the additional cost we incur for not knowing the hyperparameters in advance. \paragraph{Samples from a GP} As a second experiment, we compare~\textsc{GP-UCB} to \textsc{A-GP-UCB} on samples drawn from a GP when the norm bound~$B_0$ is misspecified. Samples from a GP are not contained in the RKHS. To avoid this technical issue, we sample function values from the posterior GP at only a finite number of discrete gridpoints and interpolate between them using the kernel with the correct lengthscales~$\theta$. We rescale these functions to have~RKHS norm of~$B=4$, but use~$B_0=0.25$ as an initial guess for both BO algorithms and do not use any hyperparameter estimation. Even though we use the correct kernel lengthscales for \textsc{GP-UCB},~$\theta_0 = \theta = 0.1$, this discrepancy means that the true function is not contained in the initial confidence intervals. 
As before, for our method we use the reference regret~$p(t) = t^{0.9}$ and additionally misspecify the lengthscales,~$\theta_0 = 1$. The results are shown in~\cref{fig:sample_results}. \textsc{GP-UCB} with the correct hyperparameters (black, dashed) obtains the lowest cumulative regret. However, it fails to converge when hyperparameters are misspecified (magenta), since the confidence intervals are too small to encourage any exploration. In contrast, our methods (blue/orange) converge to close-to-optimal inputs as in the previous example. \subsection{Logistic Regression Experiment} \begin{figure*}[t] \centering \subcaptionbox{Simple regret. \label{fig:logistic_simregret}} {\includegraphics{figures/logistic_simregret.pdf}} % \subcaptionbox{Cumulative regret. \label{fig:logistic_cumregret}} {\includegraphics{figures/logistic_cumregret.pdf}} % \caption{Simple and cumulative regret over 5 random seeds for a logistic regression problem. All methods determine close-to-optimal parameters. However, our methods explore more to counteract misspecified hyperparameters.} \label{fig:logistic_results} \end{figure*} Lastly, we use our method to tune a logistic regression problem on the MNIST data set~\citep{Lecun1998MNIST}. As in the experiment in~\cite{Klein2016Bayesian}, we consider four training inputs: the learning rate, the~$l_2$ regularization constant, the batch size, and the dropout rate. We use the validation loss as the optimization objective. The results are shown in~\cref{fig:logistic_results}. Even though the input space is fairly high-dimensional with~$d=4$, all algorithms determine close-to-optimal inputs quickly. In particular, MAP estimation determines that both the dropout rate and the batch size do not influence the validation loss significantly. Since the theoretical results in~\textsc{A-GP-UCB} are compatible with MAP estimation, our approach achieves the same empirical performance, but has theoretical worst-case regret bounds. 
After convergence, the BO baselines repeatedly evaluate the same inputs, without gaining any new information. In contrast, our method continues to explore in order to potentially find better inputs. While it does not occur in this case, this allows us to be more confident that the global optimum has been identified as~$t$ increases. For standard BO methods, there is no guarantee of convergence with misspecified hyperparameters. \section{The Adaptive \textsc{GP-UCB}~Algorithm} \label{sec:theory} In this section, we extend the \textsc{GP-UCB} algorithm to the case where neither the norm bound~$B$ nor the lengthscales~$\theta$ are known. In this case, it is always possible that the local optimum is defined by a local bump based on a kernel with small lengthscales, which has not been encountered by the data points as in~\cref{fig:mcmc_example_map}. The only solution to avoid this problem is to keep exploring to eventually cover the input space $\mathcal{D}$ \citep{Bull2011Convergence}. We consider expanding the function space associated with the hyperparameters slowly over time, so that we obtain sublinear regret once the true function class has been identified. Intuitively, this can help BO algorithms avoid premature convergence to local optima caused by misspecified hyperparameters~$\theta$ and~$B$. For example, in~\cref{fig:bo_example_1}, the \textsc{GP-UCB} algorithm has converged to a local maximum. By decreasing the lengthscales, we increase the underlying function class, which means that the GP confidence intervals on the function increase. This enables \textsc{GP-UCB} to explore further so that the global optimum is found, as shown in~\cref{fig:bo_example_3}. \begin{figure*}[t] \centering \subcaptionbox{Stuck in local optimum. \label{fig:bo_example_1}} {\includegraphics{figures/example_0.pdf}} \subcaptionbox{Expanding the function class. \label{fig:bo_example_2}} {\includegraphics{figures/example_1.pdf}} \subcaptionbox{Global optimum found. 
\label{fig:bo_example_3}} {\includegraphics{figures/example_2.pdf}} \caption{BO algorithms get stuck in local optima when the hyperparameters of the model are misspecified. In~\cref{fig:bo_example_1}, the true function is not contained within the GP's confidence intervals (blue shaded), so that~$\textsc{GP-UCB}$ only collects data at the local optimum on the right (green arrow), see also~\cref{fig:mcmc_example}. Our method expands the function class over time by scaling the hyperparameters, which encourages additional exploration in~\cref{fig:bo_example_2}. The function class grows slowly enough, so that the global optimum is provably found in~\cref{fig:bo_example_3}.} \label{fig:bo_example} \end{figure*} Specifically, we start with an initial guess~$\theta_0$ and~$B_0$ for the lengthscales and norm bound on~$f$, respectively. Over the iterations, we scale down the lengthscales and scale up the norm bound, \begin{equation} \theta_{t} = \frac{1}{g(t)} \, \theta_0, \qquad B_t = b(t) g(t)^d \, B_0, \label{eq:temporal_lengthscales_and_norm} \end{equation} where~$g \colon \mathbb{N} \to \mathbb{R}_{> 0}$ and~$b \colon \mathbb{N} \to \mathbb{R}_{> 0}$ with~$b(0)=g(0)=1$ are functions that can additionally depend on the data collected up to iteration~$t$, $\mathcal{A}_t$ and~$\mb{y}_t$. As~$g(t)$ increases, the lengthscales~$\theta_t$ of the kernel become shorter, which enlarges the underlying function space: \begin{lemma}{\cite[Lemma 4]{Bull2011Convergence}} If $f \in \mathcal{H}_\theta$, then $f \in \mathcal{H}_{\theta'}$ for all $0 < \theta' \leq \theta$, and % \begin{equation} \| f \|^2_{\mathcal{H}_{\theta'}} \leq \left( \prod_{i=1}^d \frac{[\theta]_i}{[\theta']_i} \right) \| f \|^2_{\mathcal{H}_{\theta}} \,. \label{eq:rkhs_norm_change} \end{equation} \label{thm:rkhs_norm_change} \end{lemma} \cref{thm:rkhs_norm_change} states that when decreasing the lengthscales~$\theta$, the resulting function space contains the previous one. 
Thus, as~$g(t)$ increases we consider larger RKHS spaces as candidate spaces for the function~$f$. In addition, as we increase~$b(t)$, we consider larger norm balls within the function space~$\mathcal{H}_{\theta_t}$, which corresponds to more complex functions. However, it follows from \cref{eq:rkhs_norm_change} that, as we increase~$g(t)$, we also increase the norm of any existing function in~$\mathcal{H}_{\theta_0}$ by at most a factor of~$g(t)^d$. This is illustrated in~\cref{fig:norm_balls}: as we scale up the norm ball to~$b(t)B_0$, we capture~$f$ under the initial lengthscales~$\theta_0$. However, by shortening the lengthscales by~$g(t)$, the function~$f$ has a larger norm in the new function space~$\mathcal{H}_{\theta_t} = \mathcal{H}_{\theta_0 / g(t)}$. We account for this through the additional scaling factor~$g(t)^d$ in the norm bound~$B_t$ in~\cref{eq:temporal_lengthscales_and_norm}. \begin{figure*}[t] \centering \subcaptionbox{Scaling of the norm bound. \label{fig:norm_balls}} {\includegraphics{figures/norm_balls.pdf}} \hfill \subcaptionbox{Cumulative regret with scaling. \label{fig:cumulative_regret}} {\includegraphics{figures/cumulative_regret.pdf}} \caption{The function $f$ in~\cref{fig:norm_balls} has RKHS norm $\|f\|_{\theta_0} > B_0$. To account for this, we expand the norm ball by~$b(t)$ over time. When we scale down the lengthscales by~$g(t)$, the norm of~$f$ in the resulting RKHS is larger, see~\cref{thm:rkhs_norm_change}. We account for this when defining the norm ball~$B_t$ in~\cref{eq:temporal_lengthscales_and_norm}. In~\cref{fig:cumulative_regret}, the \textsc{GP-UCB} algorithm based on the misspecified hyperparameters $B_0$ and $\theta_0$ does not converge (constant regret). Our method scales the lengthscales and norm bound by $g(t)$ and $b(t)$, so that we eventually capture the true model. 
Scaling the hyperparameters beyond the true ones leads to additional exploration and thus larger cumulative regret than~\textsc{GP-UCB} with the true, unknown hyperparameters $\theta$ and $B$. However, as long as the cumulative regret is upper bounded by a sublinear function~$p$, ultimately the~\textsc{A-GP-UCB}~ algorithm converges to the global optimum.} \label{fig:algorithm_intuition} \end{figure*} \paragraph{Theoretical analysis} Based on the previous derivations together with~\cref{thm:rkhs_norm_change}, it is clear that, if~$g(t)$ and~$b(t)$ are monotonically increasing functions and~$f \in \mathcal{H}_{\theta_{t^*}}$ with~$\|f\|_{\theta_{t^*}} \leq B_{t^*}$ for some~$t^* > 0$, then~$f \in \mathcal{H}_{\theta_{t}}$ and~$\|f\|_{\theta_{t}} \leq B_{t}$ for all~$t \geq t^*$. That is, once the function~$f$ is contained within the norm ball of~$B_{t^*}$ for the lengthscales~$\theta_{t^*}$, then, for any further increase in~$b(t)$ or~$g(t)$, the function~$f$ is still contained in the candidate space~$\{f \in \mathcal{H}_{\theta_t} \,|\, f \leq B_t\}$. Based on this insight, we propose~\textsc{A-GP-UCB} in~\cref{alg:a_gp_ucb}. At iteration~$t$, \textsc{A-GP-UCB} sets the GP lengthscales to~$\theta_t$ and selects new inputs~$\mb{x}_{t+1}$ similar to the~\textsc{GP-UCB} algorithm, but based on the norm bound~$B_t$. We extend the analysis of~\textsc{GP-UCB} and~\cref{thm:confidence_interval} to obtain our main result. \begin{theorem} Assume that~$f$ has bounded RKHS norm~$ \| f \|_{k_\theta}^2 \leq B $ in a RKHS that is parametrized by a stationary kernel $k_\theta(\mb{x}, \mb{x}')$ with unknown lengthscales $\theta$. Based on an initial guess, $\theta_0$ and~$B_0$, define monotonically increasing functions~$g(t)>0$ and~$b(t)>0$ and run~\textsc{A-GP-UCB} with $\beta_t^{1/2} = b(t) g(t)^d B_0 + 4 \sigma \sqrt{ I_{\theta_t}(\mb{y}_t; f) + 1 + \mathrm{ln}(1 / \delta)}$ and GP lengthscales~$\theta_t = \theta_0 / g(t)$. 
Then, with probability at least~$(1-\delta)$, we obtain a regret bound of \begin{equation} R_t \leq 2 B \max\left( g^{-1}\left(\max_i \frac{[\theta_0]_i} {[\theta]_i} \right),\, b^{-1}\left( \frac{B}{B_0} \right) \right) +\sqrt{ C_1 t \beta_t I_{\theta_t}(\mb{y}_{t}; f) } , \label{eq:thm:regret} \end{equation} where~$I_{\theta_t}$ is the mutual information in~\cref{eq:mutual_information} based on the GP model with lengthscales~$\theta_t$ and~$C_1 = 8 / \log(1 + \sigma^{-2} )$. \label{thm:main} \end{theorem} The proof is given in the appendix. Intuitively, the regret bound in~\cref{eq:thm:regret} splits the run of the algorithm into two distinct phases. In the first one, either the RKHS space~$\mathcal{H}_{\theta_t}(\mathcal{D})$ or the norm bound~$B_t$ are too small to contain the true function~$f$. Thus, the GP confidence intervals scaled by~$\beta_t^{1/2}$ do not necessarily contain the true function~$f$, as in~\cref{fig:mcmc_example_map}. In these iterations, we obtain constant regret that is bounded by~$2B$, since~$\|f\|_\infty \leq \|f\|_{\theta} \leq B$. After both~$g$ and~$b$ have grown sufficiently in order for the considered function space to contain the true function, the confidence bounds are reliable and we can apply the theoretical results of the~\textsc{GP-UCB} algorithm. This is illustrated in~\cref{fig:cumulative_regret}: If the initial hyperparameters~$\theta_0$ and~$B_0$ are misspecified, the confidence intervals do not contain~$f$ and \textsc{GP-UCB} does not converge. We avoid this problem by increasing~$b(t)$ and~$g(t)$ over time, so that we eventually contain $f$ in our function class. However, increasing the norm ball and decreasing the lengthscales beyond the true ones causes additional exploration and thus additional cumulative regret relative to \textsc{GP-UCB} with the true, unknown hyperparameters. This additional regret represents the cost of not knowing the hyperparameters in advance. 
As long as the overall regret remains bounded by a sublinear function~$p(t)$, our method eventually converges to the global optimum. The regret bound in~\cref{eq:thm:regret} depends on the true hyperparameters~$\theta$ and~$B$. However, the algorithm does not depend on them. \cref{thm:main} provides an instance-specific bound, since the mutual information depends on the inputs in~$\mathcal{A}_t$. One can obtain a worst-case upper bound by bounding~$I_{\theta_t}(\mb{y}_t; f) \leq \gamma_t(\theta_t)$, which is the worst-case mutual information as in~\cref{eq:gamma_t}, but based on the GP model with lengthscales~$\theta_t$. While \cref{thm:main} assumes that the noise properties are known, the results can be extended to estimate the noise similar to~\citet{Durand2018Streaming}. \begin{algorithm}[t] \caption{Adaptive \textsc{GP-UCB} (\textsc{A-GP-UCB})} \begin{algorithmic}[1] \STATE{} \textbf{Input:} Input space~$\mathcal{D}$, $GP(0, k(\mb{x}, \mb{x}'))$, functions~$g(t)$ and~$b(t)$ \\ \STATE{} Set $B_0 = 1$ and $\theta_0 = \mathrm{diam}(\mathcal{D})$ \FORALL{$t = 0, 1, 2, \dots$} \STATE{} Set the GP kernel lengthscales to~$\theta_{t} = \theta_0 / g(t)$ \STATE{} $\beta_t^{1/2} \gets B(t) + 4 \sigma \sqrt{ I_{\theta_t}(\mb{y}_t; f) + 1 + \mathrm{ln}(1 / \delta)}$ with $B(t) = b(t) g(t)^d B_0$ \STATE{} Choose $\mb{x}_{t+1} = \operatornamewithlimits{argmax}_{\mb{x} \in \mathcal{D}} \, \mu_{t}(\mb{x}) + \beta_t^{1/2} \sigma_{t}(\mb{x})$ \STATE{} Evaluate $y_{t+1} = f(\mb{x}_{t+1}) + \epsilon_{t+1}$ \STATE{} Perform Bayesian update to obtain~$\mu_{t+1}$ and~$\sigma_{t+1}$ \ENDFOR{} \end{algorithmic} \label{alg:a_gp_ucb} \end{algorithm} For arbitrary functions~$g(t)$ and~$b(t)$, the candidate function space $\{f \in \mathcal{H}_{\theta_t} \,|\, f \leq B_t\}$ can grow at a faster rate than it contracts by selecting informative measurements~$y_t$ according to~\cref{eq:gp_ucb}. 
In particular, in the regret term~$\sqrt{C_1 t \beta_t \gamma_t}$ both~$\beta_t$ and~$\gamma_t$ depend on the scaling factors~$g(t)$ and~$b(t)$. If these factors grow at a faster rate than~$\sqrt{t}$, the resulting algorithm does not enjoy sublinear regret. We have the following result that explicitly states the dependence of~$\gamma_t$ on the scaling factor~$g(t)$. \begin{proposition} Let~$k_\theta$ be a stationary kernel parameterized by lengthscales~$\theta$ as in~\cref{eq:stationary_lengthscale_kernel} and define~$\gamma_t(\theta)$ for lengthscales~$\theta$ as in~\cref{eq:gamma_t}. Define the lengthscales as~$\theta_t = \theta_0 / g(t)$ as in~\cref{eq:temporal_lengthscales_and_norm}. \begin{itemize} \item If~$k(\mb{x}, \mb{x}')=\mathrm{exp}(-\frac{1}{2}\|\mb{x} - \mb{x}'\|_2^2)$ is the squared exponential (Gaussian) kernel, then \begin{equation} \gamma_t(\theta_t) = \mathcal{O} \left( g(t)^{d}(\log t)^{d+1} \right) \label{eq:gamma_t_gaussian} \end{equation} % \item If~$k(\mb{x}, \mb{x}') = (2^{1-\nu} /\, \Gamma(\nu))\, r^\nu B_\nu(r)$ is the Mat\'ern kernel, where $r = \sqrt{2 \nu} \|\mb{x} - \mb{x}'\|_2$, $B_\nu$ is the modified Bessel function with $\nu > 1$, and~$\Gamma$ is the gamma function. Then \begin{equation} \gamma_t(\theta_t) = \mathcal{O} \left( g(t)^{2\nu + d} t^{\frac{d(d+1)}{ 2\nu + d(d + 1) }} \log t \right) \label{eq:gamma_t_matern} \end{equation} \end{itemize} \label{thm:gamma_t_lengthscale_bounds} \end{proposition} Proposition~\ref{thm:gamma_t_lengthscale_bounds} explicitly states the relationship between~$\gamma_t$ and~$g(t)$. For the Gaussian kernel, if we scale down the lengthscales by a factor of two, the amount of mutual information that we can gather in the worst case,~$\gamma_t$, grows by~$2^d$. Given the dependence of~$\gamma_t$ on~$g(t)$, we can refine~\cref{thm:main} to obtain concrete regret bounds for two commonly used kernels. 
\begin{corollary} If, under the assumptions of~\cref{thm:main},~$g(t)$ and~$b(t)$ grow unbounded, then we obtain the following, high-probability regret bounds for~\cref{alg:a_gp_ucb}: \begin{itemize} \item Squared exponential kernel: $R_t \leq \mathcal{O} \left( b(t) \sqrt{t g(t)^{3d} \gamma_t(\theta_0)} + g(t)^d \gamma_t(\theta_0) \sqrt{t} \right)$; \item Mat\'ern kernel: $R_t \leq \mathcal{O} \left( b(t) \sqrt{t g(t)^{2 \nu + 3 d} \gamma_t(\theta_0)} + g(t)^{\nu + d} \gamma_t(\theta_0)\sqrt{t} \right)$. \end{itemize} \label{cor:concrete_regret_bounds} \end{corollary} If~$b(t)$ and~$g(t)$ grow unbounded, the first term of the cumulative regret in~\cref{eq:thm:regret} can be upper bounded by a constant. The remaining result is obtained by plugging in~$\beta_t$ and the bounds from~\cref{eq:gamma_t}. Thus, any functions $g(t)$ and $b(t)$ that render the regret bounds in Corollary~\ref{cor:concrete_regret_bounds} sublinear allow the algorithm to converge, even though the true lengthscales and norm bound are unknown. \added{ The specific choices of~$b(t)$ and~$g(t)$ matter for the regret bound in \cref{thm:main} in practice. Consider the one-dimensional case~$d=1$ for the Gaussian kernel. Given the true hyperparameters~$B$ and~$\theta$, if we set~$g(t)= \theta_0 / \theta$ and~$b(t) = B / B_0$ to be constant, we recover the non-adaptive regret bounds of~\textsc{GP-UCB} with known hyperparameters. If~$g(t)$ depends on~$t$ and grows slowly, then the algorithm incurs constant regret during the initial rounds when the model is misspecified, while functions~$g$ that grow to values larger than the optimal ones lead to additional exploration and incur an additional~$\mathcal{O}(b(t)g(t)^{3d/2})$ factor in the cumulative regret in later rounds, as in Corollary~\ref{cor:concrete_regret_bounds}. In the following section, we discuss appropriate choices for these functions in practice. 
} \section{Background} \label{sec:background} In this section, we review Gaussian processes (GPs) and Bayesian optimization (BO). \subsection{Gaussian processes (GP)} \label{sec:gaussian_process} Based on the assumptions in~\cref{sec:problem_statement}, we can use GPs to infer confidence intervals on~$f$. The goal of GP inference is to infer a posterior distribution over the nonlinear map~${f(\mb{x}): D \to \mathbb{R}}$ from an input vector~${\mb{x} \in D }$ to the function value~$f(\mb{x})$. This is accomplished by assuming that the function values $f(\mb{x})$, associated with different values of $\mb{x}$, are random variables and that any finite number of these random variables have a joint Gaussian distribution~\citep{Rasmussen2006Gaussian}. A GP distribution is parameterized by a prior mean function and a covariance function or kernel $k(\mb{x}, \mb{x}')$, which defines the covariance of any two function values~$f(\mb{x})$ and $f(\mb{x}')$ for ${\mb{x}, \mb{x}' \in D}$. In this work, the mean is assumed to be zero without loss of generality. The choice of kernel function is problem-dependent and encodes assumptions about the unknown function. We can condition a~$GP(0, k(\mb{x}, \mb{x}'))$ on a set of~$t$ past observations ${ \mb{y}_t = (y_1, \dots, y_t) }$ at inputs~$\mathcal{A}_t = \{ \mb{x}_1, \dots, \mb{x}_t \}$ in order to obtain a posterior distribution on~$f(\mb{x})$ for any input~${ \mb{x} \in D }$. The GP model assumes that observations are noisy measurements of the true function value,~$y_t = f(\mb{x}_t) + \omega_t$, where~${\omega_t \sim \mathcal{N}(0,\sigma^2)}$. 
The posterior distribution is again a $GP(\mu_t(\mb{x}), k_t(\mb{x}, \mb{x}'))$ with mean~$\mu_t$, covariance~$k_t$, and variance~$\sigma_t$, where \begin{align} \mu_t(\mb{x}) &= \mb{k}_t(\mb{x}) (\mb{K}_t + \mb{I} \sigma^2)^{-1} \mb{y}_t , \label{eq:gp_prediction_mean} \\ k_t(\mb{x}, \mb{x}') &= k(\mb{x}, \mb{x}') - \mb{k}_t(\mb{x}) (\mb{K}_t + \mb{I} \sigma^2)^{-1} \mb{k}_t^\T(\mb{x}'), \label{eq:gp_prediction_covariance} \\ \sigma^2_t(\mb{x}) &= k_t(\mb{x}, \mb{x}). \label{eq:gp_prediction_variance} \end{align} The covariance matrix~${\mb{K}_t \in \mathbb{R}^{t \times t}}$ has entries ${[\mb{K}_t]_{(i,j)} = k(\mb{x}_i, \mb{x}_j)}$, ${i,j\in\{1,\dots,t\}}$, and the vector ${\mb{k}_t(\mb{x}) = \left[ \begin{matrix} k(\mb{x},\mb{x}_1),\dots,k(\mb{x},\mb{x}_t) \end{matrix} \right]}$ contains the covariances between the input~$\mb{x}$ and the observed data points in~$\mathcal{A}_t$. The identity matrix is denoted by~${ \mb{I}_t \in \mathbb{R}^{t \times t} }$. \subsection{Learning RKHS functions with GPs} The GP framework uses a statistical model that makes different assumptions from the ones made about~$f$ in~\cref{sec:problem_statement}. In particular, we assume a different noise model, and samples from a GP$(0, k(\mb{x}, \mb{x}'))$ are rougher than RKHS functions and are not contained in~$\mathcal{H}_k$. However, GPs and RKHS functions are closely related \citep{Kanagawa2018Gaussian} and it is possible to use GP models to infer reliable confidence intervals on~$f$ in~\cref{eq:optimize_f}. \begin{restatable}[\citet{Abbasi-Yadkori2012Online,Chowdhury2017Kernelized}]{lemma}{confidencethm} Assume that $f$ has bounded RKHS norm $\|f\|_k \leq B$ and that measurements are corrupted by~$\sigma$-sub-Gaussian noise. 
If $\beta_t^{1/2} = B + 4 \sigma \sqrt{ I(\mb{y}_{t}; f) + 1 + \mathrm{ln}(1 / \delta)}$, then for all~${\mb{x} \in D}$ and~${t \geq 0}$ it holds jointly with probability at least~${1 - \delta}$ that $ \left|\, f(\mb{x}) - \mu_{t}(\mb{x}) \,\right| \leq \beta_{t}^{1/2} \sigma_{t}(\mb{x}). $ \label{thm:confidence_interval} \end{restatable} \cref{thm:confidence_interval} implies that, with high probability, the true function~$f$ is contained in the confidence intervals induced by the posterior GP distribution that uses the kernel~$k$ from~\cref{thm:confidence_interval} as a covariance function, scaled by an appropriate factor~$\beta_t$. Here, $I(\mb{y}_t; f)$ denotes the mutual information between the GP prior on~$f$ and the~${t}$ measurements~$\mb{y}_{t}$. Intriguingly, for GP models this quantity only depends on the inputs~$\mb{x}_t$ and not the corresponding measurement~$y_t$. Specifically, for a given set of measurements~$\mb{y}_\mathcal{A}$ at inputs~$\mb{x} \in \mathcal{A}$, the mutual information is given by \begin{equation} I(\mb{y}_\mathcal{A}; f) = 0.5 \log | \mb{I} + \sigma^{-2} \mb{K}_{\mathcal{A}} | , \label{eq:mutual_information} \end{equation} where~$\mb{K}_\mathcal{A}$ is the kernel matrix~$[k(\mb{x}, \mb{x}')]_{\mb{x}, \mb{x}' \in \mathcal{A}}$ and $|\cdot|$ is the determinant. Intuitively, the mutual information measures how informative the collected samples~$\mb{y}_\mathcal{A}$ are about the function~$f$. If the function values are independent of each other under the GP prior, they will provide large amounts of new information. However, if measurements are taken close to each other as measured by the kernel, they are correlated under the GP prior and provide less information. \subsection{Bayesian Optimization (BO)} \label{sec:bayesian_optimization} BO aims to find the global maximum of an unknown function~\citep{Mockus2012Bayesian}. 
The framework assumes that evaluating the function is expensive in terms of time required or monetary costs, while other computational resources are comparatively inexpensive. In general, BO methods model the objective function~$f$ with a statistical model and use it to determine informative sample locations. A popular approach is to model the underlying function with a GP, see~\cref{sec:gaussian_process}. GP-based BO methods use the posterior mean and variance predictions in~\cref{eq:gp_prediction_mean,eq:gp_prediction_variance} to compute the next sample location. One commonly used algorithm is the~\textsc{GP-UCB} algorithm by~\cite{Srinivas2012Gaussian}. It uses confidence intervals on the function~$f$, e.g., from~\cref{thm:confidence_interval}, in order to select as next input the point with the largest plausible function value according to the model, \begin{equation} \mb{x}_{t+1} = \underset{\mb{x} \in \mathcal{D}}{\mathrm{argmax}}~ \mu_{t}(\mb{x}) + \beta_t^{1/2} \sigma_{t}(\mb{x}). \label{eq:gp_ucb} \end{equation} Intuitively,~\cref{eq:gp_ucb} selects new evaluation points at locations where the upper bound of the confidence interval of the GP estimate is maximal. Repeatedly evaluating the function~$f$ at inputs~$\mb{x}_{t+1}$ given by~\cref{eq:gp_ucb} improves the mean estimate of the underlying function and decreases the uncertainty at candidate locations for the maximum, so that the global maximum is provably found eventually~\citep{Srinivas2012Gaussian}. While~\cref{eq:gp_ucb} is also an optimization problem, it only depends on the GP model of~$f$ and solving it therefore does not require any expensive evaluations of~$f$. \paragraph{Regret bounds} \cite{Srinivas2012Gaussian} show that the~\textsc{GP-UCB} algorithm has cumulative regret~$R_t = \mathcal{O}(\sqrt{ t \beta_t \gamma_t} )$ for all $t \geq 1$ with the same~$(1-\delta)$ probability as the confidence intervals, e.g., in~\cref{thm:confidence_interval}, hold. 
Here~$\gamma_t$ is the largest amount of mutual information that could be obtained by any algorithm from at most~$t$ measurements, \begin{equation} \gamma_t = \max_{\mathcal{A} \subset D, \, |\mathcal{A}| \leq t} I(\mb{y}_\mathcal{A}; f). \label{eq:gamma_t} \end{equation} We refer to~$\gamma_t$ as the \emph{information capacity}, since it can be interpreted as a measure of complexity of the function class associated with a GP prior. It was shown by~\cite{Srinivas2012Gaussian} that~$\gamma_t$ has a sublinear dependence on~$t$ for many commonly used kernels such as the Gaussian kernel. As a result,~$R_t$ has a sublinear dependence on~$t$ so that~$R_t / t \to 0$ and therefore \textsc{GP-UCB} converges to function evaluations close to~$f(\mb{x}^*)$. These regret bounds were extended to Thompson sampling, an algorithm that uses samples from the posterior GP as the acquisition function, by~\cite{Chowdhury2017Kernelized}. \paragraph{Online hyperparameter estimation} \begin{figure*}[t] \centering \subcaptionbox{Sample from GP prior. \label{fig:mcmc_example_sample}} {\includegraphics{figures/mcmc_example_sample.pdf}} \subcaptionbox{GP estimate (RKHS). \label{fig:mcmc_example_map}} {\includegraphics{figures/mcmc_example_map.pdf}} \subcaptionbox{Lengthscale distribution. \label{fig:mcmc_example_mcmc}} {\includegraphics{figures/mcmc_example_mcmc.pdf}} \caption{A sample from the GP prior in~\cref{fig:mcmc_example_sample} typically varies at a consistent rate over the input space. However, RKHS functions with the same kernel may be less consistent and can have bumps, as in~\cref{fig:mcmc_example_map} (gray). As a result, inferring the posterior lengthscales based on measurements (blue crosses in~\cref{fig:mcmc_example_map}) can lead to erroneous results. In~\cref{fig:mcmc_example_mcmc}, most of the probability mass of the posterior lengthscales has concentrated around large lengthscales that encode smooth functions. 
Consequently, the GP's $2\sigma$ confidence intervals in~\cref{fig:mcmc_example_map} (blue shaded) based on the posterior samples do not contain the true function.} \label{fig:mcmc_example} \end{figure*} In the previous section, we have seen that the~\textsc{GP-UCB} algorithm provably converges. However, it requires access to an RKHS norm bound $\|f\|_\theta \leq B$ under the correct kernel hyperparameters~$\theta$ in order to construct reliable confidence intervals using~\cref{thm:confidence_interval}. In practice, these are unknown and have to be estimated online, e.g., based on a prior distribution placed on~$\theta$. Unfortunately, it is well-known that online estimation of the hyperparameters, be it via maximum a posteriori (MAP) or sampling methods, does not always converge to the optimum~\citep{Bull2011Convergence}. The problem does not primarily lie with the inference scheme, but rather with the assumptions made by the GP. In particular, typical samples drawn from a GP with a stationary kernel tend to have a similar rate of change throughout the input space, see~\cref{fig:mcmc_example_sample}. In contrast, the functions inside the RKHS, as specified in~\cref{sec:problem_statement}, can have different rates of change and are thus improbable under the GP prior. For example, the gray function in~\cref{fig:mcmc_example_map} is almost linear but has one bump that defines the global maximum, which makes this function an improbable sample under the GP prior even though it belongs to the RKHS induced by the same kernel. This property of GPs \added{with stationary kernels} means that, for inference, it is sufficient to estimate the lengthscales in a small part of the state-space in order to make statements about the function space globally. 
This is illustrated in~\cref{fig:mcmc_example_mcmc}, where we show samples from the posterior distribution over the lengthscales based on the measurements obtained from the \textsc{GP-UCB} algorithm in~\cref{fig:mcmc_example_map} (blue crosses). Even though the prior distribution on the lengthscales~$\theta$ is suggestive of short lengthscales, most of the posterior probability mass is concentrated around lengthscales that are significantly larger than the true ones. As a result, even under model averaging over the samples from the posterior distribution of the lengthscales, the GP confidence intervals do not contain the true function in~\cref{fig:mcmc_example_map}. This is not a problem of the inference method applied, but rather a direct consequence of the probabilistic model that we have specified \added{based on the stationary kernel}, which does not consider functions with different rates of change to be likely. \section{Problem Statement} \label{sec:problem_statement} In general, BO considers global optimization problems of the form \begin{equation} \mb{x}^* = \operatornamewithlimits{argmax}_{\mb{x} \in \mathcal{D}} f(\mb{x}), \label{eq:optimize_f} \end{equation} where~$\mathcal{D} \subset \mathbb{R}^d$ is a compact domain over which we want to optimize inputs~$\mb{x}$, and~$f \colon \mathcal{D} \to \mathbb{R}$ is an objective function that evaluates the reward~$f(\mb{x})$ associated with a given input configuration~$\mb{x}$. For example, in a machine learning application,~$f(\mb{x})$ may be the validation loss and~$\mb{x}$ may be the tuning inputs (e.g., regularization parameters) of the training algorithm. We do not have any significant prior knowledge about the structure of~$f$. Specifically, we cannot assume convexity or that we have access to gradient information. Moreover, evaluations of~$f$ are corrupted by~$\sigma$-sub-Gaussian noise, a general class of noise models that includes, for example, bounded or Gaussian noise. 
\paragraph{Regret} We aim to construct a sequence of input evaluations~$\mb{x}_t$ that eventually maximizes the function value~$f(\mb{x}_t)$. One natural way to prove this convergence is to show that an algorithm has sublinear regret. The instantaneous regret at iteration~$t$ is defined as~$r_t = \max_{\mb{x} \in \mathcal{D}}f(\mb{x}) - f(\mb{x}_t) \geq 0$, which is the loss incurred by evaluating the function at~$\mb{x}_t$ instead of at the \textit{a priori unknown} optimal inputs. The cumulative regret is defined as~$R_T = \sum_{0 < t \leq T} r_t$, the sum of regrets incurred over~$T$ steps. If we can show that the cumulative regret is sublinear for a given algorithm, that is,~$\lim_{t \to \infty} R_t \,/ \, t = 0$, then eventually the algorithm evaluates the function at inputs that lead to close-to-optimal function values most of the time. We say that such an algorithm has~\emph{no-regret}. Intuitively, if the average regret approaches zero then, on average, the instantaneous regret must approach zero too, since~$r_t$ is non-negative. This implies that there exists a~$t>0$ such that~$f(\mb{x}_t)$ is arbitrarily close to $f(\mb{x}^*)$ and the algorithm converges. Thus, we aim to design an optimization algorithm that has sublinear regret. \paragraph{Regularity assumptions} Without further assumptions, it is impossible to achieve sublinear regret on~\cref{eq:optimize_f}. In the worst case, $f$ could be discontinuous at every input in~$\mathcal{D}$. To make the optimization problem in~\cref{eq:optimize_f} tractable, we make regularity assumptions about~$f$. In particular, we assume that the function~$f$ has low complexity, as measured by the norm in a reproducing kernel Hilbert space (RKHS, \cite{Christmann2008Support}). 
An RKHS~$\mathcal{H}_k$ contains well-behaved functions of the form~$f(\mb{x}) = \sum_{i \geq 0} \alpha_i \, k(\mb{x}, \mb{x}_i)$, for given representer points~$\mb{x}_i \in \mathbb{R}^d$ and weights~$\alpha_i \in \mathbb{R}$ that decay sufficiently quickly. The kernel~$k(\cdot, \cdot)$ determines the roughness and size of the function space and the induced RKHS norm~$\|f\|_{k} = \sqrt{ \langle f, \, f \rangle }$ measures the complexity of a function~$f \in \mathcal{H}_k$ with respect to the kernel. In the following, we assume that~$f$ in~\cref{eq:optimize_f} has bounded RKHS norm~$\|f\|_{k_\theta} \leq B$ with respect to a kernel~$k_\theta$ that is parameterized by hyperparameters~$\theta$. We write~$\mathcal{H}_\theta$ for the corresponding RKHS,~$\mathcal{H}_{k_\theta}$. For known~$B$ and~$\theta$, no-regret BO algorithms for~\cref{eq:optimize_f} are known, e.g., \textsc{GP-UCB}~\citep{Srinivas2012Gaussian}. In practice, these hyperparameters need to be tuned. In this paper, we consider the case where~$\theta$ and~$B$ are unknown. We focus on stationary kernels, which measure similarity based on the distance of inputs, $k(\mb{x}, \mb{x}') = k(\mb{x} - \mb{x}')$. The most commonly used hyperparameters for these kernels are the lengthscales~$\theta \in \mathbb{R}^d$, which scale the inputs to the kernel in order to account for different magnitudes in the different components of~$\mb{x}$ and effects on the output value. That is, we scale the difference~$\mb{x} - \mb{x}'$ by the lengthscales~$\theta$, \begin{equation} k_\theta(\mb{x}, \mb{x}') = k\left( \frac{[\mb{x}]_1 - [\mb{x}']_1 } { [\theta]_1 } , \, \dots, \, \frac{[\mb{x}]_d - [\mb{x}']_d } { [\theta]_d } \right), \label{eq:stationary_lengthscale_kernel} \end{equation} where~$[\mb{x}]_i$ denotes the $i$th element of~$\mb{x}$. Typically, these kernels assign larger similarity scores to inputs when the scaled distance between these two inputs is small. 
\added{Another common hyperparameter is the prior variance of the kernel, a multiplicative constant that determines the magnitude of the kernel. We assume~$k(\mb{x}, \mb{x}) = 1$ for all $\mb{x} \in \mathcal{D}$ without loss of generality, as any multiplicative scaling can be absorbed by the norm bound~$B$.} In summary, our goal is to efficiently solve~\cref{eq:optimize_f} via a BO algorithm with sublinear regret, where~$f$ lies in some RKHS~$\mathcal{H}_\theta$, but neither the hyperparameters~$\theta$ nor the norm-bound~$\|f\|_{k_\theta}$ are known. \section{Proof of Main Theorem} \begin{lemma} Let~$f \in \mathcal{H}_{\theta_{t^*}}$ with~$\|f\|_{\theta_{t^*}} \leq B_{t^*}$. Then, for any monotonically increasing functions~$g(t) \geq 1$ and~$b(t) \geq 1$ and for all~$t \geq t^*$: $f \in \mathcal{H}_{\theta_{t}}$ with~$\|f\|_{\theta_{t}} \leq B_{t}$ \label{lem:f_contained_once_found} \end{lemma} \begin{proof} \cref{thm:rkhs_norm_change} together with monotonicity of~$g$ yields $\mathcal{H}_{\theta_{t}} \supseteq \mathcal{H}_{\theta_{t^*}}$ so that~$f \in \mathcal{H}_{\theta_{t}}$ and $$ \|f\|_{\theta_t} \leq \prod_{1 \leq i \leq d} \frac{[\theta_{t^*}]_i}{ [\theta_t]_i} \|f\|_{\theta_{t^*}} \leq \frac{g(t)^d} {g(t^*)^{d}} B_{t^*} = \frac{g(t)^d} {g(t^*)^{d}} g(t^*)^d b(t^*) B_0 = g(t)^d b(t^*) B_0 \leq B_t $$ \end{proof} \begin{lemma} Under the assumptions of~\cref{thm:confidence_interval}, let~$\theta_t$ be a predictable sequence of kernel hyperparameters such that~$\|f\|_{k_{\theta_t}} \leq B_t$ and let the GP predictions~$\mu_t$ and~$\sigma_t$ use the prior covariance~$k_{\theta_t}$. If $\beta_t^{1/2} = B_t + 4 \sigma \sqrt{ I_{\theta_t}(\mb{y}_{t}; f) + 1 + \mathrm{ln}(1 / \delta)}$, then $|\, f(\mb{x}) - \mu_{t}(\mb{x}) \,| \leq \beta_{t}^{1/2} \sigma_{t}(\mb{x}) $ holds for all~${\mb{x} \in D}$ and iterations~${t \geq 0}$ jointly with probability at least~${1 - \delta}$. 
\label{thm:confidence_interval_extended} \end{lemma} \begin{proof} The proof is the same as the one by \citet{Abbasi-Yadkori2012Online,Chowdhury2017Kernelized}, except that the kernel is time-dependent. \end{proof} We are now ready to prove the main result: \newline \begin{proof}[\cref{thm:main}] We split the regret bound into two terms,~$R_t = t_0\, r_{c} + r_s(t)$. In the initial rounds, where either~$B_t \leq g(t)^d B_0$ or~$\max_i [\theta]_i / [\theta]_0 > 1$, the regret is trivially bounded by~$r_t \leq 2\|f\|_\infty \leq 2 \|f\|_{\theta} \leq B$. Thus~$r_c \leq 2 B$. Let~$t_0 \in (0, \infty]$ be the first iteration such that~$f \in \mathcal{H}_{\theta_{t_0}}$ with~$\|f\|_{\theta_{t_0}} \leq B_{t_0}$. From~\cref{lem:f_contained_once_found}, we have that $f \in \mathcal{H}_{\theta_{t}}$ with~$\|f\|_{\theta_{t}} \leq B_t$ for all~$t\geq t_0$. Thus we can use~\cref{thm:confidence_interval_extended} to conclude~$|f - \mu_t(\mb{x})| \leq \beta_t^{1/2} \sigma_t(\mb{x})$ for all~$\mb{x} \in \mathcal{D}$ and~$t\geq t_0$ jointly with probability at least~$(1-\delta)$. We use Lemmas 5.2-5.4 in \citet{Srinivas2012Gaussian} to conclude that the second stage has a regret bound of~$r_s^2(t) \leq C_1 \beta_t I(\mb{y}_t; f) $, which concludes the proof. \end{proof} \section{Bound on the information capacity~$\gamma_t$} \begin{theorem}[Theorem 8 in \cite{Srinivas2012Gaussian}] Suppose that ${D \subset \mathbb{R}^d}$ is compact, and~${k(\mb{x}, \mb{x}')}$ is a covariance function for which the additional assumption of Theorem 2 in~\cite{Srinivas2012Gaussian} hold. Moreover, let~${ B_k(T_*) = \sum_{s > T_*} \lambda_s }$, where~${\{\lambda_s\}}$ is the operator spectrum of~$k$ with respect to the uniform distribution over~$D$. Pick~$\tau > 0$, and let~${n_T = C_4 T^\tau (\log T) }$ with~${C_4 = 2 \mathcal{V}(D) (2 \tau + 1)}$. 
Then, the following bound holds true: \begin{equation} \gamma_T \leq \frac{1/2}{1 - e^{-1}} \, \max_{r \in \{1,\dots,T\}} T_* \log\left(\frac{r n_T}{\sigma^2}\right) + C_4 \sigma^{-2} (1 - \frac{r}{T}) ( B_k(T_*) T^{\tau + 1} + 1) \log T + \mathcal{O}(T^{1 - \frac{\tau}{d}}). \label{eq:gamma_t_spectrum_bound} \end{equation} \label{thm:bound_gamma_with_spectrum} \end{theorem} \cref{thm:bound_gamma_with_spectrum} allows us to bound~$\gamma_t$ through the operator spectrum of the kernel with respect to the uniform distribution. We now consider this quantity for two specific kernels. \subsection{Bounds for the Squared Exponential Kernel} \begin{lemma} \label{thm:log_1_x_bound} For all~$x \in [0, x_\mathrm{max}^2]$ it holds that $\log(1 + x^2) \geq \frac{\log(1 + x_\mathrm{max}^2)}{x_\mathrm{max}^2} x^2$ \end{lemma} In this section, we use~\cref{thm:bound_gamma_with_spectrum} to obtain concrete bounds for the Gaussian kernel. From~\cite{Seeger2008Information}, we obtain a bound on the eigenspectrum that is given by \begin{equation*} \lambda_s \leq cB^{s^{1/d}}, \textnormal{~where~} c = \sqrt{\frac{2 a}{A}}, \quad b = \frac{1}{2 \theta_t^2}, \quad B = \frac{b}{A},\quad \textnormal{and}\quad A = a + b + \sqrt{a^2 + 2ab }. \end{equation*} The constant~$a>0$ parameterizes the distribution~${\mu(\mb{x}) \sim \mathcal{N}(\mb{0}, (4a)^{-1} \mb{I}_d)}$. As a consequence of~$\theta_t > 0$, we have that~$b \geq 0$, $0<B<1$, $c > 0$, and $A > 0$. In the following, we bound the eigenspectrum. The steps follow the outline of~\cite{Seeger2008Information}, but we provide more details and the dependence on the lengthscales~$\theta_t$ is made explicit: \begin{align*} B_k(T_*) &= \sum_{s > T_*} \lambda_s \leq c \sum_{s \geq T_* + 1}B^{s^{1/d}} = c \sum_{s \geq T_* + 1} \exp \log (B^{s^{1/d}}) = c \sum_{s \geq T_* + 1} \exp(s^{1/d} \log B ) ,\\ &= c \sum_{s \geq T_* + 1} \exp(- s^{1/d} \alpha ) \leq c \int_{T_*}^\infty \exp(- \alpha s^{1/d} ) \dif s, % \intertext{ where $\alpha = -\log B$. 
Now substitute $s = \phi(t) = (t/\alpha)^d$. Then $\dif s = \frac{d t^{d-1}}{\alpha} \dif t$ and} % B_k(T_*) &\leq c \int_{\alpha T_*^{1/d}}^\infty \exp(-t) \frac{d t^{d-1}}{\alpha} \dif t = c d \alpha^{-d} \Gamma(d, \alpha T_*^{1/d}), % \intertext{ where $\Gamma(d, \beta) = \int_\beta^\infty e^{-t} t^{d-1} \,dt = (d-1)! e^{-\beta} \sum_{k=0}^{d-1} \beta^k / k!$ for $d \in \mathbb{N}$ as in~\citet[(8.352.4)]{Gradshtein2007Table}. Then, with $\beta = \alpha T_*^{1/d}$, } % B_k(T_*) &\leq c d \alpha^{-d} (d - 1)! e^{-\beta} \sum_{k=0}^{d-1} \beta^k / k! = c (d!) \alpha^{-d} e^{-\beta} \sum_{k=0}^{d-1} (k!)^{-1} \beta^k. \end{align*} Before we bound the information gain, let us determine how~$\alpha^{-d}$ and~$c$ depend on the lengthscales. In particular, we want to quantify their upper bounds in terms of~$g(t)$. \begin{align} \alpha^{-d} &= \log^{-d} (1 / B ) = \log^{-d} \left( 2 \theta_t^2 A \right) = \log^{-d} \left( 1 + 2 \theta_t^2 a + 2 \theta_t \sqrt{a^2 + \frac{a}{\theta_t^2}} \right) \\ &\leq \log^{-d} \left( 1 + 2 \theta_t^2 a \right) \leq \left( \frac{\log(1 + 2 \theta_0^2 a)}{2\theta_0^2 a} 2 \theta_t^2 a \right)^{-d} \label{eq:bound_alpha_d_Lemma} \text{~~~~~~~~by \cref{thm:log_1_x_bound}} \\ &= \mathcal{O}\left( \theta_t^{-2d} \right) = \mathcal{O}\left( g^{2d}(t) \right), \end{align} where~\cref{eq:bound_alpha_d_Lemma} follows from~\cref{thm:log_1_x_bound}, since $g(t) \geq 1$ for all~$t>0$. Similarly, \begin{equation} c = \left( \frac{2a}{a + \frac{1}{2\theta_t^2} + \sqrt{a^2 + \frac{a}{\theta_t^2}}} \right)^{d/2} \leq \left( \frac{2a}{\frac{1}{2 \theta_t^2}} \right)^{d/2} = \left( 4a\theta_t^2 \right)^{d/2} = \mathcal{O}(g(t)^{-d}). \end{equation} As in~\cite{Srinivas2012Gaussian}, we choose~$T_* = (\log(T n_T) / \alpha)^d$, so that~$\beta=\log(T n_T)$ and therefore does not depend on~$g(t)$. 
Plugging into~\cref{eq:gamma_t_spectrum_bound}, the first term of~\cref{eq:gamma_t_spectrum_bound} dominates and \begin{align} \gamma_T = \mathcal{O} \left( \left[ \log(T^{d+1} (\log T)) \right]^{d+1} c \alpha^{-d} \right) = \mathcal{O} \left( (\log T)^{d + 1} g(t)^d \right). \end{align} \subsection{Mat\'ern kernel} Following the proof for Theorem 2 in the addendum to \citet{Seeger2008Information}, we have that \begin{equation} \lambda_s^{(T)} \leq C (1 + \delta) s^{-(2 \nu + d) / d} ~ \forall s \geq s_0, \end{equation} For the leading constant we have~$C=C_3^{(2 \nu + d) / d}$ with~$\alpha = \frac{2 \pi \theta_t}{\sqrt{2 \nu}}$. Hiding terms that do not depend on~$\alpha$ and therefore~$g(t)$, we have \begin{align*} &C_t(\alpha, \nu) = \frac{\Gamma(\nu + d / 2)}{\pi^{d/2} \Gamma(\nu)} \alpha^d = \mathcal{O}(g(t)^{-d}) &&c_1 = \frac{1}{(2\pi)^d C_t(\alpha, \nu)} = \mathcal{O}(g(t)^d) \\ &C_2 = \frac{\alpha^{-d}}{2^d \pi^{d/2}\Gamma(d/2)} = \mathcal{O}(g(t)^{d}) &&C_3 = C_2 \frac{2\tilde{C}}{d} c_1^\frac{-d}{2 \nu + d} = \mathcal{O}( g(t)^d g(t)^\frac{-d^2}{2 \nu + d} ) = \mathcal{O}(g(t)^d), \end{align*} so that $C = \mathcal{O}(g(t)^{2 \nu + d})$. The second term in~$C_3$ must be over-approximated as a consequence of the proof strategy. It follows that $ B_k(T_*) = \mathcal{O}(g(t)^{2 \nu d} T_*^{1-(2 \nu + d) / d}) $ and, as in~\cite{Srinivas2012Gaussian}, that $ \gamma_T = \mathcal{O}(T^\frac{d(d+1)}{2\nu + d(d+1)} (\log T) g(t)^{2 \nu d}). $ \section{Introduction} \label{sec:introduction} The performance of machine learning algorithms often critically depends on the choice of tuning inputs, e.g., learning rates or regularization constants. Picking these correctly is a key challenge. Traditionally, these inputs are optimized using grid or random search~\citep{Bergstra2012Random}. However, as data sets become larger the computation time required to train a single model increases, which renders these approaches less applicable. 
Bayesian optimization (BO,~\cite{Mockus2012Bayesian}) is an alternative method that provably determines good inputs within few evaluations of the underlying objective function. BO methods construct a statistical model of the underlying objective function and use it to evaluate inputs that are informative about the optimum. However, the theoretical guarantees, empirical performance, and data efficiency of BO algorithms critically depend on their own choice of hyperparameters and, in particular, on the prior distribution over the function space. Thus, we effectively shift the problem of tuning inputs one level up, to the tuning of hyperparameters of the BO algorithm. In this paper, we use a Gaussian process (GP, \citet{Rasmussen2006Gaussian}) for the statistical model. We present the first BO algorithm that does not require knowledge about the hyperparameters of the GP's stationary kernel and provably converges to the global optimum. To this end, we adapt the hyperparameters of the kernel and our BO algorithm, so that the associated function space grows over time. The resulting algorithm provably converges to the global optimum and retains theoretical convergence guarantees, even when combined with online estimation of hyperparameters. \paragraph{Related work} General BO has received a lot of attention in recent years. Typically, BO algorithms suggest inputs to evaluate by maximizing an acquisition function that measures informativeness about the optimum. Classical acquisition functions are the \textit{expected improvement} over the best known function value encountered so far given the GP distribution~\citep{Mockus1978Application} and the \textit{Upper Confidence Bound} algorithm,~\textsc{GP-UCB}, which applies the `optimism in the face of uncertainty' principle. The latter is shown to provably converge by~\cite{Srinivas2012Gaussian}. \citet{Durand2018Streaming} extend this framework to the case of unknown measurement noise. 
A related method is \textit{truncated variance reduction} by~\citet{Bogunovic2016Truncated}, which considers the reduction in uncertainty at candidate locations for the optimum. \cite{Hennig2012Entropy} propose \textit{entropy search}, which approximates the distribution of the optimum of the objective function and uses the reduction of the entropy in this distribution as an acquisition function. Alternative information-theoretic methods are proposed by~\citet{Hernandez-Lobato2014Predictive,Wang2017Maxvalue,Ru2018Fast}. Other alternatives are the \textit{knowledge gradient}~\citep{Frazier2009Knowledgegradient}, which is one-step Bayes optimal, and \textit{information directed sampling} by~\cite{Russo2014Learning}, which considers a tradeoff between regret and information gained when evaluating an input. \cite{Kirschner2018Information} extend the latter framework to heteroscedastic noise. These BO methods have also been successful empirically. In machine learning, they are used to optimize the performance of learning methods~\citep{Brochu2010Tutorial,Snoek2012Practical}. BO is also applicable more broadly; for example, in reinforcement learning to optimize a parametric policy for a robot~\citep{Calandra2014Experimental,Lizotte2007Automatic,Berkenkamp2016Bayesian} or in control to optimize the energy output of a power plant~\citep{Abdelrahman2016Bayesian}. It also forms the backbone of Google vizier, a service for tuning black-box functions~\citep{Golovin2017Google}. Some of the previous BO algorithms provide theoretical guarantees about convergence to the optimum. These theoretical guarantees only hold when the kernel hyperparameters are known~\textit{a priori}. When this is not the case, hyperparameters are often inferred using either \textit{maximum a posteriori} estimates or sampling-based inference~\citep{Snoek2012Practical}. Unfortunately, methods that estimate the hyperparameters online are known to get stuck in local optima~\citep{Bull2011Convergence}. 
Instead, we propose to adapt the hyperparameters online in order to enlarge the function space over time, which allows us to provide guarantees in terms of convergence to the global optimum without knowing the hyperparameters. \citet{Wang2014Theoretical} analyze this setting when a lower bound on the kernel lengthscales is known \textit{a priori}. They decrease the lengthscales over time and bound the regret in terms of the known lower-bound on the lengthscales. Empirically, similar heuristics are used by~\citet{Wang2016Bayesian,Wabersich2016Advancing}. In contrast, this paper considers the case where the hyperparameters are \textit{not known}. Moreover, the scaling of the hyperparameters in the previous two papers did not depend on the dimensionality of the problem, which can cause the function space to increase too quickly. Considering larger function classes as more data becomes available is the core idea behind structural risk minimization~\citep{Vapnik1992Principles} in statistical learning theory. However, there data is assumed to be sampled independently and identically distributed. This is not the case in BO, where new data is generated actively based on past information. \paragraph{Our contribution} In this paper, we present~Adaptive \textsc{GP-UCB}~ (\textsc{A-GP-UCB}), the first algorithm that provably converges to the globally optimal inputs when BO hyperparameters are \emph{unknown}. Our method expands the function class encoded in the model over time, but does so slowly enough to ensure sublinear regret and convergence to the optimum. Based on the theoretical insights, we propose practical variants of the algorithm with guaranteed convergence. Since our method can be used as an add-on module to existing algorithms with hyperparameter estimation, it achieves similar performance empirically, but avoids local optima when hyperparameters are misspecified. 
In summary, we: \begin{itemize} \item Provide theoretical convergence guarantees for BO with unknown hyperparameters; \item Propose several practical algorithms based on the theoretical insights; \item Evaluate the performance in practice and show that our method retains the empirical performance of heuristic methods based on online hyperparameter estimation, but leads to significantly improved performance when the model is misspecified initially. \end{itemize} The remainder of the paper is structured as follows. We state the problem in~\cref{sec:problem_statement} and provide relevant background material in~\cref{sec:background}. We derive our main theoretical result in~\cref{sec:theory} and use insights gained from the theory to propose practical algorithms. We evaluate these algorithms experimentally in~\cref{sec:experiments} and draw conclusions in~\cref{sec:conclusion}. The technical details of the proofs are given in the appendix. \section{Conclusion and Future Work} \label{sec:conclusion} We introduced~\textsc{A-GP-UCB}, a BO algorithm that is provably no-regret when hyperparameters are unknown. Our method adapts the hyperparameters online, which causes the underlying BO algorithm to consider larger function spaces over time. Eventually, the function space is large enough to contain the true function, so that our algorithm provably converges. We evaluated our method on several benchmark problems, confirming that, on the one hand, it provably converges even in cases where standard BO algorithms get stuck in local optima, and, on the other hand, enjoys competitive performance as standard BO algorithms that do not have theoretical guarantees in this setting. The main idea behind our analysis is that adapting the hyperparameters increases the cumulative regret bound, but we do so slowly enough to converge eventually. This idea is fairly general and could also be applied to other no-regret algorithms. 
Another potential future direction is to investigate alternative strategies to select the scaling factors~$b(t)$ and~$g(t)$ and consider adapting other parameters such as the kernel structure.
1,108,101,565,817
arxiv
\section{#1}} \newcommand{\newsubsection}[1]{\setcounter{equation}{0} \setcounter{dfn}{0} \subsection{#1}} \renewcommand{\theequation}{\thesection.\arabic{equation}} \newtheorem{dfn}{Definition}[section] \newtheorem{thm}[dfn]{Theorem} \newtheorem{lmma}[dfn]{Lemma} \newtheorem{ppsn}[dfn]{Proposition} \newtheorem{crlre}[dfn]{Corollary} \newtheorem{xmpl}[dfn]{Example} \newtheorem{rmrk}[dfn]{Remark} \newtheorem{xrcs}{Exercise}[section] \newcommand{\begin{dfn}\rm}{\begin{dfn}\rm} \newcommand{\begin{thm}}{\begin{thm}} \newcommand{\begin{lmma}}{\begin{lmma}} \newcommand{\begin{ppsn}}{\begin{ppsn}} \newcommand{\begin{crlre}}{\begin{crlre}} \newcommand{\begin{xmpl}}{\begin{xmpl}} \newcommand{\begin{rmrk}\rm}{\begin{rmrk}\rm} \newcommand{\end{dfn}}{\end{dfn}} \newcommand{\end{thm}}{\end{thm}} \newcommand{\end{lmma}}{\end{lmma}} \newcommand{\end{ppsn}}{\end{ppsn}} \newcommand{\end{crlre}}{\end{crlre}} \newcommand{\end{xmpl}}{\end{xmpl}} \newcommand{\end{rmrk}}{\end{rmrk}} \newcommand{\mathbb{B}}{\mathbb{B}} \newcommand{\mathbb{C}}{\mathbb{C}} \newcommand{\mathbb{Z}}{\mathbb{Z}} \newcommand{\mathbb{M}}{\mathbb{M}} \newcommand{\mathbb{N}}{\mathbb{N}} \newcommand{\mathbb{R}}{\mathbb{R}} \newcommand{\mathbb{Q}}{\mathbb{Q}} \newcommand{\mathbb{T}}{\mathbb{T}} \newcommand{\mathbb{O}}{\mathbb{O}} \newcommand{\mathfrak{S}}{\mathfrak{S}} \newcommand{\mathscr{B}}{\mathscr{B}} \newcommand{\mathscr{C}}{\mathscr{C}} \newcommand{\mathscr{F}}{\mathscr{F}} \newcommand{\mathscr{G}}{\mathscr{G}} \newcommand{\mathscr{R}}{\mathscr{R}} \newcommand{\mathscr{S}}{\mathscr{S}} \newcommand{\mathscr{T}}{\mathscr{T}} \newcommand{\widetilde{\alpha}}{\widetilde{\alpha}} \newcommand{\epsilon}{\epsilon} \newcommand{\Lambda}{\Lambda} \newcommand{\mathcal{A}}{\mathcal{A}} \newcommand{\mathcal{B}}{\mathcal{B}} \newcommand{\mathcal{E}}{\mathcal{E}} \newcommand{\mathcal{F}}{\mathcal{F}} \newcommand{\mathcal{H}}{\mathcal{H}} \newcommand{\mathcal{I}}{\mathcal{I}} \newcommand{\mathcal{K}}{\mathcal{K}} 
\newcommand{\mathcal{L}}{\mathcal{L}} \newcommand{\mathcal{P}}{\mathcal{P}} \newcommand{\mathcal{Q}}{\mathcal{Q}} \newcommand{\mathcal{S}}{\mathcal{S}} \newcommand{\mathcal{U}}{\mathcal{U}} \newcommand{\mathcal{G}}{\mathcal{G}} \newcommand{\widehat{\alpha}}{\widehat{\alpha}} \newcommand{\widehat{\widetilde{\alpha}}}{\widehat{\widetilde{\alpha}}} \newcommand{\widehat{\gamma}}{\widehat{\gamma}} \newcommand{\widetilde{\alpha}}{\widetilde{\alpha}} \newcommand{\widetilde{\widetilde{\alpha}}}{\widetilde{\widetilde{\alpha}}} \newcommand{\raisebox{.4ex}{\ensuremath{\chi}}}{\raisebox{.4ex}{\ensuremath{\chi}}} \newcommand{\noindent{\it Proof\/}: }{\noindent{\it Proof\/}: } \newcommand{\subseteq}{\subseteq} \newcommand{{1\!\!1}}{{1\!\!1}} \newcommand{\nonumber}{\nonumber} \newcommand{\mbox{id}}{\mbox{id}} \newcommand{\textsl{path}\ }{\textsl{path}\ } \newcommand{\textsl{paths}\ }{\textsl{paths}\ } \newcommand{\textsl{move}\ }{\textsl{move}\ } \newcommand{\textsl{moves}\ }{\textsl{moves}\ } \newcommand{\noindent}{\noindent} \newcommand {\CC}{\centerline} \def \qed { \mbox{}\hfill $\Box$\vspace{1ex}} \makeatletter \let\@wraptoccontribs\wraptoccontribs \makeatother \newcommand{\frac{1}{2}}{\frac{1}{2}} \newcommand{\hat{\cla}}{\hat{\mathcal{A}}} \newcommand{\widehat{G}}{\widehat{G}} \newcommand{\mbox{ker\,}}{\mbox{ker\,}} \newcommand{\mbox{ran\,}}{\mbox{ran\,}} \newcommand{\tilde{S}}{\tilde{S}} \newcommand{\hat{T}}{\hat{T}} \newcommand{\mbox{\textit{Trace}\,}}{\mbox{\textit{Trace}\,}} \author{ S. Sundar} \title{Representations of the weak Weyl commutation relation} \newcommand{\insfig}[2]{ \begin{figure}[hbpt] \centerline{\input{#1}} \caption{#2\label{f-#1}} \end{figure} } \begin{document} \maketitle \begin{abstract} Let $G$ be a locally compact abelian group with Pontraygin dual $\widehat{G}$. Suppose $P$ is a closed subsemigroup of $G$ containing the identity element $0$. We assume that $P$ has dense interior and $P$ generates $G$. 
Let $U:=\{U_{\chi}\}_{\chi \in \widehat{G}}$ be a strongly continuous group of unitaries and let $V:=\{V_{a}\}_{a \in P}$ be a strongly continuous semigroup of isometries. We call $(U,V)$ a weak Weyl pair if \[ U_{\chi}V_{a}=\chi(a)V_{a}U_{\chi}\] for every $\chi \in \widehat{G}$ and for every $a \in P$. We work out the representation theory (the factorial and the irreducible representations) of the above commutation relation under the assumption that $\{V_{a}V_{a}^{*}:a \in P\}$ is a commuting family of projections. Not only does this generalise the results of \cite{Bracci1} and \cite{Bracci2}, our proof brings out the Morita equivalence that lies behind the results. For $P=\mathbb{R}_{+}^{2}$, we demonstrate that if we drop the commutativity assumption on the range projections, then the representation theory of the weak Weyl commutation relation becomes very complicated. \end{abstract} \noindent {\bf AMS Classification No. :} {Primary 46L05 ; Secondary 81S05.} \\ {\textbf{Keywords :}} Weak Weyl relations, Semigroups of isometries, Morita equivalence. \section{Introduction} The classical Stone-von Neumann theorem that asserts the uniqueness of the Weyl commutation relation \[ U_sV_t=e^{its}V_tU_s\] where $\{U_s\}_{s \geq 0}$ and $\{V_t\}_{t \geq 0}$ are strongly continuous $1$-parameter groups of unitaries is a fundamental theorem in both quantum mechanics and in operator algebras. In \cite{Bracci1} and in \cite{Bracci2}, a weaker version of the above commutation relation is considered where it is assumed that $\{V_t\}_{t \geq 0}$ is only a semigroup of isometries. The representation theory (the factorial representations and the irreducible representations) of such relations was worked out by Bracci and Picasso in \cite{Bracci1} and in \cite{Bracci2}. Bracci and Picasso consider such a weak form of the commutation relation as the quantisation postulate for systems whose configuration space is semibounded like the half-line. 
This is because, on the half-line, though the position operator generates a group of unitaries, the momentum operator (which is not self-adjoint) generates only a semigroup of isometries. The purpose of this paper is twofold. First, we would like to bring out the $C^{*}$-algebraic reason for the validity of the results in \cite{Bracci1} and in \cite{Bracci2}. Secondly, we wish to extend slightly the results to systems with $d$ degrees of freedom where $d \geq 2$. We work in the more general setting of subsemigroups of locally compact abelian groups. It is well known (\cite{Rosenberg}, \cite{Williams_Dana}) that the $C^{*}$-algebra that encodes the usual Weyl commutation relation is Morita equivalent to $\mathbb{C}$. Stone-von Neumann theorem is then an immediate consequence of this Morita equivalence. We establish a similar reasoning here. We prove that the $C^{*}$-algebra that encodes the weak version of the Weyl commutation relation considered in this paper is Morita equivalent to a commutative $C^{*}$-algebra. Thus, it follows at once that every factorial representation is a multiple of an irreducible representation and the irreducible representations are parameterised by the character space of the underlying commutative $C^{*}$-algebra. Moreover, in the one-dimensional case the commutative $C^{*}$-algebra is $C_{0}((-\infty,\infty])$. This provides a conceptual explanation for the results obtained in \cite{Bracci1} and in \cite{Bracci2}. The proof is based on the results obtained in \cite{Sundar_Ore} where a certain ``universal dynamical system'' was constructed encoding all semigroups of isometries with commuting range projections. Strictly speaking, only half of the last statement was proved in \cite{Sundar_Ore} (see Remark \ref{Remark}) and we prove the other half in this paper modulo Morita equivalence. The results obtained are next explained. Let $G$ be a locally compact, second countable, Hausdorff abelian group. 
We denote the dual group of $G$ by $\widehat{G}$. We use additive notation for the group operations. Let $P \subset G$ be a closed subsemigroup containing $0$ such that $P-P=G$. Set $\Omega:=Int(P)$. We assume that $\Omega$ is dense in $P$. For $x,y \in G$, we write $x \leq y$ if $y-x \in P$ and $x<y$ if $y-x \in \Omega$. Let $U:=\{U_\chi\}_{\chi \in \widehat{G}}$ be a strongly continuous group of unitaries and let $V:=\{V_{a}\}_{a \in P}$ be a strongly continuous semigroup of isometries. We call $(U,V)$ a weak Weyl pair if for every $\chi \in \widehat{G}$ and for every $a \in P$, \[ U_\chi V_a=\chi(a)V_aU_\chi.\] Let $(U,V)$ be a weak Weyl pair. For $a \in P$, let $E_a:=V_aV_{a}^{*}$. We say that $(U,V)$ has commuting range projections if $\{E_{a}:a \in P\}$ is a commuting family of projections. Note that if $P=[0,\infty)$ or, more generally, if the preorder $\leq$ is a total order, every weak Weyl pair has commuting range projections. Examples of weak Weyl pairs with commuting range projections are given below. Let $A$ be a non-empty closed subset of $G$ which is $P$-invariant, i.e. $A+P \subset A$. Such subsets will be called $P$-spaces. Let $K$ be a Hilbert space whose dimension we denote by $k$. Consider the Hilbert space $H:=L^{2}(A,K)$. For $\chi \in \widehat{G}$, let $U_\chi$ be the unitary on $H$ defined by \[ U_{\chi}f(y)=\chi(y)f(y).\] Then, $U:=\{U_\chi\}_{\chi \in \widehat{G}}$ is a strongly continuous group of unitaries on $H$. For $a \in P$, let $V_{a}$ be the isometry on $H$ defined by \begin{equation*} \label{isometries} V_{a}(f)(y):=\begin{cases} f(y-a) & \mbox{ if } y-a \in A,\cr &\cr 0 & \mbox{ if } y-a \notin A. \end{cases} \end{equation*} Then, $V=\{V_a\}_{a \in P}$ is a strongly continuous semigroup of isometries on $H$. It is clear that $V$ has commuting range projections. It is routine to verify that $(U,V)$ is a weak Weyl pair. We call $(U,V)$ the weak Weyl pair associated to the $P$-space $A$ with multiplicity $k$. 
If we want to stress the dependence of $(U,V)$ on $A$ and $k$, we denote $(U,V)$ by $(U^{(A,k)},V^{(A,k)})$. The main theorem of this paper is stated below. \begin{thm} \label{main theorem} We have the following. \begin{enumerate} \item[(1)] Let $A$ be a $P$-space and let $k \in \{1,2,\cdots\} \cup \{\infty\}$ be given. The weak Weyl pair $(U^{(A,k)},V^{(A,k)})$ is a factorial representation. Moreover, it is irreducible if and only if $k=1$. \item[(2)] Let $A,B$ be $P$-spaces and let $k,\ell \in \{1,2,\cdots \} \cup \{\infty\}$ be given. The weak Weyl pair $(U^{(A,k)},V^{(A,k)})$ is unitarily equivalent to $(U^{(B,\ell)},V^{(B,\ell)})$ if and only if $A=B$ and $k=\ell$. \item[(3)] Suppose $(U,V)$ is a weak Weyl pair with commuting range projections. Assume that the von Neumann algebra generated by $\{U_{\chi},V_a: \chi \in \widehat{G},a \in P\}$ is a factor. Then, there exists a $P$-space $A$ and $k \in \{1,2,\cdots\} \cup \{\infty\}$ such that $(U,V)$ is unitarily equivalent to $(U^{(A,k)},V^{(A,k)})$. Thus, for weak Weyl pairs with commuting range projections, factorial representations are completely reducible. Moreover, irreducible weak Weyl pairs with commuting range projections are precisely those associated to $P$-spaces with multiplicity $1$. \end{enumerate} \end{thm} For $P=[0,\infty)$, as already mentioned, every weak Weyl pair has commuting range projections. Also, every $P$-space is either $\mathbb{R}$ or of the form $[a,\infty)$ for a unique $a \in \mathbb{R}$. It is now clear that the results of \cite{Bracci1} and \cite{Bracci2} for the semibounded case follow from Thm. \ref{main theorem}. Moreover, for irreducible weak Weyl pairs with commuting range projections, we have the following uniqueness result. We need a bit of notation. Let $U:=\{U_{\chi}\}_{\chi \in \widehat{G}}$ be a strongly continuous group of unitaries on a Hilbert space $H$. Then, $U$ determines a representation $\pi_U$ of $ C_0(G) \cong C^{*}(\widehat{G})$ on $H$. 
We denote the unique closed subset of $G$ that corresponds to the ideal $Ker(\pi_U)$ by $Spec(U)$. \begin{crlre} \label{uniqueness} Let $(U,V)$ and $(\widetilde{U},\widetilde{V})$ be irreducible weak Weyl pairs with commuting range projections. Assume that $(U,V)$ acts on $H$ and $(\widetilde{U},\widetilde{V})$ acts on $\widetilde{H}$. Suppose $Spec(U)=Spec(\widetilde{U})$. Then, there exists a unitary $X:H \to \widetilde{H}$ such that for $\chi \in \widehat{G}$ and $a \in P$, \[ XU_{\chi}X^{*}=\widetilde{U}_{\chi}~~;~~~XV_{a}X^{*}=\widetilde{V}_{a}.\] \end{crlre} What about weak Weyl pairs which do not have commuting range projections ? For $P=\mathbb{R}_{+}^{2}$, we demonstrate that working out the irreducible weak Weyl pairs is a complicated task. We explain a procedure (preserving factoriality, type and irreducibility) that allows us to build weak Weyl pairs starting from a representation of the free product $c_0(\mathbb{N})\ast c_0(\mathbb{N})$. We also prove that Corollary \ref{uniqueness} no longer stays true if we relax the commutativity assumption on the range projections. We end this introduction by mentioning that weak Weyl relations, for one degree of freedom, in the unbounded picture were analysed extensively in the literature. Some of the important papers that deal with the unbounded version are \cite{Konrad}, \cite{Konrad1}, \cite{Jorgensen_Muhly}, \cite{Arai2}, \cite{Arai3}, and \cite{Arai1}. We do not touch the unbounded version here. The author is of the belief that the $C^{*}$-algebra machinery may not be sufficient to handle domain issues. All the Hilbert spaces considered in this paper are assumed to be separable. Moreover, we use the convention that the inner product is linear in the first variable. \section{The equivalence between $Isom_c(P)$ and $Rep(C_0(Y_u)\rtimes G)$} For the rest of this paper, $G$ stands for an arbitrary but a fixed locally compact, second countable, Hausdorff abelian group. 
The letter $P$ stands for a closed subsemigroup of $G$ containing the identity element $0$. We assume that $\Omega:=Int(P)$ is dense in $P$. We also assume $P-P=G$. We first review, from \cite{Sundar_Ore}, the construction of the universal dynamical system that encodes the isometric representations of $P$ with commuting range projections. Let $\mathcal{C}(G)$ be the set of closed subsets of $G$ endowed with the Fell topology. Let \[ Y_u:=\{A \in \mathcal{C}(G): A \neq \emptyset, -P+A \subset A\}.\] Endow $Y_u$ with the subspace topology inherited from the Fell topology on $\mathcal{C}(G)$. Then, $Y_u$ is a locally compact, second countable, Hausdorff space. Moreover, the map \[ Y_u \times G \ni (A,x) \to A+x \in Y_u\] defines an action of $G$ on $Y_u$. Set \[ X_u:=\{A \in Y_u: -P \subset A\}=\{A \in Y_u:0 \in A\}.\] Then, $X_u$ is a compact subset of $Y_u$. Clearly, $X_u+P \subset X_u$. If $(s_n)$ is a cofinal sequence in $\Omega$, then \[Y_u=\bigcup_{n \geq 1}(X_u-s_n).\] \textbf{Notation:} For $f \in C_{c}(G)$, let $\widetilde{f}:Y_u \to \mathbb{C}$ be defined by \begin{equation} \label{generating set} \widetilde{f}(A):=\int f(x)1_{A}(x)dx.\end{equation} Then, $\widetilde{f} \in C_{c}(Y_u)$. Moreover, $\{\widetilde{f}:f \in C_{c}(G)\}$ generates $C_0(Y_u)$. For $f \in L^1(G)$, we define $\widetilde{f} \in C_0(Y_u)$ exactly as in Eq. \ref{generating set}. For the proof of the above assertions, we refer the reader to \cite{Hilgert_Neeb} and \cite{Sundar_Ore}. The reader is also recommended to consult Section 5 of \cite{Piyasa_Sundar}. \begin{lmma} \label{embedding} The map $Y_u \ni A \to 1_{A} \in L^{\infty}(G)$ is a topological embedding. Here, $L^{\infty}(G)$ is identified with $L^{1}(G)^{*}$ and $L^{\infty}(G)$ is endowed with the weak $^*$-topology. \end{lmma} \textit{Proof.} The fact that $Y_u \ni A \to 1_{A} \in L^{\infty}(G)$ is a continuous injection follows from Prop. II.13\footnote{In \cite{Hilgert_Neeb}, it is assumed that $P$ is a Lie semigroup. 
However, the proof of Prop. II.13 given in \cite{Hilgert_Neeb} works for subsemigroups (with dense interior) of locally compact abelian groups.} of \cite{Hilgert_Neeb}. Suppose $(A_n)$ is a sequence in $Y_u$ and let $A \in Y_u$. Suppose $1_{A_n} \to 1_{A}$ in $L^{\infty}(G)$. Then, $\widetilde{f}(A_n) \to \widetilde{f}(A)$ for every $f \in C_{c}(G)$. Since $\{\widetilde{f}:f \in C_{c}(G)\}$ generates $C_0(Y_u)$, it follows that $A_n \to A$ in $Y_u$. Hence the proof. \hfill $\Box$ \begin{rmrk} \label{Remark} For a strongly continuous semigroup of isometries $V:=\{V_{a}\}_{a \in P}$, we say $V$ has commuting range projections if $\{V_{a}V_{a}^{*}:a \in P\}$ is a commuting family of projections. We also call a strongly continuous semigroup of isometries indexed by $P$ an isometric representation of $P$. Let $Isom_c(P)$ be the collection (up to unitary equivalence) of isometric representations of $P$ with commuting range projections. Let $\mathcal{G}_u:=X_u \rtimes P$ be the Deaconu-Renault groupoid considered in \cite{Sundar_Ore}. Let $Rep(C^*(\mathcal{G}_u))$ be the collection (up to unitary equivalence) of non-degenerate representations of $C^{*}(\mathcal{G}_u)$. It follows from Theorem 7.4 of \cite{Sundar_Ore} that there exists an injective map \[ Isom_c(P) \ni V \to \pi_V \in Rep(C^{*}(\mathcal{G}_u)).\] However, it was not proved in \cite{Sundar_Ore} that the above map is surjective. More precisely, the inverse map was not constructed in \cite{Sundar_Ore}. In this paper, we correct this deficiency modulo Morita equivalence by passing to the transformation groupoid $Y_u \rtimes G$ which is equivalent to $\mathcal{G}_u$. We show in this paper how to construct maps $\Phi:Rep(C_0(Y_u)\rtimes G) \to Isom_c(P)$ and $\Psi:Isom_c(P) \to Rep(C_0(Y_u)\rtimes G)$ which are inverses of each other. To avoid repetition, we will be economical with details and omit proofs that require minor modifications of the arguments presented in \cite{Sundar_Ore}. 
The reason for preferring the transformation groupoid $Y_u\rtimes G$ over the Deaconu-Renault groupoid $X_u \rtimes P$ is that Takai duality is readily available for the transformation groupoid $Y_u \rtimes G$. \end{rmrk} Let $(\pi,W)$ be a covariant representation of the dynamical system $(C_0(Y_u),G)$ on a separable Hilbert space $K$. Denote the algebra of bounded Borel functions on $Y_u$ by $B(Y_u)$. For $\phi \in B(Y_u)$ and $x \in G$, let $L_x\phi \in B(Y_u)$ be defined by $L_x(\phi)(A)=\phi(A-x)$. Denote the extension of $\pi$ to $B(Y_u)$, obtained via the measurable functional calculus, by $\pi$ itself. Then, we have the following covariance relation: for $\phi \in B(Y_u)$ and $x \in G$, \[ W_x\pi(\phi)W_x^{*}=\pi(L_x(\phi)).\] Set $H:=\pi(1_{X_u})K$. Since $X_u+P \subset X_u$, it follows that the subspace $H$ is invariant under $\{W_a:a \in P\}$. For $a \in P$, let $V_{a}$ be the operator on $H$ defined by $V_{a}:=W_{a}|_{H}$. Then, $V:=\{V_{a}\}_{a \in P}$ is a strongly continuous semigroup of isometries. Moreover, the collection $\{V_{a}V_{a}^{*}:a \in P\}$ is a commuting family of projections on $H$. If we want to stress the dependence of $V$ on $(\pi,W)$, we denote $V$ by $V^{(\pi,W)}$. Thus, we get a map denoted $\Phi$ \[ Rep(C_0(Y_u) \rtimes G) \ni (\pi,W) \to V^{(\pi,W)} \in Isom_c(P).\] We next explain how to construct the inverse of the map $\Phi$. Let $V:=\{V_{a}\}_{a \in P}$ be a strongly continuous semigroup of isometries acting on a Hilbert space $H$ with commuting range projections. Let $(W,K)$ be the minimal unitary dilation of $V$. This means the following. \begin{enumerate} \item[(1)] The Hilbert space $K$ contains $H$ as a closed subspace. \item[(2)] $W=\{W_x\}_{x \in G}$ is a strongly continuous group of unitaries on $K$. \item[(3)] For $a \in P$ and $\xi \in H$, $W_a\xi=V_a\xi$. \item[(4)] The union $\bigcup_{a \in P}W_{a}^{*}H$ is dense in $K$. \end{enumerate} For $x \in G$, let $E_x$ be the projection onto the subspace $W_xH$. 
Observe the following. \begin{enumerate} \item[(1)] For $x, y\in G$, $W_xE_yW_{x}^{*}=E_{x+y}$. \item[(2)] Since $W_aH \subset H$ for $a \in P$, it follows that for $x,y \in G$ with $x \leq y$, $E_{x} \geq E_y$. \item[(3)] Let $x, y \in G$ be given. Then, $E_xE_y=E_yE_x$. If $x,y \in P$, this follows from the fact that $V$ has commuting range projections. Note that $\displaystyle \bigcup_{c \in \Omega}(\Omega-c)=\Omega-\Omega=G$. Making use of the fact that $(\Omega-c_1)\cup (\Omega-c_2) \subset \Omega-(c_1+c_2)$ for $c_1,c_2 \in \Omega$, choose $a,b,c \in \Omega$ such that $x=a-c$ and $y=b-c$. Then, by $(1)$, $E_{x}=W_{c}^{*}E_{a}W_{c}$ and $E_{y}=W_{c}^{*}E_{b}W_{c}$. Since $E_a$ and $E_b$ commute, $E_x$ and $E_y$ commute. \item[(4)] The map $G \ni x \to E_{x}=W_{x}E_0W_{x}^{*} \in B(K)$ is strongly continuous. \end{enumerate} Let $\mathcal{D}$ be the $C^{*}$-subalgebra of $B(K)$ generated by $\Big\{ \int f(x)E_xdx: f \in L^1(G)\Big\}$. Let $\chi$ be a character of $\mathcal{D}$. Arguing exactly as in the proof of Prop. 4.3 of \cite{Sundar_Ore} and as in the proof of Prop. 4.6 of \cite{Sundar_Ore}, we see that there exists a unique element in $Y_u$, denoted $A_{\chi}$, such that \[ \chi\Big(\int f(x)E_xdx\Big)=\int f(x)1_{A_\chi}(x)dx\] for every $f \in L^{1}(G)$. It follows from the above equality and Lemma \ref{embedding} that the map $\widehat{\mathcal{D}} \ni \chi \to A_{\chi} \in Y_u$ is a topological embedding. Via this embedding, we view $\widehat{\mathcal{D}}$ as a subspace of $Y_u$. \begin{lmma} The subset $\widehat{\mathcal{D}}$ is a closed subset of $Y_u$. \end{lmma} \textit{Proof.} Let $(\chi_n)$ be a sequence in $\widehat{\mathcal{D}}$ such that $A_{\chi_n} \to A$ for some $A \in Y_u$. Passing to a subsequence, if necessary, we can assume that $(\chi_n) \to \chi$ for some element in $\mathcal{D}^{*}$ where $\mathcal{D}^{*}$, the dual of $\mathcal{D}$, is given the weak $^*$-topology. We claim that $\chi \neq 0$. 
Choose $f \in C_{c}(G)$ such that $\int f(x)1_{A}(x)dx=1$. Then, \begin{align*} \chi\big(\int f(x)E_xdx\big)&=\lim_{n \to \infty}\chi_{n}\big(\int f(x)E_xdx \big)\\&=\lim_{n \to \infty}\int f(x)1_{A_{\chi_n}}(x)dx\\& = \int f(x)1_{A}(x)dx\\&=1.\end{align*} Thus, $\chi$ is non-zero. This proves the claim. Since $\chi \neq 0$, $\chi$ is a character of $\mathcal{D}$. The fact that $\chi$ is a character and the continuity of the map $\widehat{\mathcal{D}} \ni \psi \to A_{\psi} \in Y_u$ implies that $A_{\chi}=\lim_{n \to \infty}A_{\chi_n}=A$. Hence, $\widehat{\mathcal{D}}$ is a closed subset of $Y_u$. Hence the proof. \hfill $\Box$ Let $Res:C_{0}(Y_u) \to C_{0}(\widehat{\mathcal{D}})$ be the restriction map and let $G:C_{0}(\widehat{\mathcal{D}}) \to \mathcal{D}$ be the inverse of the Gelfand transform. Define $\pi:C_{0}(Y_u) \to \mathcal{D} \subset B(K)$ by \[ \pi(\phi)=G\circ Res(\phi).\] Note that $\pi$ is the unique $^*$-homomorphism such that \begin{equation} \label{defining equality} \pi(\widetilde{f})=\int f(x)E_xdx \end{equation} for $f \in L^1(G)$. Using Eq. \ref{defining equality}, the equality $W_xE_yW_{x}^{*}=E_{x+y}$ and the fact that $\{\widetilde{f}: f \in C_{c}(G)\}$ generates $C_0(Y_u)$, it is routine to verify that $(\pi,W)$ is a covariant representation of the dynamical system $(C_0(Y_u),G)$. We denote the extension of $\pi$ to $B(Y_u)$ obtained via the Borel functional calculus by $\pi$ itself. Then, \[ W_x\pi(\phi)W_{x}^{*}=\pi(L_x(\phi))\] for $x \in G$ and $\phi \in B(Y_u)$. We record below a few elementary properties of the representation $\pi$. \begin{lmma} \label{projection} For $x \in G$, $\pi(1_{X_u+x})=E_x$. In particular, $\pi(1_{X_u})$ is the orthogonal projection onto $H$. \end{lmma} \textit{Proof.} Thanks to the covariance relation, it suffices to prove that $\pi(1_{X_u})=E_0$. Let $(O_n)$ be a decreasing sequence of open sets with compact closure such that $\{O_n: n\geq 1\}$ forms a neighbourhood base at $0$. 
Set $E_n:=O_n \cap -P$ and let $f_n:=\frac{1}{\mu(E_n)}1_{E_n}$. Here, $\mu$ is the Haar measure on $G$. We leave it to the reader to verify that $\widetilde{f_n} \to 1_{X_u}$ pointwise. Moreover, $\widetilde{f_n}$ is uniformly bounded. Thus, \begin{align*} \pi(1_{X_u})&=\lim_{n \to \infty}\pi(\widetilde{f_n})\\ &=\lim_{n \to \infty}\int f_n(x)E_xdx\\ &=E_0. \end{align*} In the above chain of equalities, the limit is to be understood in the strong operator topology. Hence the proof. \hfill $\Box$ \begin{ppsn} The representation $\pi$ is non-degenerate. \end{ppsn} \textit{Proof.} Let $\xi \in K$ be such that $\langle \pi(\phi)\eta|\xi\rangle=0$ for every $\phi \in C_0(Y_u)$ and for every $\eta \in K$. Taking $\phi=\widetilde{f}$ for $f \in C_c(G)$, we see that \[ \langle \pi(\widetilde{f})\eta|\xi\rangle=\int f(x)\langle E_x\eta|\xi\rangle dx=0.\] As the above equality happens for every $f \in C_{c}(G)$ and the map $G \ni x \to E_x \in B(K)$ is strongly continuous, we deduce that for $x \in G$ and $\eta \in K$, $\langle E_x\eta|\xi\rangle=0$. Thus, $\xi$ is orthogonal to $W_{a}^{*}H$ for every $a \in P$. But the union $\bigcup_{a \in P}W_{a}^{*}H$ is dense in $K$. Therefore, $\xi=0$. This completes the proof. \hfill $\Box$ To denote the dependence of $(\pi,W)$ on $V$, we denote $(\pi,W)$ by $(\pi^V,W^V)$. This way, we obtain a map, denoted $\Psi$, \[ Isom_{c}(P) \ni V \to (\pi^V,W^V) \in Rep(C_0(Y_u)\rtimes G).\] \begin{thm} \label{equivalence} The map \[\Phi:Rep(C_0(Y_u) \rtimes G) \ni (\pi,W) \to V^{(\pi,W)} \in Isom_{c}(P)\] and the map \[\Psi:Isom_{c}(P) \ni V \to (\pi^V,W^V) \in Rep(C_0(Y_u)\rtimes G)\] are inverses of each other. \end{thm} \textit{Proof.} Let $(\pi,W) \in Rep(C_0(Y_u)\rtimes G)$ be given. Suppose that $(\pi,W)$ acts on $K$. Set $V:=V^{(\pi,W)}$. We need to show that $(\pi^V,W^V)=(\pi,W)$. First, we claim that $W$ is the minimal unitary dilation of $V$. 
Recall that $H=\pi(1_{X_u})K$ and $V=\{V_{a}\}_{a \in P}$ is the restriction of $\{W_{a}\}_{a \in P}$ onto $H$. Thus, $W$ is a dilation of $V$. It is enough to show that $\bigcup_{a \in P}W_{a}^{*}H$ is dense in $K$. Let $(s_n)$ be an increasing cofinal sequence in $\Omega$. Observe that $1_{X_u-s_n} \nearrow 1_{Y_u}$. Thus, $\pi(1_{X_u-s_n})=W_{s_n}^{*}\pi(1_{X_u})W_{s_n} \nearrow 1$ strongly. Clearly, $\pi(1_{X_u-{s_n}})$ is the orthogonal projection onto $W_{s_n}^{*}H$. Thus, $\bigcup_{n \geq 1}W_{s_n}^{*}H$ is dense in $K$. This proves that $W$ is the minimal unitary dilation of $V$. Thus, $W^{V}=W$. Next, we show that $\pi=\pi^V$. By definition, $\pi(1_{X_u+x})$ is the orthogonal projection onto $W_xH$ and by Lemma \ref{projection}, $\pi^V(1_{X_u+x})=E_x$ which is the orthogonal projection onto $W_xH$. Thus, \begin{equation} \label{basic} \pi(1_{X_u+x})=\pi^V(1_{X_u+x}) \end{equation} for every $x \in G$. For a compact subset $F$ of $G$ and for an open subset $O$ of $G$, define \begin{align*} \mathcal{U}_{F}:&=\{A \in Y_u: A \cap F=\emptyset\},\\ \mathcal{U}_{O}:&=\{A \in Y_u: A \cap O\neq \emptyset\},\\ \mathcal{U}^{'}_{O}:&=\{A \in Y_u: A \cap O=\emptyset\}. \end{align*} The sets $\mathcal{U}_{F} \cap \mathcal{U}_{O_1} \cap \mathcal{U}_{O_2} \cap \cdots \cap \mathcal{U}_{O_n}$, as $F$ and $O_i$'s vary, form a basis for the Fell topology on $Y_u$. Thus, it suffices to show that for every compact set $F$ and for every open set $O$, $\pi(1_{\mathcal{U}_F})=\pi^V(1_{\mathcal{U}_F})$ and $\pi(1_{\mathcal{U}_{O}})=\pi^V(1_{\mathcal{U}_O})$. Fix an open set $O$ of $G$. Let $D:=\{x_1,x_2,\cdots\}$ be a dense subset of $O$. Let $A \in Y_u$ be given. Observe $A+\Omega=\bigcup_{a \in A}(a+\Omega)$ is an open set contained in $A$. Thus, $A+\Omega \subset Int(A)$. Since $0 \in \overline{\Omega}$, $Int(A)$ is dense in $A$. Thus, for $A \in Y_u$, $A \cap O \neq \emptyset$ if and only if $Int(A) \cap O\neq \emptyset$ if and only if $A \cap D\neq \emptyset$. 
Therefore, for $A \in Y_u$, \[ 1_{\mathcal{U}_O}(A)=\sup_{n \geq 1}1_{A}(x_n)=\sup_{n \geq 1}1_{X_u+x_n}(A).\] By Eq. \ref{basic} and by Borel functional calculus, we obtain $\pi(1_{\mathcal{U}_O})=\pi^V(1_{\mathcal{U}_O})$. Since $\mathcal{U}^{'}_{O}$ is the complement of $\mathcal{U}_{O}$, it follows that \begin{equation} \label{basic1} \pi(1_{\mathcal{U}^{'}_{O}})=\pi^{V}(1_{\mathcal{U}^{'}_{O}}) \end{equation} for every open set $O$ of $G$. Let $F$ be a compact subset of $G$. Choose a decreasing sequence of open sets $(O_n)$ such that $\{O_n: n \geq 1\}$ forms a base at $F$. This means that if $O$ is an open set that contains $F$, then $O_n \subset O$ eventually. Note that for a closed subset $A$ of $G$, $A \cap F=\emptyset$ if and only if $A \cap O_n=\emptyset$ eventually. Thus, \[ 1_{\mathcal{U}_F}=\limsup_{n \to \infty}1_{\mathcal{U}^{'}_{O_n}}.\] By Borel functional calculus and by Eq. \ref{basic1}, we have $\pi(1_{\mathcal{U}_F})=\pi^V(1_{\mathcal{U}_F})$. Hence, $\pi=\pi^V$. This completes the proof of the assertion $\Psi\circ \Phi=Id$. Let $V \in Isom_{c}(P)$ be given. Suppose that $V$ acts on $H$. Set $(\pi,W)=(\pi^V,W^V)$ and let $K$ be the Hilbert space on which $(\pi,W)$ acts. Let $\widetilde{V}=V^{(\pi,W)}$. For $a \in P$, $V_{a}$ is the restriction of $W_{a}$ to $H$ and $\widetilde{V}_a$ is the restriction of $W_{a}$ to $\pi(1_{X_u})H$. By Lemma \ref{projection}, we have $\pi(1_{X_u})H=H$. Consequently, $\widetilde{V}=V$. Hence $\Phi \circ \Psi=Id$. The proof is now complete. \hfill $\Box$ \section{Proof of the main theorem} With Thm. \ref{equivalence} in hand, the conceptual explanation for Thm. \ref{main theorem} is quite simple. Having a unitary group, indexed by $\widehat{G}$, implementing the Weyl commutation relation is equivalent to having a unitary group implementing the dual action on $C_0(Y_u)\rtimes G$. Then, Thm. \ref{main theorem} is a straightforward consequence of Takai duality. We explain some details below. 
Let $\mathcal{W}_c(P,\widehat{G})$ denote the collection (up to unitary equivalence) of weak Weyl pairs with commuting range projections. Consider the dual action of $\widehat{G}$ on $C_0(Y_u) \rtimes G$. We prove below that $\mathcal{W}_{c}(P,\widehat{G}) \cong Rep((C_0(Y_u)\rtimes G)\rtimes \widehat{G})$. \begin{thm} \label{equivalence1} There exist maps \[\Psi:\mathcal{W}_{c}(P,\widehat{G}) \to Rep((C_0(Y_u)\rtimes G)\rtimes \widehat{G})\] and \[\Phi:Rep((C_0(Y_u)\rtimes G)\rtimes \widehat{G})\to \mathcal{W}_{c}(P,\widehat{G})\] such that $\Phi$ and $\Psi$ are inverses of each other. \end{thm} \textit{Proof.} Let $((\pi,W),U) \in Rep((C_0(Y_u)\rtimes G) \rtimes \widehat{G})$. Suppose that $((\pi,W),U)$ acts on $K$. Set $V:=V^{(\pi,W)}$. By definition, $V$ acts on $H=\pi(1_{X_u})K$. Since $U:=\{U_{\chi}\}_{\chi \in \widehat{G}}$ commutes with $\pi(C_0(Y_u))$, it follows that $U_{\chi}$ maps $H$ onto $H$. Clearly, $(U|_{H},V^{(\pi,W)})$ is a weak Weyl pair on $H$ with commuting range projections. We define \[ \Phi((\pi,W),U)=(U|_{H},V^{(\pi,W)}).\] Let $(U,V) \in \mathcal{W}_{c}(P,\widehat{G})$ be given. Suppose that $(U,V)$ acts on $H$. By Thm. \ref{equivalence}, there exists $(\pi,W) \in Rep(C_0(Y_u)\rtimes G)$ such that $V=V^{(\pi,W)}$. Suppose that $(\pi,W)$ acts on $K$. Then, $H=\pi(1_{X_u})K$. Recall that $W$ is the minimal unitary dilation of $V$. Let $\chi \in \widehat{G}$ be given. We claim that there exists a unique unitary operator $\widetilde{U}_\chi$ on $K$ such that \begin{enumerate} \item[(C1)] for $\xi \in H$, $\widetilde{U}_\chi\xi=U_\chi\xi$, and \item[(C2)] for $x \in G$, $\widetilde{U}_{\chi}W_x=\chi(x)W_{x}\widetilde{U}_\chi$. \end{enumerate} Conditions $(C1)$ and $(C2)$ together with the fact that $\bigcup_{a \in P}W_{a}^{*}H$ is dense in $K$ clearly determine the operator $\widetilde{U}_{\chi}$ uniquely. We show the existence below. 
Define $\widetilde{U}_{\chi}$ on the dense subspace $\bigcup_{a \in P}W_{a}^{*}H$ as follows: for $\xi \in W_{a}^{*}H$, set \[ \widetilde{U}_{\chi}\xi=\overline{\chi(a)}W_{a}^{*}U_\chi W_{a}\xi.\] Let $a,b \in P$ and let $\xi \in W_{a}^{*}H \cap W_{b}^{*}H$ be given. Since $W_xH \subset H$ for $x \in P$, it follows that $W_{a}^{*}H \cap W_{b}^{*}H \subset W_{a+b}^{*}H$. Calculate as follows to observe that \begin{align*} \overline{\chi(a+b)}W_{a+b}^{*}U_{\chi}W_{a+b}\xi&=\overline{\chi(a+b)}W_{a}^{*}W_{b}^{*}U_{\chi}W_{b}W_{a}\xi \\ &=\overline{\chi(a+b)}W_{a}^{*}W_{b}^{*}U_{\chi}V_bW_{a}\xi ~~(~~\textrm{since $W_a\xi \in H$})\\ &=\overline{\chi(a+b)}W_{a}^{*}W_{b}^{*}\chi(b)V_{b}U_{\chi}W_{a}\xi ~~(~~\textrm{since $U_{\chi}V_b=\chi(b)V_{b}U_\chi$})\\ &=\overline{\chi(a)}W_{a}^{*}W_{b}^{*}W_{b}U_{\chi}W_{a}\xi ~~(~~\textrm{since $U_{\chi}W_{a}\xi \in H$})\\ &=\overline{\chi(a)}W_{a}^{*}U_{\chi}W_{a}\xi. \end{align*} Similarly, $\overline{\chi(a+b)}W_{a+b}^{*}U_{\chi}W_{a+b}\xi=\overline{\chi(b)}W_{b}^{*}U_{\chi}W_{b}\xi$. This shows that $\widetilde{U}_{\chi}$ is well defined. It is clear from the definition that $\widetilde{U}_{\chi}$ is an isometry on $D:=\bigcup_{a \in P}W_{a}^{*}H$ and maps $D$ onto $D$. Thus, $\widetilde{U}_{\chi}$ extends to a unitary operator on $K$ which we again denote by $\widetilde{U}_{\chi}$. By definition $\widetilde{U}_{\chi}$ restricted to $H$ coincides with $U_{\chi}$. Using the fact that $(U,V)$ is a weak Weyl pair and the definition of $\widetilde{U}_{\chi}$, it is easy to check, on the dense subspace $D:=\bigcup_{a \in P}W_{a}^{*}H$, that \[ \widetilde{U}_{\chi}W_{a}=\chi(a)W_{a}\widetilde{U}_\chi\] for $a \in P$. Since $P$ spans $G$ and $\{W_{x}\}_{x \in G}$ is a group of unitaries, it follows that $\widetilde{U}_{\chi}W_x= \chi(x)W_x\widetilde{U}_{\chi}$ for $x \in G$. Thus, we have established the existence of the unitary operator $\widetilde{U}_{\chi}$ on $K$ for which $(C1)$ and $(C2)$ are satisfied. 
For $x \in G$, let $E_x$ be the projection onto $W_xH$. Let $\chi \in \widehat{G}$ be given. By the definition of $\widetilde{U}_{\chi}$, $\widetilde{U}_{\chi}$ maps $W_{a}^{*}H$ onto $W_{a}^{*}H$ for every $a \in P$. Thus, $\widetilde{U}_{\chi}$ commutes with $\{E_{-a}: a \in P\}$. Let $x \in G$ be given. Write $x=a-b$ with $a,b \in P$. Then, $E_{x}=W_{a}E_{-b}W_{a}^{*}$. Thanks to the Weyl commutation relation and the fact that $\widetilde{U}_{\chi}$ commutes with $E_{-b}$, it follows that $\widetilde{U}_{\chi}$ commutes with $E_x$ for every $x \in G$. Let $f \in C_{c}(G)$ be given. Recall that \[ \pi(\widetilde{f})=\int f(x)E_xdx.\] Since $\widetilde{U}_{\chi}$ commutes with $\{E_x:x \in G\}$, $\widetilde{U}_{\chi}$ commutes with $\{\pi(\widetilde{f}):f \in C_{c}(G)\}$. Since $\{\widetilde{f}: f \in C_{c}(G)\}$ generates $C_0(Y_u)$, it follows that $\widetilde{U}_{\chi} \in \pi(C_0(Y_u))^{'}$. We leave it to the reader to verify that $\widetilde{U}:=\{\widetilde{U}_\chi\}_{\chi \in \widehat{G}}$ is a strongly continuous group of unitaries on $K$. We have now proved that $((\pi,W),\widetilde{U})$ is a representation of the dynamical system $(C_0(Y_u)\rtimes G,\widehat{G})$. Set \[ \Psi(U,V)=((\pi,W),\widetilde{U}).\] Then, $\Psi$ and $\Phi$ are inverses of each other. We omit this routine verification. \hfill $\Box$ \begin{rmrk} \label{factorial to factorial} The maps $\Phi$ and $\Psi$ of Thm. \ref{equivalence1} take factorial representations to factorial representations and take irreducible representations to irreducible representations. The proof that $\Phi$ maps factorial representations to factorial representations proceeds as follows. Let $((\pi,W),U)$ be a representation of $(C_0(Y_u)\rtimes G, \widehat{G})$ and let $K$ be the Hilbert space on which it acts. Let $\Phi((\pi,W),U)=(U|_{H},V)$ where $H=\pi(1_{X_u})K$. 
Denote the von Neumann algebra generated by $\{\pi(\phi),W_x,U_{\chi}:\phi \in C_0(Y_u),x \in G, \chi \in \widehat{G}\}$ by $M$ and the von Neumann algebra on $H$ generated by $\{V_{a},U_{\chi}|_{H}: a \in P, \chi \in \widehat{G}\}$ by $N$. Then, it is routine to prove that \[ M^{'} \ni T \to T|_{H} \in N^{'}\] is an isomorphism between $M^{'}$ and $N^{'}$. Thus, $M$ is a factor if and only if $N$ is a factor. Similarly, $M^{'}=\mathbb{C}$ if and only if $N^{'}=\mathbb{C}$. Thus, $\Phi$ and $\Psi$ map factorial representations to factorial representations and irreducible representations to irreducible representations. \end{rmrk} Thm. \ref{main theorem} is an immediate consequence of Thm. \ref{equivalence1} and Takai duality. Recall that Takai duality asserts that $(C_0(Y_u)\rtimes G)\rtimes \widehat{G} \cong C_{0}(Y_u)\otimes \mathcal{K}(L^2(G))$. As a consequence, we have $Rep((C_0(Y_u)\rtimes G)\rtimes \widehat{G}) \cong Rep(C_0(Y_u))$. The proof of Thm. \ref{main theorem} is essentially transporting the representation theory of $C_0(Y_u)$ to $\mathcal{W}_{c}(P,\widehat{G})$ using Thm. \ref{equivalence1} and by making use of the explicit isomorphism between the $C^{*}$-algebras $(C_0(Y_u) \rtimes G)\rtimes \widehat{G}$ and $C_0(Y_u)\otimes \mathcal{K}(L^2(G))$. For the explicit isomorphism involved in Takai duality, we refer the reader to either \cite{Raeburn_Takai} or \cite{Williams_Dana}. We will not write down all the details. For the reader's convenience, we mention a few details concerning the irreducible weak Weyl pairs with commuting range projections. Let us recall the irreducible representations of $(C_{0}(Y_u)\rtimes G)\rtimes \widehat{G}$. Let $K:=L^{2}(G)$. Fix an element $A \in Y_u$. 
Define a representation $\pi_{A}$ of $C_{0}(Y_u)$ on $K$ by \[ \pi_{A}(f)\xi(x)=f(A+x)\xi(x).\] For $x \in G$, let $W_{x}$ be the unitary on $K$ defined by \[W_{x}\xi(y)=\xi(y-x).\] For $\chi \in \widehat{G}$, let $U_{\chi}$ be the unitary operator on $K$ defined by \[U_{\chi}\xi(y)=\chi(y)\xi(y).\] Then, $\{((\pi_A,W),U)\}_{A \in Y_u}$ form a mutually inequivalent exhaustive list of irreducible representations of $(C_0(Y_u)\rtimes G)\rtimes \widehat{G}$. For $A \in Y_u$, let $B=-A$ and let $V^A:=V^{(\pi_A,W)}$. Observe that for $\xi \in K$, \[ \pi(1_{X_u})\xi(x)=1_{X_u}(A+x)\xi(x)=1_{A}(-x)\xi(x)=1_{B}(x)\xi(x).\] Thus, $\pi(1_{X_u})K=L^{2}(B)$. By definition, $V^{A}$ is the compression of the left regular representation onto $L^2(B)$. Thanks to Thm. \ref{equivalence1}, the assertions in Thm. \ref{main theorem} concerning the irreducible weak Weyl pairs with commuting range projections are now clear. Other assertions can be proved similarly. We leave the details to the reader. What about weak Weyl pairs which do not have commuting range projections ? If we drop the assumption that the range projections commute, then we show that, for $P=\mathbb{R}_{+}^{2}$, we can construct weak Weyl pairs that generate a factor of both type II and type III. Moreover, we also illustrate that classifying all the irreducible weak Weyl pairs is a complicated task. More precisely, we explain a procedure (preserving factoriality and irreducibility) that allows us to build weak Weyl pairs starting from a non-degenerate representation of the free product $c_0(\mathbb{N})*c_0(\mathbb{N})$. For the rest of this paper, we assume that $P=\mathbb{R}_{+}^{2}=[0,\infty)\times [0,\infty)$ and $G=\mathbb{R}^2$. We identify $\widehat{G}$ with $\mathbb{R}^2$ in the usual way. 
Let $\{P_{m}\}_{m \geq 1}$ and $\{Q_n\}_{n \geq 1}$ be two sequences of projections on a Hilbert space $K$ such that $P_iP_j=\delta_{ij}P_i$ and $Q_kQ_{\ell}=\delta_{k\ell}Q_k$\footnote{Writing down two such sequences of projections on a Hilbert space $K$ is clearly equivalent to defining a representation of the free product $c_0(\mathbb{N})\ast c_0(\mathbb{N})$.}. Denote the set of projections on $K$ by $P(K)$. Define a map $F:\mathbb{Z}^2 \to P(K)$ by \begin{equation*} F_{(m,n)}:=\begin{cases} \sum_{k=1}^{m}P_k & \mbox{if $m \geq 1$ and $ n= 0$}, \cr \sum_{k=1}^{n}Q_k & \mbox{if $m=0$ and $n \geq 1$}, \cr 1 & \mbox{if~} m \geq 1, n \geq 1, \cr 0 & \mbox{otherwise}. \end{cases} \end{equation*} Note that if $(m,n) \in \mathbb{Z}^2$ and $(p,q) \in \mathbb{N}^2$, $F_{(m+p,n+q)} \geq F_{(m,n)}$. Let $R:=[0,1] \times [0,1]$ be the unit square and suppose that $\lambda$ is the Lebesgue measure on $R$. Consider $L^{\infty}(R,d\lambda)$ as a $C^{*}$-algebra and let $X$ be the character space of $L^{\infty}(R,d\lambda)$. Fix $a,b,c,d \in (0,1)$ such that $a<b$ and $c<d$. Fix a point $z_0 \in X$ such that $1_{[a,b]\times [c,d]}(z_0) \neq 0$. Define a map $E:\mathbb{R}_{+}^{2} \to P(K)$ as follows. Let $(s,t) \in \mathbb{R}_{+}^{2}$ be given. Let $m$ be the integral part of $s$ and let $n$ be the integral part of $t$. Set \begin{align*} R_0(s,t):&=[0,m+1-s]\times[0,n+1-t]\\ R_1(s,t):&=[0,m+1-s]\times [n+1-t,1]\\ R_2(s,t):&=[m+1-s,1]\times [0,n+1-t] \\ R_3(s,t):&=[m+1-s,1]\times [n+1-t,1]. \end{align*} Define $E_{(s,t)}$ by the following formula. \[ E_{(s,t)}:=1_{R_0(s,t)}(z_0)F_{(m,n)}+1_{R_1(s,t)}(z_0)F_{(m,n+1)}+1_{R_2(s,t)}(z_0)F_{(m+1,n)}+1_{R_3(s,t)}(z_0)F_{(m+1,n+1)}.\] Since $\{1_{R_i}\}_{i=0}^{3}$ is an orthogonal family in $L^{\infty}(R,d\lambda)$ adding up to $1$, exactly one term survives in the above expression. Consequently, $E_{(s,t)}$ is a projection. \begin{lmma} \label{crucial} With the foregoing notation, we have the following. 
\begin{enumerate} \item[(1)] The map $E:\mathbb{R}_{+}^{2} \to P(K)$ is increasing, i.e $E_{(s,t)} \leq E_{(s+s_0,t+t_0)}$ for $(s,t) \in \mathbb{R}_{+}^{2}$ and for every $(s_0,t_0) \in \mathbb{R}_{+}^{2}$. \item[(2)] For $\xi,\eta \in K$, the map \[ \mathbb{R}_{+}^{2} \ni (s,t) \to \langle E_{(s,t)}\xi|\eta \rangle \in \mathbb{C}\] is Lebesgue measurable. \item[(3)] Let $(m,n) \in \mathbb{N}^2$ be given. The set $\{(s,t) \in \mathbb{R}_{+}^{2}:E_{(s,t)}=F_{(m,n)}\}$ contains a Lebesgue measurable set of positive measure. \end{enumerate} \end{lmma} \textit{Proof.} The proof of $(1)$ is a case by case verification. Let $(s,t) \in \mathbb{R}_{+}^{2}$ be given. Suppose $s_1>s$. Let $m$ be the integral part of $s$, $p$ the integral part of $s_1$ and $n$ the integral part of $t$. \textbf{Case 1: $m<p$.} Let $r:=m+1-s$ and $r_1=p+1-s_1$. \textbf{Case (a): $r \leq r_1$.} \textbf{Case $(i)$: $1_{R_0(s,t)}(z_0)=1$}. In this case, $E_{(s,t)}=F_{(m,n)}$. Note that $R_{0}(s_1,t)$ contains $R_{0}(s,t)$. Thus, $1_{R_0(s,t)} \leq 1_{R_0(s_1,t)}$ in $L^{\infty}(R,d\lambda)$. Consequently, $1_{R_0(s_1,t)}(z_0) =1$. Therefore, $E_{(s_1,t)}=F_{(p,n)}$. Since $F_{(p,n)} \geq F_{(m,n)}$, we have $E_{(s_1,t)}\geq E_{(s,t)}$. \textbf{Case $(ii)$: $1_{R_1(s,t)}(z_0)=1$.} We can argue as in Case $(i)$ and deduce $E_{(s_1,t)}\geq E_{(s,t)}$. \textbf{Case $(iii)$: $1_{R_2(s,t)}(z_0)=1$.} In this case, $E_{(s,t)}=F_{(m+1,n)}$. Note that the union $R_{2}(s_1,t)\cup R_{0}(s_1,t)$ contains $R_{2}(s,t)$. Therefore, either $1_{R_0(s_1,t)}(z_0)=1$ or $1_{R_2(s_1,t)}(z_0)=1$. This means that $E_{(s_1,t)}$ is either $F_{(p,n)}$ or $F_{(p+1,n)}$. Both $F_{(p,n)}$ and $F_{(p+1,n)}$ are greater than $F_{(m+1,n)}$ as $F$ is increasing and as $p \geq m+1$. Thus, $E_{(s_1,t)} \geq E_{(s,t)}$. \textbf{Case $(iv)$: $1_{R_3(s,t)}(z_0)=1$.} In this case, $E_{(s,t)}=F_{(m+1,n+1)}$. Note that the union $R_{3}(s_1,t)\cup R_1(s_1,t)$ contains $R_3(s,t)$. 
Therefore, either $1_{R_3(s_1,t)}(z_0)=1$ or $1_{R_1(s_1,t)}(z_0)=1$. This means that $E_{(s_1,t)}$ is either $F_{(p+1,n+1)}$ or $F_{(p,n+1)}$. In either case, $E_{(s_1,t)} \geq E_{(s,t)}$. \textbf{Case (b): $r>r_1$.} The analysis here is similar and we can conclude $E_{(s,t)} \leq E_{(s_1,t)}$. \textbf{Case 2: $m=p$.} The analysis here is similar to Case 1 (in this case, Case $(a)$ does not arise) and we can conclude that $E_{(s,t)} \leq E_{(s_1,t)}$. Thus, we have proved that $E_{(s,t)} \leq E_{(s+s_0,t)}$ for every $(s,t) \in \mathbb{R}_{+}^{2}$ and for every $s_0 \geq 0$. An exactly similar argument shows $E_{(s,t)} \leq E_{(s,t+t_0)}$ for every $(s,t) \in \mathbb{R}_{+}^{2}$ and $t_0 \geq 0$. Hence, the function $E$ is increasing. This proves $(1)$. To prove $(2)$, thanks to the polarisation identity, it suffices to show, that for every $\xi \in K$, the map $\mathbb{R}_{+}^{2} \ni (s,t) \to \langle E_{(s,t)}\xi|\xi \rangle \in \mathbb{R}$ is Lebesgue measurable. To that effect, let $\xi \in K$ be given and define $\phi:\mathbb{R}_{+}^{2} \to \mathbb{R}$ by \[ \phi(s,t):=\langle E_{(s,t)}\xi|\xi \rangle.\] Then, if we fix one variable, $\phi$ is monotone in the other variable. It is well known (and we leave it to the reader to prove that) that such functions are Lebesgue measurable. This proves $(2)$. Let $(m,n) \in \mathbb{N}^2$. Let \[A:=\{(s,t) \in [m,m+1)\times [n,n+1): (m+1-s,n+1-t) \in (b,1)\times (d,1)\}.\] Then, $A$ is a Borel set of positive measure. Let $(s,t) \in A$ be given. Note that $R_0(s,t)$ contains $[a,b] \times [c,d]$. Since $1_{[a,b]\times [c,d]}(z_0)=1$, we have $1_{R_0(s,t)}(z_0)=1$. Consequently, for $(s,t) \in A$, $E_{(s,t)}=F_{(m,n)}$. This proves that $A \subset \{(s,t) \in \mathbb{R}_{+}^{2}: E_{(s,t)}=F_{(m,n)}\}$. The proof of $(3)$ is complete. \hfill $\Box$ Extend $E$ to the whole of $\mathbb{R}^2$ by setting $E_{(s,t)}=0$ if $(s,t) \notin \mathbb{R}_{+}^{2}$. 
Then, the extended map $E:\mathbb{R}^2 \to P(K)$ is still increasing and Lebesgue measurable. Let $L:=L^{2}(\mathbb{R}^2,K)$ be the space of square integrable Lebesgue measurable functions taking values in $K$. For $(x,y) \in \mathbb{R}^2$, let $U_{(x,y)}$ be the unitary on $L$ defined by \[ U_{(x,y)}f(u,v):=e^{i(ux+vy)}f(u,v).\] For $(s,t) \in \mathbb{R}^2$, let $W_{(s,t)}$ be the unitary on $L$ defined by \[ W_{(s,t)}f(u,v)=f(u-s,v-t).\] Define a projection $\widetilde{E}:L \to L$ by \[ \widetilde{E}f(u,v)=E_{(u,v)}f(u,v).\] Set $H:=Ran(\widetilde{E})$. Note that $U_{(x,y)}$ commutes with $\widetilde{E}$. Thus, $U_{(x,y)}$ maps $H$ onto $H$. We denote the restriction of $U_{(x,y)}$ to $H$ again by $U_{(x,y)}$. Using the fact that $E$ is increasing, it is routine to prove that $\widetilde{E}W_{(s,t)}\widetilde{E}=W_{(s,t)}\widetilde{E}$ for every $(s,t) \in \mathbb{R}_{+}^{2}$. In other words, $H$ is invariant under $\{W_{(s,t)}:(s,t) \in \mathbb{R}_{+}^{2}\}$. For $(s,t) \in \mathbb{R}_{+}^{2}$, let $V_{(s,t)}$ be the isometry on $H$ defined by \[ V_{(s,t)}=W_{(s,t)}|_{H}.\] Then, $V:=\{V_{(s,t)}\}_{(s,t) \in \mathbb{R}_{+}^{2}}$ is a strongly continuous semigroup of isometries on $H$. Similarly, $U:=\{U_{(x,y)}|_{H}\}_{(x,y) \in \mathbb{R}^2}$ is a strongly continuous group of unitaries. Clearly, $(U,V)$ is a weak Weyl pair. Let us fix notation. Define \begin{align*} M_0:&=W^{*}\{U_{(x,y)}|_{H}, V_{(s,t)}: (x,y) \in \mathbb{R}^2, (s,t) \in \mathbb{R}_{+}^{2}\},\\ M_1:&=W^{*}\{U_{(x,y)}, W_{(s,t)},\widetilde{E}: (x,y) \in \mathbb{R}^2, (s,t) \in \mathbb{R}^2\},\\ N:&=W^{*}\{F_{(m,n)}:(m,n) \in \mathbb{N}^2\}=W^{*}\{P_m,Q_n: m \in \mathbb{N}, n \in \mathbb{N}\}. \end{align*} Note that $M_0$ acts on $H$, $M_1$ acts on $L$ and $N$ acts on $K$. For a bounded operator $T$ on $K$, let $\widetilde{T}$ be the operator on $L$ defined by \[ \widetilde{T}f(y)=Tf(y).\] \begin{ppsn} \label{induced} With the foregoing notation, we have the following. 
\begin{enumerate} \item[(1)] The map $N^{'} \ni T \to \widetilde{T} \in M_1^{'}$ is an isomorphism. \item[(2)] $(W,L)$ is the minimal unitary dilation of $V$. \item[(3)] Let $t \in \{I, II, III\}$. The von Neumann algebra $M_0$ is a factor of type $t$ if and only if $N$ is a factor of type $t$. \item[(4)] The weak Weyl pair $(U,V)$ is irreducible if and only if $N^{'}=\mathbb{C}$. \end{enumerate} \end{ppsn} \textit{Proof.} From a routine computation, we see that if $T \in N^{'}$, then $\widetilde{T} \in M_1^{'}$. Let $S \in M_{1}^{'}$ be given. Note that \[W^{*}\{U_{(x,y)}, W_{(s,t)}:(x,y) \in \mathbb{R}^2, (s,t) \in \mathbb{R}^2\} =B(L^{2}(\mathbb{R}))\otimes 1 \subset B(L^{2}(\mathbb{R})\otimes K).\] Therefore, there exists $T \in B(K)$ such that $S=\widetilde{T}$. The fact that $S$ commutes with $\widetilde{E}$ translates to the equation \[ TE_{(s,t)}=E_{(s,t)}T\] for almost all $(s,t) \in \mathbb{R}_{+}^{2}$. By $(3)$ of Lemma \ref{crucial}, $T$ commutes with $F_{(m,n)}$ for every $(m,n) \in \mathbb{N}^2$. Thus, $T \in N^{'}$. This completes the proof of $(1)$. By definition, $(W,L)$ is a dilation of $V$. Let $Q$ be the projection onto the closure of the subspace $\bigcup_{(s,t) \in \mathbb{R}_{+}^{2}}W_{(s,t)}^{*}H$. Note that $Ran(Q)$ is invariant under $U_{(x,y)}$ and $W_{(s,t)}$ for every $(x,y) \in \mathbb{R}^2$ and for every $(s,t) \in \mathbb{R}^2$. Thus, $Q \in \{U_{(x,y)}, W_{(s,t)}:(x,y),(s,t) \in \mathbb{R}^2\}^{'}$. Consequently, $Q=\widetilde{R}$ for some projection $R$ on $K$. The condition $Q \geq \widetilde{E}$ translates to the fact that $R \geq E_{(s,t)}$ for almost all $(s,t) \in \mathbb{R}_{+}^{2}$. Thanks to $(3)$ of Lemma \ref{crucial}, $R \geq F_{(1,1)}=1$. Thus, $R=1$ and hence $Q=1$. This proves $(2)$. 
As alluded to in Remark \ref{factorial to factorial}, it is not difficult to prove using the fact that $(W,L)$ is the minimal unitary dilation of $V$ that \[ M_{1}^{'} \ni T \to T|_{H} \in M_0^{'}\] is an isomorphism of von Neumann algebras. Now, $(3)$ and $(4)$ follow from $(1)$. \hfill $\Box$ \begin{rmrk} We conclude this paper with the following remarks. \begin{enumerate} \item[(1)] Thanks to Prop. \ref{induced}, we can construct an irreducible weak Weyl pair starting from an irreducible representation of the free product $c_0(\mathbb{N})\ast c_0(\mathbb{N})$. Moreover, inequivalent irreducible representations of $c_0(\mathbb{N})\ast c_0(\mathbb{N})$ lead to inequivalent weak Weyl pairs. Thus, listing out all the irreducible weak Weyl pairs is at least as hard as describing the dual of $c_0(\mathbb{N})\ast c_0(\mathbb{N})$. To the author's knowledge, a ``good description'' of the dual of $c_0(\mathbb{N})\ast c_0(\mathbb{N})$ or even the dual of some of its natural quotients like $C^{*}(\mathbb{Z}_n*\mathbb{Z}_m)$ ($n \geq 2$, $m \geq 3$) is not available in the literature. \item[(2)] Observe that for the weak Weyl pair $(U,V)$ constructed in Prop. \ref{induced}, we have $Spec(U)$ is independent of the underlying representation of $c_0(\mathbb{N})\ast c_0(\mathbb{N})$ as long as $F_{(m,n)} \neq 0$ for $(m,n) \in \mathbb{N}^2 \backslash \{(0,0)\}$. Thus, Corollary \ref{uniqueness} is not true without the commutativity assumption on the range projections. \item[(3)] Prop. \ref{induced} allows us to construct weak Weyl pairs that generate factors of both type II and type III. This is because $c_0(\mathbb{N})\ast c_0(\mathbb{N})$ admits factorial representations of type II and type III as the $C^{*}$-algebra $c_0(\mathbb{N})\ast c_0(\mathbb{N})$ is not of type I. \item[(4)] Let $P$ be a closed convex cone in $\mathbb{R}^d$ which we assume is spanning, i.e. $P-P=\mathbb{R}^d$ and pointed, i.e. $P \cap -P=\{0\}$. 
Building on the two dimensional case, it is not difficult to construct, in this case, weak Weyl pairs $(U,V)$ that generate a factor of both type II and type III. Also, it is possible to construct a continuum of irreducible weak Weyl pairs which do not have commuting range projections. \end{enumerate} \end{rmrk} \iffalse \section*{Declaration} Competing interests: The author declares none. Associated data: The author declares that this manuscript has no associated data. \fi
1,108,101,565,818
arxiv
\section{Introduction} \label{sec:introduction} Interface problems arise from various models that involve multiple materials with different chemical or physical properties. In these models, the interface geometry itself may involve certain dynamics, i.e., the whole or a portion of the interface evolves. Let $\Omega\subseteq\mathbb{R}^2$ be a fixed domain and let $\Gamma(t)$ be an evolving interface curve partitioning $\Omega$ into two subdomains $\Omega^-(t)$ and $\Omega^+(t)$ on a time interval $[0,T]$. Suppose there is a certain velocity field $\mathcal{V}(X,t)$ guiding the movement of the interface curve, i.e., \begin{equation} \label{velocity} \frac{dX}{dt} = \mathcal{V}(X,t) ~~~~ X\in \Gamma(t). \end{equation} We further let $\beta$ be a piecewise constant function such that \begin{equation*} \beta(X,t)= \left\{\begin{array}{cc} \beta^- & \text{for} \; X\in \Omega^-(t) ,\\ \beta^+ & \text{for} \; X\in \Omega^+(t), \end{array}\right. \end{equation*} which is associated with some physical or chemical properties of the materials occupying each subdomain. In this article, we consider the following parabolic interface model \begin{subequations}\label{model} \begin{align} \label{inter_PDE} \partial_t u -\nabla\cdot(\beta\nabla u)=f, \;\;\;\; & \text{in} \; \Omega = \Omega^- \cup \Omega^+, ~~ t\in [0,T], \\ u(\cdot,t)=0, \;\;\;\; &\text{on} \; \partial\Omega, ~~ t\in [0,T], \\ u(\cdot,0) = u_0 ,\;\;\;\; &\text{in} \; \Omega = \Omega^- \cup \Omega^+. \end{align} The following jump conditions are imposed on the interface $\Gamma(t)$: \begin{align} [u]_{\Gamma(t)} &:=u^-|_{\Gamma(t)} - u^+|_{\Gamma(t)}= 0, ~~ t\in [0,T], \label{jump_cond_1} \\ \big[\beta \nabla u\cdot \mathbf{n}\big]_{\Gamma(t)} &:=\beta^- \nabla u^-\cdot \mathbf{n}|_{\Gamma(t)} - \beta^+ \nabla u^+\cdot \mathbf{n}|_{\Gamma(t)} = 0, ~~ t\in [0,T], \label{jump_cond_2} \end{align} \end{subequations} in which $\mathbf{ n}$ is the unit normal vector to $\Gamma(t)$. 
Here we only discuss the homogeneous jump conditions in the analysis, and the non-homogeneous jumps can be simply handled by the enriched functions as discussed by Babu\v{s}ka et al.\ in \cite{2020AdjeridBabukaGuoLin}. The parabolic interface model in \eqref{velocity} and \eqref{model} appears widely in many applications. A well-known example is the Stefan problem \cite{1993Almgren,1997ChenMerrimanOsherSmereka} to model solidification process where $u$ represents the temperature and the velocity $\mathcal{V}$ is computed by the flux of temperature across the interface. It also appears in the Burton-Cabrera-Frank-type model for epitaxial growth of thin films \cite{2003CaflischLi} where $u$ denotes the adatom density and the velocity $\mathcal{V}$ depends on the flux of adatom density across the interface. Another example can be found in using shape optimization methodology to reconstruct inclusions governed by heat equations \cite{2013HarbrechtTausch}. In this case, the velocity is associated with the direction that shape functionals have the greatest descent rate, and computed through adjoint equations. It is well-known that moving interface problems may cause challenges to simulation since the modeling domain itself is evolving. If traditional finite element methods are applied, meshes have to be generated to fit the interface, and thus have to be moving or regenerated according to interface movement; otherwise the accuracy of the numerical solutions can be destroyed \cite{2000BabuskaOsborn}. The general principle is to reduce the frequency of completely remeshing procedure as much as possible, since remeshing could be troublesome, time-consuming and introduce projection or interpolation errors. There have been many moving mesh methods proposed in the literature such as the early research \cite{1967Winslow} by Winslow based on solving elliptic-type PDEs to generate mapping for mesh generation and \cite{1992TezduyarBehrLiou} based on time-space formulation. 
Another typical example is the so called arbitrary Lagrangian-Eulerian (ALE) method \cite{1981HughesLiuZimmermann,2020LanRamirezSun} to solve fluid-structure-interaction (FSI) problems. In addition, we also refer readers to moving mesh methods based on Harmonic mappings \cite{2007DiLiTangZhang,2009HuLiTang} applied to diffusive interface models. Alternatively, in order to completely remove the burden of mesh moving or remeshing procedure in the computation, numerical methods based on interface-independent unfitted meshes have evoked a lot of interests among many researchers in the past decades. To handle interface-cutting elements, a group of methods enforce the jump conditions in the computation scheme such as the immersed interface methods (IIM) \cite{1994LevequeLi,1997Li} based on the finite difference framework, and CutFEM \cite{2015BurmanClaus,2017HuangWuXiao} and fictitious domain methods \cite{2017WangSun} based on the finite element (FE) framework. In the context of FE methods (FEM), another group of methods attempt to use some specially designed shape functions to incorporate the jump information such as generalized FEM \cite{1983BabuskaOsborn}, multiscale FEM (MsFEM) \cite{2010ChuGrahamHou}, extended FEM (XFEM) \cite{2001DolbowMoesBelytschko} and immersed finite element (IFE) method to be discussed in this article. It is important to note that the theoretical analysis has been extensively studied for all these unfitted mesh methods on stationary interface problems, but the theoretical work on moving interface problems is rather limited in the literature. When interface evolves, an extra obstacle stems from the variation of approximation spaces and computation schemes in dynamics. For moving mesh methods, the analysis is based on the mesh-generation mapping between the fixed reference domain and the evolving physical domain, see \cite{1997HuangRussel} and particularly \cite{2017StefanThomas,2020LanRamirezSun} for interface problems. 
But this strategy is not suitable for unfitted mesh methods since the dynamics of approximation spaces is independent of the mesh. To address this issue, in \cite{2013LehrenfeldReusken} the authors considered a space-time discontinuous Galerkin method based on XFEM but only suboptimal convergence with respect to time can be obtained. The author in \cite{2013Zunino} studied a backward Euler XFEM but the analysis approach depends on certain strong assumptions on the interpolation operators, see (12)-(15) in that article. The core idea of IFE methods is to use piecewise polynomials on interface elements to capture the jump behavior of the exact solutions. IFE methods are especially attractive for moving interface problems not only because they can be used on unfitted meshes but also the IFE spaces are isomorphic to the standard FE spaces defined on the same mesh, namely the degrees of freedom also keep unchanged in dynamics. Since the IFE method was first introduced in \cite{1998Li}, it has been applied to solve various moving interface problems. For instance, the authors in \cite{2015AdjeridChaabaneLin,2018AdjeridChaabaneLinYue} developed the IFE method for incompressible interfacial flows governed by Stokes equations and applied it to simulate drop behavior in shear and extensional flow. The authors in \cite{2019AdjeridMoon} investigated the IFE method for acoustic wave propagation problems where the simulation is conducted for an air bubble moving in water. A simulation for a moving object by IFE methods in electromagnetic field was conducted in \cite{2018BaiCaoHeLiuYang}. An IFE-based shape optimization method for geometric inverse problems was proposed in \cite{2018GuoLinLinElasto}. As far as we know, the numerical exploration for convergence behavior, without an error analysis, can be only found in \cite{2013HeLinLinZhang,2013LinLinZhang1} for parabolic interface problems. 
Despite these applications and numerical exploration, the theoretical analysis still remains open. Roughly speaking, the key difficulty in the analysis of IFE methods comes from the insufficient regularity of IFE functions including the kink across interface and discontinuities across interface edges. Namely the local IFE spaces on interface elements are only in $H^1$, and the global IFE space is not even $H^1$-conforming which all are weaker than the standard FE spaces. Thus many critical results such as trace/inverse inequalities and interpolation/projection errors can not be proved by standard techniques. For static interface problems (no time), a series of articles have built a systematic analysis framework \cite{2020AdjeridBabukaGuoLin,2019GuoLin,2003LiLinWu,2015LinLinZhang} which can establish those inequalities and estimates for IFE functions. These results are then employed in \cite{2015LinYangZhang,2020LinZhuang} to analyze the IFE methods for time-dependent problems but with the stationary interface. However there is still a gap between the analysis for stationary and unstationary interface problems due to the discontinuities of IFE spaces not only in spatial direction but also the temporal direction. Thanks to the isomorphism between the IFE and FE spaces, we are able to construct a uniform weak form throughout the dynamics and restrict all the variations only to the IFE spaces. This idea motivates us to reconsider the discontinuities of IFE spaces along the temporal direction from the perspective of time stepping discontinuous Galerkin method \cite{1985ErikssonJohnsonThomee}, and thus recast the time stepping IFE scheme into the framework of time-dependent adaptive methods \cite{1991ErikssonJohnsonI,1995ErikssonClaes}. The isomorphism also enables us to show that the IFE spaces share some nice properties of their FE images such as the trace inequality which is non-trivial since the IFE spaces are not $H^1$-conforming. 
With these preparations, we present the first fully discrete optimal error estimates for a backward Euler IFE method solving the parabolic interface model \eqref{model}. This article consists of five additional sections. In the next section, we set up some basic notations and assumptions. In Section \ref{sec:ife_discret}, we recall the IFE spatial discretization and develop the backward Euler method. In Section \ref{sec:pre_est} we prepare some fundamental estimates. The fully discrete error estimates are presented in Section \ref{sec:error_est}. Some numerical experiments are shown in the last section to validate the analysis. \section{Notations and Assumptions} Throughout this article, we let $\mathcal{T}_h$ be a family of shape regular and quasi-uniform triangular partitions of $\Omega$ which is independent of the evolving interface $\Gamma(t)$. For each $T\in \mathcal{T}_h$, we let $h_T$ be its diameter and define $h=\max_{T\in\mathcal{T}_h}h_T$ as the mesh size. Also we let $\mathcal{E}_h$, $\mathring{\mathcal{E}}_h$ and $\mathcal{N}_h$ be the collection of edges, interior edges and mesh nodes, respectively. We denote all the elements intersecting with $\Gamma(t)$ by $\mathcal{T}^i_h(t)$, i.e., the collection of interface elements. Similarly, we define the collection of interface edges as $\mathcal{E}^i_h(t)$. We emphasize that these two collections are all time-dependent, i.e., they depend on the interface location at $t$. In the analysis we employ a generic constant $C$ which is independent of mesh size and the interface location relative to the mesh. For each manifold $\omega\subseteq\Omega$, we define $H^k(\omega)$ as the standard Hilbert space with the norm $\|\cdot\|_{H^k(\omega)}$, and define the time-dependent Bochner space $H^l(0,T;H^k(\omega))$ with the norm $\|\cdot\|_{H^l(0,T;H^k(\omega))}$. 
If $|\omega\cap\Gamma|\neq0$, we let $\omega^{\pm}=\Omega^{\pm}\cap\omega$, define the split Hilbert space $H^k(\omega^-\cup\omega^+)=H^k(\omega^-\cup\omega^+,t)=\{v:v\in H^k(\omega^{\pm}(t))\}$ and further define the space involving the jump conditions: \begin{equation} \label{split_space} \widetilde{H}^k(\omega,t) = \{ v\in H^k(\omega^{\pm}(t))~:~ [v]_{\Gamma(t)}=0,~ [\beta\nabla v\cdot{\bf n}]_{\Gamma(t)} = 0 \} \end{equation} where we assume $k>3/2$ such that the traces are well-defined, and there clearly holds $\widetilde{H}^k(\omega)\subseteq H^1(\omega)\cap H^k(\omega^-\cup\omega^+)$. Note that the two spaces above are all time-dependent due to $\Gamma(t)$, but we shall drop $t$ if there is no cause of confusion. Then the norms associated with $\widetilde{H}^k(\omega)$ and $H^k(\omega^-\cup\omega^+)$ are understood as $\|\cdot\|^2_{H^k(\omega)}=\|\cdot\|^2_{H^k(\omega^+)}+\|\cdot\|^2_{H^k(\omega^-)}$. We also denote $H^k_0(\omega)$, $H^k_0(\omega^-\cup\omega^+)$ and $\widetilde{H}^k_0(\omega)$ as the subspaces with zero trace on $\partial \omega$. Furthermore, on the mesh $\mathcal{T}_h$, we define an underlying space containing all the approximation spaces considered in this article \begin{equation} \begin{split} \label{split_space_2} W_h = \{ v_h \in L^2(\Omega)~:~ &v_h|_T\in H^1(T) ~ \forall T\in\mathcal{T}_h ~ \text{and} ~ v_h ~ \text{is continuous at each} ~ X\in\mathcal{N}_h, ~ v_h|_{\partial\Omega}=0, \\ & \nabla v_h\cdot{\bf n} ~ \text{is well-defined on each} ~ e\in \mathcal{E}_h ~\text{and belongs to} ~L^2(e) \} \end{split} \end{equation} Furthermore we define $\mathbb{P}_k(\omega)$ as the polynomial space with the degree not greater than $k$ where $k$ is any non-negative integer. We also define $(\cdot,\cdot)_{L^2(\omega)}$ as the standard $L^2$ inner product on ${\omega}$. At each $t\in [0,T]$, we assume $\Gamma(t)$ is a sufficiently smooth simple Jordan curve, namely it does not intersect itself. 
For simplicity, we also assume $\Gamma(t)$ does not touch the boundary. Furthermore we assume the interface only intersects an element $T$ with exactly two points located on different edges as shown in Figure \ref{fig:interf_elem}. This assumption is widely used for many unfitted mesh methods on stationary interface problems, see \cite{2015BurmanClaus,2016GuoLin,1994LevequeLi} and the references therein. We then connect all these intersection points to form a polyline $\Gamma_h(t)$ as the linear approximation to $\Gamma(t)$ shown in Figure \ref{fig:mesh}. An alternative way to construct $\Gamma_h(t)$ employs the level-set method \cite{2001OsherFedkiw} with piecewise linear elements. Namely, for a level-set representation $\varphi(t)$ of $\Gamma(t)$, we let $\varphi_h(t)$ be its continuous piecewise linear approximation computed by some algorithm, and then define $\Gamma_h(t)$ as the zero level-set of $\varphi_h(t)$. Here $\Gamma_h(t)$ exactly satisfies the assumption above since $\varphi_h(t)$ is piecewise linear; but the intersection points of $\Gamma_h(t)$ are in general different from those of $\Gamma(t)$. We emphasize that this linear approximation $\Gamma_h(t)$ has $\mathcal{O}(h^2)$ geometric accuracy to the original interface $\Gamma(t)$ which is sufficient for the linear finite element method considered in this article. As for higher order methods, a higher order geometric approximation is needed and we refer readers to \cite{2010LiMelenkWohlmuthZou} for more details. IFE methods can be also applied to solve stationary interface problems with arbitrary high order accuracy \cite{2019GuoLin}. 
\begin{figure}[h] \centering \begin{minipage}{.42\textwidth} \centering \includegraphics[width=2.2in]{mesh.pdf} \caption{An unfitted mesh} \label{fig:mesh} \end{minipage} \hspace{2cm} \begin{minipage}{.4\textwidth} \centering \ \includegraphics[width=2in]{interface_elem.pdf} \caption{An interface element} \label{fig:interf_elem} \end{minipage} \end{figure} To end this section, we recall the Reynolds Transport Theorem \cite{1903Reynolds} in the context of fluid dynamics (a similar one referred to as shape derivative formula can be found in the context of shape calculus, see (2.168) in \cite{J.Sokolowski_J.-P.Zolesio_1992}). Suppose the velocity $\mathcal{V}(t)$ is sufficiently smooth on $\Gamma$, then given any differentiable functional defined in terms of integral on $\Omega^{\pm}$ \begin{equation} \label{functional_1} \mathcal{J}^{\pm}(t) = \int_{\Omega^{\pm}(t)} j(t,X) dX \end{equation} its temporal derivative with respect to the $\mathcal{V}$ direction can be calculated by \begin{equation} \label{functional_2} \frac{d}{dt} \mathcal{J}^{\pm}(t) = \int_{\Omega^{\pm}(t)} \partial_t j(t,X) dX + \int_{\Gamma(t)} j \mathcal{V}\cdot{\bf n} ds, \end{equation} where ${\bf n}$ is the normal vector to $\Gamma$ and outward to $\Omega^{\pm}$. \section{IFE Discretization} \label{sec:ife_discret} In this section, we first describe a linear IFE method for the spatial approximation, and then present a backward Euler method for the temporal approximation. \subsection{Spatial Discretization} The core of IFE methods is the so called IFE functions to approximate the jump conditions. At each $t$, let's define $\Gamma^T_{h}(t)=\Gamma_h(t)\cap T$ for every interface element $T\in\mathcal{T}^i_h(t)$ which is simply the segment connecting the intersection points shown in Figure \ref{fig:interf_elem}, and without causing any confusion we let $\Gamma^T_{h}(t)$ divide $T$ into $T^{\pm}(t)$. 
Then on each interface element $T\in\mathcal{T}^i_h(t)$ with the vertices $A_j$, $j=1,2,3$, the linear IFE space consists of piecewise linear polynomials such that they satisfy the jump conditions on $\Gamma^T_{h}(t)$, namely \begin{equation} \begin{split} \label{loc_IFE_spa} S_{h,T}(t) = & \{ v_h~:~ v^{\pm}_h=v_h|_{T^{\pm}}\in \mathbb{P}_1(T^{\pm}), ~ [v_h]_{\Gamma_{h}^T(t)}=0, ~ [\beta \nabla v_h\cdot\bar{{\bf n}}]_{\Gamma_{h}^T(t)}=0 \} \\ = & \text{Span}\{ \psi_{1,T}, \psi_{2,T}, \psi_{3,T} \} \end{split} \end{equation} where $\bar{{\bf n}}$ is the normal vector to $\Gamma^T_h(t)$, and $ \psi_{i,T}$, $i=1,2,3$ are the Lagrange-type shape functions satisfying \begin{equation} \label{unisolv} \psi_{i,T}(A_j) = \delta_{ij} ~~~~ i,j=1,2,3. \end{equation} The unisolvence of these shape functions is guaranteed regardless of interface location and $\beta^{\pm}$, and we refer interested readers to Theorem 5.3 of \cite{2016GuoLin} for more details. On all the non-interface elements, the local IFE spaces are simply linear polynomial spaces, i.e., $S_{h,T}(t)=\mathbb{P}_1(T)$. We note that these local IFE spaces vary in dynamics since the interface is evolving. We define the global IFE space \begin{equation} \label{glob_IFE_spa} S_h(t) = \{ v_h\in L^2(\Omega)~:~ v_h|_T\in S_{h,T}(t) ~ \forall T\in \mathcal{T}_h, ~ v_h ~ \text{is continuous at each} ~ X\in \mathcal{N}_h ~ \text{and} ~ v_h|_{\partial\Omega}=0 \}. \end{equation} Clearly we have $S_{h,T}(t)\subseteq H^1(T)$ but $S_h(t)$ is not $H^1$-conforming, i.e., $S_h(t) \not\subset H^1(\Omega)$ because IFE functions may not be continuous across interface edges. We note that the global IFE space in \eqref{glob_IFE_spa} is isomorphic to the standard continuous piecewise linear FE space denoted by $\widetilde{S}_h$. 
To see this, let's define the standard nodal interpolation operator \begin{equation} \label{iso_map} \mathcal{I}_h(t)~:~ W_h \longrightarrow S_h(t) ~~~ \text{such that} ~~ \mathcal{I}_h(t) v_h(X) = v_h(X) ~~ \forall X\in \mathcal{N}_h. \end{equation} Here we note that $\mathcal{I}_h(t)$ is time-dependent purely because its range $S_h(t)$ depends on time, but the manner of the definition itself remains unchanged. We also define the local interpolation $\mathcal{I}_{h,T}=\mathcal{I}_h|_T$. Since $\widetilde{S}_h\subseteq W_h$, the restriction of $\mathcal{I}_h(t)$ to $\widetilde{S}_h$ exactly gives the isomorphism between these two spaces. For example, we plot an IFE function in $S_h$ in Figure \ref{fig:ife_fun}\subref{ife_fun_1} and its isomorphic image in $\widetilde{S}_h$ in Figure \ref{fig:ife_fun}\subref{ife_fun_2}. Besides, comparing these two functions, we can clearly see that the IFE function can capture details much better across the interface while the FE function just loses interface information, but away from the interface they are exactly the same. Zooming in on the function in Figure \ref{fig:ife_fun}\subref{ife_fun_1} we can see the slight discontinuities on some interface edges in Figure \ref{fig:ife_fun}\subref{ife_fun_1_zoom}. Moreover it has been proved that the IFE functions/spaces share many nice properties similar to the standard FE functions such as optimal approximation capabilities, trace/inverse inequalities and uniform bounds. These properties are presented in a series of articles \cite{2016GuoLin,2004LiLinLinRogers,2015LinLinZhang}. For readers' sake, we shall recall these results here since they will be used for the analysis later. 
\begin{theorem} There exists a constant $C$ such that for every interface $\Gamma(t)$ and interface element $T$ \begin{subequations} \label{recall_theorem} \begin{align} (\textbf{approximation capability}) ~~ &\| u - \mathcal{I}_h u \|_{L^2(\Omega)} + h |u - \mathcal{I}_h u|_{H^1(\Omega)} \le Ch^2 \| u \|_{H^2(\Omega)} ~~ \forall u\in \widetilde{H}^2(\Omega), \label{thm_appro_eq0} \\ (\textbf{inverse inequality}) ~~ &\| \nabla v_h \|_{L^2(T)} \le Ch^{-1}_T \| v_h \|_{L^2(T)} ~~ \forall v_h\in S_{h,T}(t), \label{inver_inequa} \\ (\textbf{trace inequality}) ~~& \| \nabla v_h \|_{L^2(e)} \le Ch^{-1/2}_T \| \nabla v_h \|_{L^2(T)} ~~ \forall v_h\in S_{h,T}(t) , \label{trace_inequa} \\ (\textbf{boundedness}) ~~ &| \psi_{i,T} |_{W^{j,\infty}(T)} \le Ch^{-j}_T,~~~j=0,1, ~ i=1,2,3. \label{boundedness} \end{align} \end{subequations} \end{theorem} \begin{figure}[h] \centering \begin{subfigure}{.3\textwidth} \includegraphics[width=2in]{IFE_fun_1_edit_new} \caption{A global function in $S_h(t)$} \label{ife_fun_1} \end{subfigure} ~ \begin{subfigure}{.3\textwidth} \includegraphics[width=2in]{IFE_fun_2_edit_new} \caption{The isomorphic image in $\widetilde{S}_h(t)$} \label{ife_fun_2} \end{subfigure} ~ \begin{subfigure}{.32\textwidth} \includegraphics[width=1.8in]{IFE_fun_1_zoomin_edit_new} \caption{Discontinuities on interface edges} \label{ife_fun_1_zoom} \end{subfigure} \caption{Plots of IFE functions} \label{fig:ife_fun} \end{figure} However since the IFE functions lose the global continuity, the simple continuous Galerkin scheme yields suboptimal convergence \cite{2015LinLinZhang}. To address this issue, the authors in \cite{2015LinLinZhang} added interior penalties on edges to handle the discontinuities. 
To describe the scheme, we define a symmetric bilinear form $a_h(\cdot,\cdot): W_h\times W_h\longrightarrow \mathbb{R}$ such that \begin{equation} \label{bilinear_form_1} a_h(v_h,w_h) := \int_{\Omega} \beta \nabla v_h\cdot\nabla w_h dX - \sum_{e\in\mathring{\mathcal{E}}_h} \int_e \{ \beta \nabla v_h \cdot {\bf n} \}_e [w_h]_e ds - \sum_{e\in\mathring{\mathcal{E}}_h} \int_e \{ \beta \nabla w_h\cdot {\bf n} \}_e [v_h]_e ds + \sum_{e\in\mathring{\mathcal{E}}_h} \frac{\sigma_0}{|e|} \int_e [v_h]_e [w_h]_e ds \end{equation} where $\sigma_0=\sigma \tau^{-1}$ is the stability parameter large enough with $\tau$ being the step size specified later, and \begin{equation} \label{bilinear_form_2} \{ \beta \nabla v\cdot {\bf n} \}_e = \frac{1}{2}\left( \beta \nabla v|_{T_1} \cdot {\bf n} + \beta \nabla v|_{T_2} \cdot {\bf n} \right), ~~~~~~ [v]_e = v|_{T_1} - v|_{T_2} \end{equation} with $T_1$ and $T_2$ being the neighbor elements of $e\in \mathring{\mathcal{E}}_h$. Note that $\sigma_0=\sigma \tau^{-1}$ is the so-called super-penalty also used in \cite{2013Zunino}. Then the semi-discrete IFE scheme to the parabolic interface problem \eqref{model} is to find $u_h(\cdot,t)\in S_h(t)$ at each $t$ such that \begin{equation} \label{semi_discrete} (\partial_t u_h, v_h )_{L^2(\Omega)} + a_h(u_h,v_h) = (f, v_h)_{L^2(\Omega)}, ~~~ \forall v_h \in S_h(t). \end{equation} We note that \eqref{bilinear_form_1} shares the same format as the symmetric interior penalty discontinuous Galerkin method \cite{1982Arnold,2008Riviere}. But it is essentially not a discontinuous Galerkin method since the degrees of freedom of test and trial spaces (the IFE spaces) are the same as those of the continuous piecewise linear FE spaces, i.e., the isomorphism. Furthermore we highlight that $a_h(\cdot,\cdot)$ only needs to operate on the IFE spaces $S_h(t)\subseteq W_h$, then all the penalties on non-interface edges vanish and only those on interface edges $\mathcal{E}^i_h(t)$ are non-zero due to the discontinuities. 
Namely $\forall v_h,w_h \in S_h(t)$ there holds \begin{equation} \begin{split} \label{bilinear_form_3} a_h(v_h,w_h;t) = a_h(v_h,w_h) = & \int_{\Omega} \beta \nabla v_h\cdot\nabla w_h dX - \sum_{e\in\mathcal{E}^i_h(t)} \int_e \{ \beta \nabla v_h\cdot {\bf n} \}_e [w_h]_e ds \\ -& \sum_{e\in\mathcal{E}^i_h(t)} \int_e \{ \beta \nabla w_h\cdot {\bf n} \}_e [v_h]_e ds + \sum_{e\in\mathcal{E}^i_h(t)} \frac{\sigma}{|e|} \int_e [v_h]_e [w_h]_e ds \end{split} \end{equation} which exactly reduces to the bilinear form of the so called partially penalized IFE (PPIFE) method introduced in \cite{2018GuoLinZhuang,2015LinLinZhang} for the elliptic interface problems and \cite{2020LinZhuang} for the parabolic interface problem but with the stationary interface. Actually since the bilinear form $a_h$ is only used on the IFE spaces, essentially only \eqref{bilinear_form_3} is required in computation. But here we prefer \eqref{bilinear_form_1} in analysis since it is uniform throughout dynamics independent of interface location. It makes the proposed method distinguished from other unfitted mesh methods requiring penalties on the interface itself \cite{2015BurmanClaus,2017HuangWuXiao}. This very unique feature of IFE methods enables us to restrict the variation in the algorithm to only the approximation spaces which suggests the employment of the fundamental framework of time-dependent adaptive finite element method in \cite{1991ErikssonJohnsonI}. Based on the bilinear form $a_h(\cdot,\cdot)$, let's introduce some useful operators for analysis. 
At each $t$, we define an elliptic projection $\mathcal{R}_h(t)$ and a discrete Laplace operator $\mathcal{L}_h(t)$ such that \begin{equation} \label{ellip_proj} \mathcal{R}_h(t)~:~ W_h(\Omega) \longrightarrow S_h(t), ~~~~ \text{with} ~ a_h(\mathcal{R}_h(t)w_h,v_h) = a_h(w_h,v_h) ~~ \forall v_h \in S_h(t), \end{equation} \begin{equation} \label{discre_proj} \mathcal{L}_h(t)~:~ W_h(\Omega) \longrightarrow S_h(t), ~~~~ \text{with} ~ (\mathcal{L}_h(t)w_h,v_h)_{L^2(\Omega)} = a_h(w_h,v_h) ~~ \forall v_h \in S_h(t), \end{equation} where these two operators are time-dependent since their images are time-dependent. Note that $\mathcal{R}_h(t)$ is well defined since $a_h$ is equivalent to the one in \eqref{bilinear_form_3} which is coercive on the IFE spaces (Lemma 4.1 in \cite{2015LinLinZhang}). The elliptic projection has been widely used in the semi and fully discrete analysis of numerical methods for time-dependent PDEs \cite{1991ErikssonJohnsonI,2008Riviere}. Its IFE version in \eqref{ellip_proj} has also been used for parabolic interface problems \cite{2015LinYangZhang,2020LinZhuang} with a stationary interface where the IFE spaces do not evolve so the related elliptic projection stay unchanged. Using Theorem 4.6 in \cite{2018GuoLinZhuang} for stationary interface problems, we immediately have the following estimate. \begin{theorem} \label{thm_ellip_proj_err} There exists a constant $C$ such that for every $u\in \widetilde{H}^2(\Omega)$ with some interface $\Gamma(t)$ \begin{equation} \label{thm_ellip_proj_err_eq0} \| u - \mathcal{R}_h(t) u \|_{L^2(\Omega)} + h| u - \mathcal{R}_h(t) u |_{H^1(\Omega)} \le Ch^2 \| u \|_{H^2(\Omega)}. \end{equation} \end{theorem} Some more delicate results about these operators and the bilinear form $a_h(\cdot,\cdot)$ will be derived in Section \ref{sec:pre_est}. \subsection{Temporal Discretization} In this subsection, we present a backward Euler time stepping IFE method for the parabolic moving interface model. 
As usual, we partition $[0,T]$ into $0=t_0<t_1<t_2<\cdots<t_{N}=T$ and define subintervals $J_n=(t_{n-1},t_n]$, $n=1,2,...,N$ which have equal length $\tau=| J_n |$. From now on, for simplicity at these discrete time points we shall denote the interpolation $\mathcal{I}^n_h=\mathcal{I}_h(t_n)$, the elliptic projection $\mathcal{R}^n_h=\mathcal{R}_h(t_n)$ and the discrete Laplace operator $\mathcal{L}^n_h=\mathcal{L}_h(t_n)$ as well as the IFE spaces $S^n_h=S_h(t_n)$, $n=0,1,\cdots,N$. In addition, for each sequence $v^n_h\in S^n_h$, $n=0,1,...,N$, we define the temporal finite difference operator \begin{equation} \label{time_fin_diff} \delta_t v^n_h = \frac{v^n_h - v^{n-1}_h}{\tau}, ~~~ n=1,2,...N. \end{equation} Then the proposed backward Euler IFE method is to find a sequence $u^n_h\in S^n_h$ to approximate $u^n:=u(t_n)$, such that \begin{equation} \label{time_IFE_1} (\delta_t u^n_h, v^n_h)_{L^2(\Omega)} + a_h(u^n_h, v^n_h) = (f(t_n),v^n_h)_{L^2(\Omega)}, ~~~~ \forall v^n_h \in S^n_h, ~~ n=1,\cdots,N, \end{equation} with $u^0_h = \mathcal{R}^0_h u_0$. Here we emphasize $a_h(\cdot,\cdot)$ can be understood as the one in \eqref{bilinear_form_3} and only the approximation spaces are changing in \eqref{time_IFE_1}. We note that \eqref{time_IFE_1} is readily used for computation, however it is not convenient for analysis in the present situation that approximation spaces are evolving at each step. To see this, let's apply the standard strategy by decomposing the error $u-u_h = u^n - u^n_h$ at $t=t_n$ into \begin{equation} \label{err_decomp} \xi_h^n = \mathcal{R}^n_hu - u^n_h \in S^n_h ~~~~~~ \text{and} ~~~~~~ \eta_h^n = u^n - \mathcal{R}^n_h u \in W_h. 
\end{equation} Then subtracting \eqref{time_IFE_1} from the counterpart for the exact solution $u$ and taking $v^n_h = \xi^n_h$, we obtain \begin{equation} \label{time_IFE_2} ( \delta_t \xi^n_h, \xi^n_h )_{L^2(\Omega)} + a_h( \xi^n_h, \xi^n_h) = - (\delta_t \eta^n_h, \xi^n_h)_{L^2(\Omega)} - (\partial_t u^n - \delta_t u^n_h, \xi^n_h )_{L^2(\Omega)}. \end{equation} The key of the standard approach is to estimate each term in the right hand side of \eqref{time_IFE_2}. If the interface is stationary, i.e., $\mathcal{V}=0$, then $\mathcal{R}^n_h=\mathcal{R}_h$ is independent of time, and thus we obtain $ \delta_t \eta^n_h = \delta_t u^n - \delta_t\mathcal{R}^n_h u = \delta_t u^n - \mathcal{R}_h \delta_t u^n = (\mathcal{I}-\mathcal{R}_h) \delta_t u^n $ where $\mathcal{I}$ is the identity operator. So the estimate directly follows from the approximation result of the elliptic projection \eqref{thm_ellip_proj_err_eq0}. However if the interface evolves, $\mathcal{R}^n_h$ is not commuting with $\delta_t$ anymore, and one can only obtain suboptimal estimate for $\delta_t \eta^n_h$. This issue is also discussed in Remark 3.1 of \cite{2013LehrenfeldReusken}. \begin{remark} \label{rem_time_IFE_1} It is also interesting to note that the continuous temporal differential operator $\partial_t$ is not commuting with $\mathcal{R}_h(t)$ either. Actually for $u\in \widetilde{H}^2(\Omega)$ with some interface $\Gamma(t)$ it is easy to see $\mathcal{R}_h(t)\partial_t u\in S_h(t)$ but $\partial_t \mathcal{R}_h(t)u(t)\notin S_h(t)$ since the latter one does not satisfy the homogeneous jump conditions on $\Gamma_h(t)$ anymore, see \cite{J.Sokolowski_J.-P.Zolesio_1992} for more details. 
\end{remark} Since the key issue is the variation of approximation spaces, it is reasonable to reconsider the fully discrete scheme \eqref{time_IFE_1} from the point of view of the discontinuous Galerkin time stepping method introduced in \cite{1985ErikssonJohnsonThomee}, and this idea is then used for the time-dependent adaptive methods in \cite{1991ErikssonJohnsonI,1995ErikssonClaes}. To adopt this framework, we introduce the spaces \begin{equation} \label{IFE_space_time_new} \mathbb{W}_h = \{ V_h\in L^2(0,T;W_h)~:~ V_h|_{J_n} \in H^1(J_n;W_h), ~ n=1,2,...N \}, \end{equation} \begin{equation} \label{IFE_space_time} \mathbb{S}_h = \{ V_h\in L^2(0,T;W_h)~:~ V_h|_{J_n} := V^n_h\in S^n_h, ~ n=1,2,...N \}, \end{equation} where functions in $\mathbb{S}_h$ are constant with respect to time on each interval $J_n$. In the error analysis, we mainly focus on the spaces in \eqref{IFE_space_time_new} and \eqref{IFE_space_time}, and use the capital notations, such as $V_h$, to refer to functions in these spaces. For each $V_h\in \mathbb{W}_h$, we define $V^n_h=V_h(t^-_{n})$, $n=0,1,...,N$, and in particular if $V_h\in \mathbb{S}_h$ we have $V^n_h=V_h(t^-_{n})=V_h(t^+_{n-1})$. Furthermore, we denote $[[V_h]]_{n-1} :=V_h(t^+_{n-1}) - V_h(t^-_{n-1})$ as the jump at $t_{n-1}$, and in particular if $V_h\in \mathbb{S}_h$ we have $[[V_h]]_{n-1}=V^n_h-V^{n-1}_h$. Then the scheme \eqref{time_IFE_1} can be equivalently rewritten as finding $U_h\in\mathbb{S}_h$ such that for all $n$ \begin{equation} \label{time_IFE_4} ( [[U_h]]_{n-1}, V^n_h )_{L^2(\Omega)} + \tau a_h(U^n_h,V^n_h) = \tau ( f(t_n), V^n_h )_{L^2(\Omega)}, ~~~~ \forall V_h\in \mathbb{S}_h. \end{equation} Note that we indeed have $U^n_h=u^n_h$ where $u^n_h$ are from the scheme \eqref{time_IFE_1}, and in the following discussion we shall focus on $U^n_h$ to avoid confusion of notations. 
Then summing \eqref{time_IFE_4} from $n=1$ to $N$, we have the equivalent time stepping scheme involving time integration: find $U_h\in \mathbb{S}_h$ such that \begin{equation} \label{time_IFE_5} A_h(U_h,V_h) = F_h(V_h), ~~~~ \forall V_h \in \mathbb{S}_h \end{equation} where the bilinear form $A_h(\cdot,\cdot):\mathbb{W}_h\times \mathbb{W}_h \rightarrow \mathbb{R}$ is defined as \begin{equation} \begin{split} \label{time_IFE_6} A_h(U_h,V_h) &= \sum_{n=1}^N \int_{J_n} (\partial_t U_h, V_h)_{L^2(\Omega)} dt + \tau \sum_{n=1}^N a_h(U_h(t^-_n),V_h(t^-_n)) \\ & + \sum_{n=2}^N ( [[U_h]]_{n-1}, V_h(t^-_n) )_{L^2(\Omega)} + (U_h(t^+_0), V_h(t^-_1))_{L^2(\Omega)} \end{split} \end{equation} where the term $(\partial_t U_h, V_h)_{L^2(\Omega)}$ is needed due to $\partial_t u$ of the original equation, and the linear functional $F_h:\mathbb{W}_h \rightarrow \mathbb{R}$ is \begin{equation} \label{time_IFE_7} F_h(V_h) = \tau \sum_{n=1}^N ( f(t_n), V_h(t^-_n) )_{L^2(\Omega)} + (U_h(t^-_0),V_h(t^-_1))_{L^2(\Omega)} \end{equation} where $U_h(t^-_0)=U^0_h = \mathcal{R}^0_hu_0$ is the given initial condition. We emphasize that the bilinear form $A_h$ and the linear form $F_h$ are defined for time-dependent functions, not for sequences. Although \eqref{time_IFE_5} is essentially equivalent to \eqref{time_IFE_1}, \eqref{time_IFE_5} is more suitable for analysis. \section{Some Fundamental Estimates} \label{sec:pre_est} In this section, we prepare some fundamental estimates which will be used for stability and error analysis later. Although the IFE spaces $S_h(t)$ are not $H^1$ conforming globally, they are locally $H^1$ functions on each element. This feature enables us to show some nice properties. First of all, we show the following Poincar\'e-Friedrichs-type inequality. 
\begin{lemma} \label{lem_pf_inequa} There exists a constant $C$ such that for each element $T$ \begin{equation} \label{lem_pf_inequa_eq0} \min_{\chi\in\mathbb{P}_0(T)} \| v_h - \chi \|_{L^2(T)} \le Ch_T | v_h |_{H^1(T)}, ~~~ \forall v_h \in S_{h,T}(t). \end{equation} \end{lemma} \begin{proof} On non-interface elements, the result is trivial since $S_{h,T}(t)=\mathbb{P}_1(T)$. The result on interface elements also directly follows from the fact that $\mathbb{P}_0(T)\subset S_{h,T}(t)\subset H^1(T)$. \end{proof} Recalling that each interpolation $\mathcal{I}_h(t)$ is an isomorphism from $\widetilde{S}_h$ to $S_h(t)$, in the following several results, for convenience of presentation, we focus on its inverse $\mathcal{I}^{-1}_h(t)=:\tilde{\mathcal{I}}_h(t):S_h(t)\rightarrow \widetilde{S}_h$. We then show some stability estimates. \begin{lemma} \label{lem_stability} There exist constants $c$ and $C$ such that for each element $T$ \begin{equation} \label{lem_stability_eq0} c| v_h |_{H^j(T)} \le | \tilde{\mathcal{I}}_{h,T}(t) v_h |_{H^j(T)} \le C | v_h |_{H^j(T)}, ~ j=0,1, ~~~ \forall v_h\in S_{h,T}(t). \end{equation} \end{lemma} \begin{proof} Again the results on non-interface elements are trivial since the isomorphism reduces to the identity operator, and we only discuss the interface elements. We first show the estimate for $j=0$. On each element $T$ with the vertices $A_i$ and edges $e_i$, let $\psi_{i,T}$ and $\phi_{i,T}$, $i=1,2,3$, be the Lagrange shape functions of the IFE space $S_{h,T}(t)$ and the FE space $\widetilde{S}_{h,T}$, respectively. 
For each $v_h\in S_{h,T}(t)$, noting $v_h|_{\partial T}\in H^1(\partial T)$, we let $e$ be one neighbor edge of $A_1$ and obtain \begin{equation} \begin{split} \label{lem_stability_eq1} v_h(A_1) & \le Ch^{-1/2}_T \| v_h \|_{L^2(e)} + Ch^{1/2}_T \| \partial_{{\bf t}}v_h \|_{L^2(e)} \\ & \le Ch^{-1}_T \| v_h \|_{L^2(T)} + C \| \nabla v_h \|_{L^2(T)} \le Ch^{-1}_T \| v_h \|_{L^2(T)} \end{split} \end{equation} where we have used the standard trace inequality from $A_1$ to $e$ in the first inequality, the trace inequality for IFE functions given by \eqref{trace_inequa} in the second inequality and the inverse inequality for IFE functions given by \eqref{inver_inequa} in the third inequality. Then by the boundedness of $\phi_{i,T}$ and \eqref{lem_stability_eq1} we have \begin{equation} \label{lem_stability_eq2} \vertii{ \tilde{\mathcal{I}}_h(t) v_h}_{L^2(T)} = \vertii{ \sum_{i=1}^3 v_h(A_i) \phi_{i,T} }_{L^2(T)} \le C \| v_h \|_{L^2(T)} \end{equation} which gives the right inequality of \eqref{lem_stability_eq0} for $j=0$. The left inequality can be shown through a similar argument with the help of the boundedness of $\psi_{i,T}$ in \eqref{boundedness}. As for $j=1$, we note that $\mathbb{P}_0(T)\subseteq S_{h,T}\cap \widetilde{S}_{h,T}$ and thus $\tilde{\mathcal{I}}_{h,T}(t)$ preserves constants. Then for every $\chi\in \mathbb{P}_0(T)$ and $v_h \in S_{h,T}(t)$ we obtain from the inverse inequality \eqref{inver_inequa}, the $L^2$ stability above and Lemma \ref{lem_pf_inequa} that \begin{equation} \begin{split} \label{lem_stability_eq3} |\tilde{\mathcal{I}}_{h,T}(t)v_h |_{H^1(T)} & = | \tilde{\mathcal{I}}_{h,T}(t)v_h - \chi |_{H^1(T)} \le Ch^{-1}_T \| \tilde{\mathcal{I}}_{h,T}(t)v_h - \chi \|_{L^2(T)} \\ & = Ch^{-1}_T \| \tilde{\mathcal{I}}_{h,T}(t) (v_h - \chi) \|_{L^2(T)} \le Ch^{-1}_T \| v_h - \chi \|_{L^2(T)} \le C | v_h |_{H^1(T)} \end{split} \end{equation} which gives the right inequality of \eqref{lem_stability_eq0} for $j=1$. 
The left one can be proved by a similar argument. \end{proof} \begin{lemma} \label{lem_glob_stability} There exist constants $c$ and $C$ such that \begin{equation} \label{lem_glob_stability_eq0} c| v_h |_{H^j(\Omega)} \le | \tilde{\mathcal{I}}_{h}(t) v_h |_{H^j(\Omega)} \le C | v_h |_{H^j(\Omega)}, ~ j=0,1, ~~~ \forall v_h\in S_{h}(t). \end{equation} \end{lemma} \begin{proof} It immediately follows from the local stability in Lemma \ref{lem_stability}. \end{proof} \begin{lemma} \label{thm_interp_err} There exists a constant $C$ such that for each element $T$ \begin{equation} \label{thm_interp_err_eq0} \| v_h - \tilde{\mathcal{I}}_{h,T}(t)v_h \|_{L^2(T)} \le Ch_T | v_h|_{H^1(T)} ~~~~ \forall v_h\in S_{h,T}(t). \end{equation} \end{lemma} \begin{proof} Again the results are trivial on non-interface elements. On each interface element $T$, using the properties again that $\mathbb{P}_0(T)\subseteq S_{h,T}\cap \widetilde{S}_{h,T}$ and $\tilde{\mathcal{I}}_{h,T}(t)$ preserves constants, and the $L^2$ stability in Lemma \ref{lem_stability}, we have for any constant $\chi\in \mathbb{P}_0(T)$ \begin{equation} \begin{split} \label{thm_interp_err_eq1} \| v_h - \tilde{\mathcal{I}}_{h,T}(t)v_h \|_{L^2(T)} &= \| v_h - \chi + \tilde{\mathcal{I}}_{h,T}(t)\chi - \tilde{\mathcal{I}}_{h,T}(t)v_h \|_{L^2(T)} \le C \| v_h - \chi \|_{L^2(T)}. \end{split} \end{equation} Then the estimate in Lemma \ref{lem_pf_inequa} yields the desired result. \end{proof} The stability in Lemma \ref{lem_glob_stability} and the estimate in Lemma \ref{thm_interp_err} enable us to show that the IFE functions have similar properties as FE functions such as the following global trace inequality. \begin{theorem} \label{thm_glob_trace} There exists a constant $C$ such that \begin{equation} \label{thm_glob_trace_eq0} \| v_h \|_{L^2(\Gamma(t))} \le C \| v_h \|_{H^1(\Omega)} ~~~ \forall v_h \in S_h(t). 
\end{equation} \end{theorem} \begin{proof} Note that \eqref{thm_glob_trace_eq0} is true for FE functions due to the global $H^1$-conformity. Given each $v_h\in S_h(t)$, by the triangle inequality we have \begin{equation} \label{thm_glob_trace_eq1} \| v_h \|_{L^2(\Gamma(t))} \le \| v_h - \tilde{\mathcal{I}}_hv_h \|_{L^2(\Gamma(t))} + \| \tilde{\mathcal{I}}_hv_h \|_{L^2(\Gamma(t))}. \end{equation} Since $\tilde{\mathcal{I}}_hv_h \in \widetilde{S}_h(t)$, by the trace inequality and Lemma \ref{lem_glob_stability} we have \begin{equation} \label{thm_glob_trace_eq2} \| \tilde{\mathcal{I}}_hv_h \|_{L^2(\Gamma(t))} \le C\| \tilde{\mathcal{I}}_h v_h \|_{H^1(\Omega)} \le C\| v_h \|_{H^1(\Omega)}. \end{equation} It remains to estimate the first term in the right hand side of \eqref{thm_glob_trace_eq1}. On each interface element $T$, by the trace inequality Lemma 3.1 in \cite{2016WangXiaoXu}, Lemma \ref{thm_interp_err} and Lemma \ref{lem_glob_stability} with $j=1$ we have \begin{equation} \begin{split} \label{thm_glob_trace_eq3} \| v_h - \tilde{\mathcal{I}}_hv_h \|_{L^2(\Gamma(t)\cap T)} & \le Ch_T^{-1/2} \| v_h - \tilde{\mathcal{I}}_hv_h \|_{L^2(T)} + Ch_T^{1/2} | v_h - \tilde{\mathcal{I}}_hv_h |_{H^1(T)} \le C h^{1/2}_T | v_h |_{H^1(T)}. \end{split} \end{equation} Therefore, by the assumption $h_T\le C$ there holds \begin{equation} \label{thm_glob_trace_eq4} \| v_h - \tilde{\mathcal{I}}_hv_h \|^2_{L^2(\Gamma(t))} = \sum_{T\in\mathcal{T}^i_h(t)} \| v_h - \tilde{\mathcal{I}}_hv_h \|^2_{L^2(\Gamma(t)\cap T)} \le C \sum_{T\in\mathcal{T}^i_h(t)} |v_h|^2_{H^1(T)} \le C | v_h |^2_{H^1(\Omega)}. \end{equation} Putting \eqref{thm_glob_trace_eq2} and \eqref{thm_glob_trace_eq4} into \eqref{thm_glob_trace_eq1} yields the desired result. \end{proof} We emphasize that the results above are basically some delicate estimates on the difference between the IFE functions and their FE isomorphic images. To the best of our knowledge, these results have not appeared in the literature. 
Next we proceed to discuss the coercivity of the bilinear form $a_h(\cdot,\cdot)$ in \eqref{bilinear_form_1}. Due to the equivalence in \eqref{bilinear_form_3}, the coercivity is already given in Lemma 4.1 in \cite{2015LinLinZhang} and Theorem 4.3 in \cite{2018GuoLinZhuang}. But in order to handle the dynamical IFE spaces, we need more delicate results. For this purpose, we first introduce the uniform energy norm on the general broken space $W_h$: \begin{equation} \begin{split} \label{norm_1} \vertiii{v_h}^2_{h} := \| \sqrt{\beta}\nabla v_h \|^2_{L^2(\Omega)} + \sigma h^{-1} \tau^{-1} \sum_{e\in\mathring{\mathcal{E}}_h} \| [v_h]_e \|^2_{L^2(e)} + h\tau\sigma^{-1} \sum_{e\in\mathring{\mathcal{E}}_h} \| \{ \beta \nabla v_h\cdot{\bf n} \}_e \|^2_{L^2(e)} . \end{split} \end{equation} It is easy to see this is a norm on the broken space $W_h$, and we note that this energy norm is widely used in the interior penalty discontinuous Galerkin methods \cite{1982Arnold}. Here we use it for the IFE spaces and note that all the terms $\| [v_h]_e \|^2_{L^2(e)}$ on non-interface edges just vanish. A similar energy norm is also used in \cite{2018GuoLinZhuang} with only the penalty terms on interface edges. Here we add the penalty terms on all the edges such that the norm format also remains unchanged in the dynamics. This feature is important for the following error analysis. Using \eqref{thm_appro_eq0} and the argument of Theorem 4.5 in \cite{2018GuoLinZhuang}, we can show the following estimate. \begin{theorem} \label{thm_appro_energy} Suppose $u\in \widetilde{H}^2(\Omega)$ with some interface $\Gamma(t)$, there exists a constant $C$ such that \begin{equation} \label{thm_appro_energy_eq0} \vertiii{ u - \mathcal{I}_h(t)u }_h \le Ch \vertii{ u }_{H^2(\Omega)}. \end{equation} \end{theorem} We also have the following coercivity in terms of the energy norm $\vertiii{\cdot}_h$. 
\begin{theorem} \label{thm_coer} Suppose $\sigma$ is sufficiently large, then there exist constants $\kappa_1$ and $\kappa_2$ such that \begin{subequations} \label{thm_coer_eq0} \begin{align} & a_h(v_h,v_h) \ge \kappa_1 \vertiii{v_h}^2_h, ~~~~~~~~~~~ \forall v_h \in S_h(t), \label{thm_coer_eq01} \\ & a_h(v_h,w_h) \le \kappa_2 \vertiii{v_h}_h \vertiii{w_h}_h, ~~~ \forall v_h,w_h \in W_h. \label{thm_coer_eq02} \end{align} \end{subequations} \end{theorem} \begin{proof} The argument is almost the same as that of Theorems 4.3 and 4.4 in \cite{2018GuoLinZhuang}. \end{proof} Since $a_h(\cdot,\cdot)$ is coercive, we can define another uniform norm directly induced from $a_h(\cdot,\cdot)$: \begin{equation} \label{norm_2} \vertiii{v_h}^2_a = a_h(v_h,v_h), ~~~~ \forall v_h\in S_h(t), \end{equation} which, again, is independent of $\Gamma_h(t)$ during the dynamics. The two inequalities in Theorem \ref{thm_coer} together show that $\vertiii{\cdot}_a$ is equivalent to $\vertiii{\cdot}_h$ on each $S_h(t)$ where the hidden constant is uniformly bounded. However we emphasize that $a_h(\cdot,\cdot)$ is not coercive on general broken Sobolev spaces such as $W_h$. So in addition to \eqref{thm_coer_eq01}, we also need the following weak coercivity of which the underlying idea is also used in \cite{2006ChrysafinosWalkington,2007FengKarakashian} for error analysis of DG methods on dynamic meshes. \begin{theorem} \label{thm_weak_coer} Suppose $\sigma$ is sufficiently large, then there exist constants $\delta_0$ and $\delta_1$ such that \begin{equation} \label{thm_weak_coer_eq0} a_h(v_h,v_h) \ge \delta_0 \vertiii{v_h}^2_h - \delta_1 h \tau \sigma \sum_{e\in\mathring{\mathcal{E}}_h} \vertii{ \{\beta \nabla v_h\cdot{\bf n}\}_e }^2_{L^2(e)}, ~~~~~ \forall v_h\in W_h. \end{equation} \end{theorem} \begin{proof} Let $\delta_0$ be a constant to be determined later. 
Then the Young's inequality yields \begin{equation} \begin{split} \label{thm_weak_coer_eq1} a_h(v_h,v_h) - \delta_0 \vertiii{v_h}^2_h &= (1-\delta_0) \| \sqrt{\beta} \nabla v_h \|^2_{L^2(\Omega)} + (1-\delta_0) \sigma h^{-1} \tau^{-1} \sum_{e\in\mathring{\mathcal{E}}_h} \| [v_h]_e \|^2_{L^2(e)} \\ &- \sum_{e\in\mathring{\mathcal{E}}_h} \int_e \{ \beta \nabla v_h \cdot {\bf n} \}_e [ v_h ]_e ds - \delta_0 \sigma^{-1} h \tau \sum_{e\in\mathring{\mathcal{E}}_h} \vertii{ \{\beta \nabla v_h\cdot{\bf n}\}_e }^2_{L^2(e)} \\ & \ge (1-\delta_0) \| \sqrt{\beta} \nabla v_h \|^2_{L^2(\Omega)} + (1-\delta_0 - \epsilon) \sigma h^{-1} \tau^{-1} \sum_{e\in\mathring{\mathcal{E}}_h} \| [v_h]_e \|^2_{L^2(e)} \\ -& \left( \delta_0 + \frac{1}{4 \epsilon} \right) \sigma^{-1} h \tau \sum_{e\in\mathring{\mathcal{E}}_h} \vertii{ \{\beta \nabla v_h\cdot{\bf n}\}_e }^2_{L^2(e)}. \end{split} \end{equation} Taking $\delta_0=\epsilon=1/4$ and $\delta_1=\delta_0+1/(4\epsilon)=5/4$ finishes the proof. \end{proof} In the discussion below, we always assume $\sigma$ is sufficiently large such that the coercivity results above hold, without explicitly mentioning this again. Then we present a discrete Poincar\'e inequality and the stability of the elliptic projection $\mathcal{R}_h(t)$. \begin{theorem} \label{thm_dis_poinc} There exists a constant $C$ such that for each $v_h\in W_h$ \begin{equation} \label{thm_dis_poinc_eq0} \| v_h \|_{L^2(\Omega)} \le C \vertiii{ v_h }_h. \end{equation} \end{theorem} \begin{proof} The proof is in the same spirit as Lemma 2.1 in \cite{1982Arnold}. \end{proof} \begin{theorem} \label{thm_lap_stab} There exists a constant $C$ such that $ \vertiii{ \mathcal{R}_h(t) v_h }_h \le C \vertiii{ v_h }_h$. \end{theorem} \begin{proof} It immediately follows from Theorem \ref{thm_coer}. \end{proof} Now let's go back to the operators $\mathcal{R}^n_h$ and $\mathcal{L}^n_h$ at $t_n$, $n=0,1,...,N$, and show the following estimates. 
\begin{theorem} \label{thm_Rn_h_err} There exists a constant $C$ such that \begin{equation} \label{thm_Rn_h_err_eq0} \vertiii{ \mathcal{R}^n_h v_{h} - v_{h} }_h \le Ch \| \mathcal{L}^{n-1}_h v_h \|_{L^2(\Omega)} + C \tau^{1/2} \vertiii{ v_h }_{h} , ~~~~ \forall v_h\in S^{n-1}_h, ~~ n=1,...,N. \end{equation} \end{theorem} \begin{proof} Using Theorem \ref{thm_weak_coer}, for each $v_h\in S^{n-1}_h$ we have \begin{equation} \begin{split} \label{thm_Rn_err_eq4} \vertiii{ \mathcal{R}^n_h v_{h} - v_{h} }_h & \le C \verti{ a_h(\mathcal{R}^n_h v_{h} - v_{h}, \mathcal{R}^n_h v_{h} - v_{h}) }^{1/2} \\ &+ C h^{1/2} \tau^{1/2} \sigma^{1/2} \sum_{e\in\mathring{\mathcal{E}}_h} \vertii{ \{\beta \nabla ( \mathcal{R}^n_h v_{h} - v_{h} ) \cdot{\bf n}\}_e }_{L^2(e)} \end{split} \end{equation} where we denote $\chi_1=\verti{ a_h( \mathcal{R}^n_h v_{h} - v_{h}, \mathcal{R}^n_h v_{h} - v_{h}) }^{1/2}$ and $\chi_2 = \sum_{e\in\mathring{\mathcal{E}}_h} \vertii{ \{\beta \nabla ( \mathcal{R}^n_h v_{h} - v_{h} ) \cdot{\bf n}\}_e }_{L^2(e)}$, respectively, for the right hand side. For $\chi_1$, inspired by argument of Lemma 2.2 in \cite{1991ErikssonJohnsonI} we obtain: \begin{equation} \begin{split} \label{thm_Rn_err_eq5} \chi^2_1 = \verti{ a_h( \mathcal{R}^n_h v_{h} - v_{h}, \mathcal{R}^n_h v_{h} - v_{h}) } & = \verti{ a_h( \mathcal{R}^n_h v_{h} - v_{h}, v_{h}) } = \verti{ a_h( \mathcal{R}^{n-1}_h \mathcal{R}^n_h v_{h} - v_{h}, v_{h}) } \end{split} \end{equation} where we have used the definition of $\mathcal{R}^n_h$ and $\mathcal{R}^{n-1}_h$ and the fact $v_h\in S^{n-1}_h$. 
Then since $\mathcal{R}^{n-1}_h \mathcal{R}^n_h v_{h}\in S^{n-1}_h$, using the discrete Laplace operator \eqref{discre_proj}, we have \begin{equation} \label{thm_Rn_err_eq6} \verti{ a_h( \mathcal{R}^{n-1}_h \mathcal{R}^n_h v_{h} - v_{h}, v_{h}) } = |( \mathcal{R}^{n-1}_h \mathcal{R}^n_h v_{h} - v_{h}, \mathcal{L}^{n-1}_h v_h)_{L^2(\Omega)}| \le \| \mathcal{R}^{n-1}_h \mathcal{R}^n_h v_{h} - v_{h} \|_{L^2(\Omega)} \| \mathcal{L}^{n-1}_h v_h \|_{L^2(\Omega)}. \end{equation} We need to estimate the first term in the right hand side of \eqref{thm_Rn_err_eq6}. For this purpose, we split this term into \begin{equation} \label{thm_Rn_err_eq6_1} \mathcal{R}^{n-1}_h \mathcal{R}^n_h v_{h} - v_{h} = \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h + \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h. \end{equation} By the duality argument, we define two auxiliary functions $z_1\in \widetilde{H}^2_0(\Omega)$ and $z_2\in \widetilde{H}^2_0(\Omega)$ with the interface $\Gamma(t_{n-1})$ and $\Gamma(t_{n})$, respectively, satisfying the equations \begin{equation} \begin{split} \label{thm_Rn_err_eq6_2} & \nabla\cdot(\beta \nabla z_1) = \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h,\\ & \nabla\cdot(\beta \nabla z_2) = \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h. 
\end{split} \end{equation} For first equation in \eqref{thm_Rn_err_eq6_2}, multiplying it by $ \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h$ and noticing the penalties of $a_h$ are added on every interior edge, we use integration by parts to obtain \begin{equation} \begin{split} \label{thm_Rn_err_eq6_3} \vertii{ \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h }^2_{L^2(\Omega)} & = a_h( z_1, \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h ) \\ & = a_h( z_1 - \mathcal{I}_h^{n-1} z_1, \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h ) \\ & \le \vertiii{ z_1 - \mathcal{I}_h^{n-1} z_1 }_h \vertiii{ \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h }_h \end{split} \end{equation} where $\mathcal{I}_h^{n-1}$ is the interpolation to $S_h^{n-1}$ for the interface $\Gamma(t_{n-1})$. Then Theorem \ref{thm_appro_energy} and regularity of elliptic interface problems \cite{1998ChenZou,2002HuangZou} yield \begin{equation} \label{thm_Rn_err_eq6_4} \vertiii{ z_1 - \mathcal{I}_h^{n-1} z_1 }_h \le Ch \| z_1 \|_{H^2(\Omega)} \le C h \vertii{ \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h }_{L^2(\Omega)}. \end{equation} Putting \eqref{thm_Rn_err_eq6_4} into \eqref{thm_Rn_err_eq6_3}, and using the stability of $\mathcal{R}^{n-1}_h$, we have \begin{equation} \label{thm_Rn_err_eq6_5} \vertii{ \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h }_{L^2(\Omega)} \le Ch \vertiii{ \left( \mathcal{R}^{n-1}_h - \mathcal{I} \right) \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h }_h \le Ch \vertiii{ \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h }_h. 
\end{equation} Employing the second equation in \eqref{thm_Rn_err_eq6_2} with the help of the interpolation $\mathcal{I}_h(t_{n})$, we can similarly obtain \begin{equation} \label{thm_Rn_err_eq6_6} \vertii{ \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h }_{L^2(\Omega)} \le Ch \vertiii{ \left( \mathcal{R}^{n}_h - \mathcal{I} \right)v_h }_h. \end{equation} Combining \eqref{thm_Rn_err_eq6_5} and \eqref{thm_Rn_err_eq6_6} together and using \eqref{thm_Rn_err_eq4}, we have \begin{equation} \begin{split} \label{thm_Rn_err_eq7} \| \mathcal{R}^{n-1}_h \mathcal{R}^n_h v_{h} - v_{h} \|_{L^2(\Omega)} \le C h \vertiii{ \mathcal{R}^n_h v_{h} - v_{h} }_h \le Ch \chi_1 + Ch^{3/2}\tau^{1/2} \sigma^{1/2} \chi_2. \end{split} \end{equation} Now substituting \eqref{thm_Rn_err_eq6} and \eqref{thm_Rn_err_eq7} into \eqref{thm_Rn_err_eq5}, we obtain \begin{equation} \label{thm_Rn_err_eq8} \chi^2_1 \le Ch \| \mathcal{L}^{n-1}_h v_h \|_{L^2(\Omega)} \chi_1 + Ch^{3/2}\tau^{1/2} \sigma^{1/2} \| \mathcal{L}^{n-1}_h v_h \|_{L^2(\Omega)} \chi_2. \end{equation} Note that this quadratic inequality is certainly solvable. Solving this quadratic inequality, we have \begin{equation} \label{thm_Rn_err_eq9} \chi_1 \le Ch \| \mathcal{L}^{n-1}_h v_h \|_{L^2(\Omega)} + Ch^{3/4}\tau^{1/4} \sigma^{1/4} \| \mathcal{L}^{n-1}_h v_h \|^{1/2}_{L^2(\Omega)} \chi^{1/2}_2. \end{equation} Then the arithmetic inequality leads to \begin{equation} \label{thm_Rn_err_eq10} Ch^{3/4}\tau^{1/4} \sigma^{1/4} \| \mathcal{L}^{n-1}_h v_h \|^{1/2}_{L^2(\Omega)} \chi^{1/2}_2\le Ch \| \mathcal{L}^{n-1}_h v_h \|_{L^2(\Omega)} + Ch^{1/2}\tau^{1/2} \sigma^{1/2} \chi_2 . \end{equation} Combining \eqref{thm_Rn_err_eq10} and \eqref{thm_Rn_err_eq9} we obtain \begin{equation} \label{thm_Rn_err_eq11} \chi_1 \le Ch \| \mathcal{L}^{n-1}_h v_h \|_{L^2(\Omega)} + Ch^{1/2}\tau^{1/2} \sigma^{1/2} \chi_2 \end{equation} which is then put into \eqref{thm_Rn_err_eq4}. 
Now it remains to estimate $\chi_2$ through the trace inequality \eqref{trace_inequa}: \begin{equation} \begin{split} \label{thm_Rn_err_eq12} \chi_2 & \le C\sum_{e\in\mathring{\mathcal{E}}_h} \left( \vertii{ \beta \nabla \mathcal{R}^n_h v_{h} \cdot{\bf n} }_{L^2(e)} + \vertii{ \beta \nabla v_{h} \cdot{\bf n} }_{L^2(e)} \right) \\ & \le C \sum_{T\in \mathcal{T}_h} \left( h^{-1/2}_T \vertii{ \beta \nabla \mathcal{R}^n_h v_{h} }_{L^2(T)} + h^{-1/2}_T \vertii{ \beta \nabla v_{h} }_{L^2(T)} \right) \\ & \le Ch^{-1/2} \left( \vertiii{ \mathcal{R}^n_h v_{h} }_h + \vertiii{ v_{h} }_h \right) \le Ch^{-1/2} \vertiii{ v_{h} }_h \end{split} \end{equation} where in the last inequality we have also used the stability of $\mathcal{R}^n_h$ in terms of the norm $\vertiii{\cdot}_h$. Finally combining \eqref{thm_Rn_err_eq11}, \eqref{thm_Rn_err_eq12} and \eqref{thm_Rn_err_eq4}, we have the desired result. \end{proof} \begin{theorem} \label{thm_Rn_err} There exists a constant $C$ such that \begin{equation} \label{thm_Rn_err_eq0} \| \mathcal{R}^n_h v_{h} - v_{h} \|_{L^2(\Omega)} \le Ch^2 \| \mathcal{L}^{n-1}_h v_h \|_{L^2(\Omega)} + C \tau^{1/2} h \vertiii{ v_h }_{h} , ~~~~ \forall v_h\in S^{n-1}_h, ~~ n=1,...,N. \end{equation} \end{theorem} \begin{proof} By the duality argument similar to \eqref{thm_Rn_err_eq6_2}-\eqref{thm_Rn_err_eq6_5} above we can show \begin{equation} \begin{split} \label{thm_Rn_err_eq3_1} \| \mathcal{R}^n_h v_{h} - v_{h} \|_{L^2(\Omega)} & \le C h \vertiii{ \mathcal{R}^n_h v_{h} - v_{h} }_h \end{split} \end{equation} which gives the desired result by Theorem \ref{thm_Rn_h_err}. \end{proof} \begin{remark} \label{rem_Rn_err} Theorem \ref{thm_Rn_err} can be understood as a generalized estimate of (2.12) in \cite{1991ErikssonJohnsonI}, where a standard continuous Galerkin method is used and the bilinear form is simply the standard $H^1$ inner product. A similar result is also derived in \cite{2013Zunino} for the $H^1$ inner product with discontinuous coefficients.
But as the major difference/difficulty, the bilinear form $a_h(\cdot,\cdot)$ used in the IFE method may not be coercive on the general broken space $W_h$, which is the essential reason for the extra term $C\tau^{1/2}h\vertiii{v_h}_h$ appearing in \eqref{thm_Rn_err_eq0}. This feature also makes the proof much more technical. \end{remark} \section{Error Estimates} \label{sec:error_est} In this section, we proceed to estimate the errors of the fully discrete IFE scheme. For simplicity we shall assume $f=0$. We begin with the following stability results. \begin{theorem}[Stability] \label{thm_stab} Given each initial condition $U^0_h$, let $U^n_h$, $n=1,2,...,N$ be the solutions to the scheme \eqref{time_IFE_4} or \eqref{time_IFE_5}, then there exists a constant $C$ such that for any integer $M\le N$ \begin{subequations} \label{thm_stab_eq0} \begin{align} & \| U^M_h \|^2_{L^2(\Omega)} + 2\tau \kappa_1 \sum_{n=1}^M \vertiii{ U^n_h }^2_h + \sum_{n=1}^M \| [[ U_h ]]_{n-1} \|^2_{L^2(\Omega)} \le \| U^0_h \|^2_{L^2(\Omega)}, \label{thm_stab_eq01} \\ & t_M \| U^M_h \|^2_{L^2(\Omega)} + 2 \tau \kappa_1 \sum_{n=1}^M t_n \vertiii{ U^n_h }^2_h + \sum_{n=1}^M t_{n} \| [[ U_h ]]_{n-1} \|^2_{L^2(\Omega)} \le C\| U^0_h \|^2_{L^2(\Omega)}, \label{thm_stab_eq03} \end{align} and if $h^2 \le \gamma \tau$ for some positive constant $\gamma$ small enough, then there exists a constant $C$ such that for any $M\le N$ \begin{align} & t_M \vertiii{ U^M_h }^2_h + \tau \sum_{n=1}^M t_n \| \mathcal{L}^{n}_h U^n_h \|^2_{L^2(\Omega)} + \tau^{-1} \sum_{n=1}^{M-1} t_n \| [[ U_h ]]_{n} \|^2_{L^2(\Omega)} \le C \| U^0_h \|^2_{L^2(\Omega)}, \label{thm_stab_eq02} \end{align} \end{subequations} \end{theorem} \begin{proof} We prove each of the inequalities above individually. \vspace{0.1in} \textit{Proof of \eqref{thm_stab_eq01}}.
In \eqref{time_IFE_5}, taking $V_h=U_h$, namely in \eqref{time_IFE_4} setting $V^n_h=U^n_h$, $n=1,2,...,M$, and summing the equalities together, we have \begin{equation} \begin{split} \label{thm_stab_eq1} & \tau \sum_{n=1}^M a_h(U^n_h,U^n_h) + \sum_{n=1}^M ( [[U_h]]_{n-1}, U^n_h )_{\Omega} \\ = & \tau \sum_{n=1}^M a_h(U^n_h,U^n_h) + \frac{1}{2} \| U^M_h \|^2_{L^2(\Omega)} + \frac{1}{2} \sum_{n=1}^M \| [[ U_h ]]_{n-1} \|^2_{L^2(\Omega)} - \frac{1}{2} \| U^0_h \|^2_{L^2(\Omega)} = 0 \end{split} \end{equation} where we have used the identity $2( [[U_h]]_{n-1}, U^n_h )_{\Omega} = \| U^n_h \|^2_{L^2(\Omega)} + \| [[ U_h ]]_{n-1} \|^2_{L^2(\Omega)} - \| U^{n-1}_h \|^2_{L^2(\Omega)}$. Then the coercivity \eqref{thm_coer_eq01} finishes the proof. \vspace{0.1in} \textit{Proof of \eqref{thm_stab_eq03}}. The argument is similar. In \eqref{time_IFE_4} setting $V^n_h=U^n_h$, multiplying it by $t_n$ and summing over $n=1,2,...,M$, we have \begin{equation} \label{thm_stab_eq1_1} \tau \sum_{n=1}^M t_n a_h(U^n_h, U^n_h) + \frac{1}{2} \sum_{n=1}^M t_n \| [[ U_h ]]_{n-1} \|^2_{L^2(\Omega)} + \frac{1}{2} t_M \| U^M_h \|^2_{L^2(\Omega)} = \tau \sum_{n=1}^{M} \| U^{n-1}_h \|^2_{L^2(\Omega)}. \end{equation} Then applying the bound of the second term in \eqref{thm_stab_eq01} together with the discrete Poincar\'e inequality in Theorem \ref{thm_dis_poinc} and the coercivity \eqref{thm_coer_eq01} we have the desired result. \vspace{0.2in} \textit{Proof of \eqref{thm_stab_eq02}}. First of all we observe the following identities \begin{subequations} \label{thm_stab_eq3} \begin{align} & a_h(U^n_h, \mathcal{L}^n_hU^n_h) = ( \mathcal{L}^n_hU^n_h, \mathcal{L}^n_h U^n_h)_{L^2(\Omega)} = \| \mathcal{L}^n_h U^n_h \|^2_{L^2(\Omega)}, \label{thm_stab_eq3_1} \\ & (U^n_h, \mathcal{L}^n_h U^n_h )_{L^2(\Omega)} = a_h( U^n_h, U^n_h ) = \vertiii{ U^n_h }^2_a, \label{thm_stab_eq3_2} \\ & ( \mathcal{R}^n_hU^{n-1}_h, \mathcal{L}^n_h U^n_h )_{L^2(\Omega)} = a_h( \mathcal{R}^n_hU^{n-1}_h, U^n_h ).
\label{thm_stab_eq3_3} \end{align} \end{subequations} Note that $\mathcal{R}^n_hU^{n-1}_h$ and $U^n_h$ are both in $S^n_h$, we have the following identity \begin{equation} \label{thm_stab_eq4} a_h( \mathcal{R}^n_hU^{n-1}_h, U^n_h ) = \frac{1}{2} \vertiii{ U^n_h }^2_a + \frac{1}{2} \vertiii{ \mathcal{R}^n_h U^{n-1}_h }^2_a - \frac{1}{2} \vertiii{ U^n_h - \mathcal{R}^n_h U^{n-1}_h }^2_a. \end{equation} Setting $V^n_h = \mathcal{L}^n_h U^n_h$ in \eqref{time_IFE_4}, we then rewrite \begin{equation} \label{thm_stab_eq2} \tau a_h(U^n_h, \mathcal{L}^n_h U^n_h) + (U^n_h, \mathcal{L}^n_h U^n_h)_{L^2(\Omega)} = (U^{n-1}_h, \mathcal{L}^n_h U^n_h)_{L^2(\Omega)} . \end{equation} Subtracting the term $( \mathcal{R}^n_hU^{n-1}_h, \mathcal{L}^n_h U^n_h )_{L^2(\Omega)}$ from \eqref{thm_stab_eq2}, and using \eqref{thm_stab_eq3} and \eqref{thm_stab_eq4}, we arrive at the identity \begin{equation} \begin{split} \label{thm_stab_eq5} & \frac{1}{2} \vertiii{ U^n_h }^2_a - \frac{1}{2} \vertiii{ U^{n-1}_h }^2_a + \tau \| \mathcal{L}^n_h U^n_h \|^2_{L^2(\Omega)} + \frac{1}{2} \vertiii{ U^n_h - \mathcal{R}^n_h U^{n-1}_h }^2_a \\ = & ( (\mathcal{I} - \mathcal{R}^n_h) U^{n-1}_h, \mathcal{L}^n_h U^n_h)_{L^2(\Omega)} + \frac{1}{2} \vertiii{ \mathcal{R}^n_h U^{n-1}_h }^2_a - \frac{1}{2} \vertiii{ U^{n-1}_h }^2_a . \end{split} \end{equation} We need to estimate each term in the right hand side of \eqref{thm_stab_eq5}. 
By orthogonality, boundedness \eqref{thm_coer_eq02} and the estimate in Theorem \ref{thm_Rn_h_err}, we have \begin{equation} \begin{split} \label{thm_stab_eq6} \vertiii{ \mathcal{R}^n_h U^{n-1}_h }^2_a - \vertiii{ U^{n-1}_h }^2_a &= -a_h ( \mathcal{R}^n_h U^{n-1}_h - U^{n-1}_h, \mathcal{R}^n_h U^{n-1}_h - U^{n-1}_h ) \le \vertiii{ \mathcal{R}^n_h U^{n-1}_h - U^{n-1}_h }^2_h \\ & \le Ch^2 \| \mathcal{L}^{n-1}_h U^{n-1}_h \|^2_{L^2(\Omega)} + C \tau \vertiii{ U^{n-1}_h }^2_{h} \\ & \le C \gamma \tau \| \mathcal{L}^{n-1}_h U^{n-1}_h \|^2_{L^2(\Omega)} + C \tau \vertiii{ U^{n-1}_h }^2_{h} . \end{split} \end{equation} In addition, Theorem \ref{thm_Rn_err} and Young's inequality imply \begin{equation} \begin{split} \label{thm_stab_eq7} ( (\mathcal{I} - \mathcal{R}^n_h) U^{n-1}_h, \mathcal{L}^n_h U^n_h )_{L^2(\Omega)} & \le \| (\mathcal{I} - \mathcal{R}^n_h) U^{n-1}_h \|_{L^2(\Omega)} \| \mathcal{L}^n_h U^n_h \|_{L^2(\Omega)} \\ & \le \left( Ch^2 \| \mathcal{L}^{n-1}_h U^{n-1}_h \|_{L^2(\Omega)} + C \tau^{1/2} h \vertiii{ U^{n-1}_h }_{h} \right) \| \mathcal{L}^n_h U^n_h \|_{L^2(\Omega)} \\ & \le C h^2 \| \mathcal{L}^{n-1}_h U^{n-1}_h \|^2_{L^2(\Omega)} + C h^2 \| \mathcal{L}^{n}_h U^{n}_h \|^2_{L^2(\Omega)} + C \tau \vertiii{ U^{n-1}_h }^2_{h} \\ & \le C \gamma \tau \| \mathcal{L}^{n-1}_h U^{n-1}_h \|^2_{L^2(\Omega)} + C \gamma \tau \| \mathcal{L}^{n}_h U^{n}_h \|^2_{L^2(\Omega)} + C \tau \vertiii{ U^{n-1}_h }^2_{h}. \end{split} \end{equation} Putting \eqref{thm_stab_eq6} and \eqref{thm_stab_eq7} into \eqref{thm_stab_eq5}, we obtain \begin{equation} \begin{split} \label{thm_stab_eq8} \frac{1}{2} \vertiii{ U^n_h }^2_a - \frac{1}{2} \vertiii{ U^{n-1}_h }^2_a + \tau \| \mathcal{L}^n_h U^n_h \|^2_{L^2(\Omega)} \le C \gamma \tau \| \mathcal{L}^{n-1}_h U^{n-1}_h \|^2_{L^2(\Omega)} + C \gamma \tau \| \mathcal{L}^{n}_h U^{n}_h \|^2_{L^2(\Omega)} + C \tau \vertiii{U^{n-1}_h}^2_h. \end{split} \end{equation} Note that $t_n=t_{n-1}+\tau \le 2t_{n-1}$ for $n \ge 2$.
Then multiplying \eqref{thm_stab_eq8} by $t_n$ and summing it from $n=2$ to $n=M$ yields \begin{equation} \begin{split} \label{thm_stab_eq9} \frac{t_M}{2} \vertiii{ U^M_h }^2_a + (1-C\gamma) \tau \sum_{n=2}^M t_n \| \mathcal{L}^n_h U^n_h \|^2_{L^2(\Omega)} \le C \gamma \tau^2 \| \mathcal{L}^{1}_h U^{1}_h \|^2_{L^2(\Omega)} + C \tau \sum_{n=2}^M \vertiii{U^{n-1}_h}^2_h . \end{split} \end{equation} Special attention is needed for $n=1$. Putting \eqref{thm_stab_eq3_1} and \eqref{thm_stab_eq3_2} into \eqref{thm_stab_eq2} with $n=1$, we have \begin{equation} \begin{split} \label{thm_stab_eq10} \vertiii{ U^1_h }^2_a + \tau \vertii{ \mathcal{L}^1_h U^1_h }^2_{L^2(\Omega)} & = ( U^0_h, \mathcal{L}^1_h U^1_h )_{L^2(\Omega)} \\ & \le \vertii{ U^0_h }_{L^2(\Omega)} \vertii{ \mathcal{L}^1_h U^1_h }_{L^2(\Omega)} \le \tau^{-1} \vertii{ U^0_h }^2_{L^2(\Omega)} + \frac{\tau}{4} \vertii{ \mathcal{L}^1_h U^1_h }^2_{L^2(\Omega)}. \end{split} \end{equation} Multiplying \eqref{thm_stab_eq10} by $t_1=\tau$ leads to \begin{equation} \begin{split} \label{thm_stab_eq10_1} \tau \vertiii{ U^1_h }^2_a + \frac{3\tau t_1}{4} \vertii{ \mathcal{L}^1_h U^1_h }^2_{L^2(\Omega)} \le \vertii{ U^0_h }^2_{L^2(\Omega)}. \end{split} \end{equation} Combining \eqref{thm_stab_eq10_1} with \eqref{thm_stab_eq9}, using the stability \eqref{thm_stab_eq01} (the second term), replacing the norm $\vertiii{\cdot}_a$ by $\vertiii{\cdot}_h$ through the equivalence, and assuming $\gamma$ is small enough such that $1-C\gamma>0$ we obtain the bounds for the first two terms on the left side of \eqref{thm_stab_eq02}. For the third term on the left of \eqref{thm_stab_eq02}, we apply the $L^2$ projection $\mathcal{P}^n_h~:~ W_h \rightarrow S^n_h$ to write \begin{equation} \label{thm_stab_eq11} a_h( U^n_h, \mathcal{P}^n_h[[U_h]]_{n-1} ) = ( \mathcal{L}^n_hU^n_h, \mathcal{P}^n_h[[U_h]]_{n-1} )_{L^2(\Omega)} = ( \mathcal{L}^n_hU^n_h, [[U_h]]_{n-1} )_{L^2(\Omega)}.
\end{equation} Then in \eqref{time_IFE_4}, taking $V^n_h = \mathcal{P}^n_h [[U_h]]_{n-1}$, and using \eqref{thm_stab_eq11} we have \begin{equation} \begin{split} \label{thm_stab_eq12} \vertii{ [[ U_h ]]_{n-1} }^2_{L^2(\Omega)} & = ( [[ U_h ]]_{n-1}, [[ U_h ]]_{n-1} - \mathcal{P}^n_h[[ U_h ]]_{n-1} )_{L^2(\Omega)} + ( [[ U_h ]]_{n-1}, \mathcal{P}^n_h[[ U_h ]]_{n-1} )_{L^2(\Omega)} \\ & = ( [[ U_h ]]_{n-1}, [[ U_h ]]_{n-1} - \mathcal{P}^n_h[[ U_h ]]_{n-1} )_{L^2(\Omega)} - \tau ( \mathcal{L}^n_hU^n_h, [[U_h]]_{n-1} )_{L^2(\Omega)}. \end{split} \end{equation} The first term on the right of \eqref{thm_stab_eq12} can be bounded through the Young's inequality and Theorem \ref{thm_Rn_err}: \begin{equation} \begin{split} \label{thm_stab_eq13} &( [[ U_h ]]_{n-1}, [[ U_h ]]_{n-1} - \mathcal{P}^n_h[[ U_h ]]_{n-1} )_{L^2(\Omega)} = ( [[ U_h ]]_{n-1}, \mathcal{P}^n_h U^{n-1}_h - U^{n-1}_{h} )_{L^2(\Omega)} \\ \le & \frac{1}{4} \| [[ U_h ]]_{n-1} \|^2_{L^2(\Omega)} + \| (\mathcal{I} - \mathcal{P}^n_h)U^{n-1}_h \|^2_{L^2(\Omega)} \\ \le & \frac{1}{4} \| [[ U_h ]]_{n-1} \|^2_{L^2(\Omega)} + Ch^4 \| \mathcal{L}^{n-1}_h U^{n-1}_h \|^2_{L^2(\Omega)} + C\tau h^2 \vertiii{ U^{n-1}_h }^2_h \end{split} \end{equation} where we have also used the smallest distance property $\| (\mathcal{I} - \mathcal{P}^n_h)U^{n-1}_h \|^2_{L^2( \Omega)} \le \| (\mathcal{I} - \mathcal{R}^n_h)U^{n-1}_h \|^2_{L^2(\Omega)}$. The second term on the right of \eqref{thm_stab_eq12} is also bounded through the Young's inequality: \begin{equation} \label{thm_stab_eq14} \tau ( \mathcal{L}^n_hU^n_h, [[U_h]]_{n-1} )_{L^2(\Omega)} \le \frac{1}{4} \| [[U_h]]_{n-1} \|^2_{L^2(\Omega)} + \tau^2 \| \mathcal{L}^n_hU^n_h \|^2_{L^2(\Omega)}. 
\end{equation} Substituting \eqref{thm_stab_eq13} and \eqref{thm_stab_eq14} into \eqref{thm_stab_eq12} together with the assumption $h^2\le \gamma \tau$, we obtain \begin{equation} \label{thm_stab_eq15} \tau^{-1} \vertii{ [[ U_h ]]_{n-1} }^2_{L^2(\Omega)} \le C \tau \left( \| \mathcal{L}^{n-1}_h U^{n-1}_h \|^2_{L^2(\Omega)} + \| \mathcal{L}^{n}_h U^{n}_h \|^2_{L^2(\Omega)} + \vertiii{ U^{n-1}_h }^2_h \right). \end{equation} Now we multiply \eqref{thm_stab_eq15} by $t_{n-1}$ and note $t_{n-1}\le t_n$. Then summing the resulting inequalities from $n=2$ to $M$ and using the bounds for second terms in \eqref{thm_stab_eq03} and \eqref{thm_stab_eq02}, we arrive at the estimate for the third term in \eqref{thm_stab_eq02}. \end{proof} \begin{remark} \label{rem_stability_1} We note that one of the keys in the proof of \eqref{thm_stab_eq02} is the employment of the norm $\vertiii{ \cdot }_a$ induced from $a_h(\cdot,\cdot)$ in the identity \eqref{thm_stab_eq5}. Roughly speaking if it is replaced by the energy norm $\vertiii{\cdot}_h$, then the coercivity and boundedness in Theorem \ref{thm_coer} have to be used to bound $a_h(U^n_h,U^n_h)$ and $a_h(U^{n-1}_h, U^{n-1}_h)$ which can not give the same coefficients for $\vertiii{ U^n_h }_h$ and $\vertiii{ U^{n-1}_h }_h$ as \eqref{thm_stab_eq5}, and thus one can not do cancellation when summing these identities as \eqref{thm_stab_eq9}. Moreover, the estimate \eqref{thm_stab_eq6} relies on the orthogonality property of $a_h(\cdot,\cdot)$, and one order will be lost if $\vertiii{\cdot}_a$ is replaced by $\vertiii{\cdot}_h$ in that estimate. So we think the uniform degrees of freedom and weak form in dynamics benefit not only the computation but also the analysis. \end{remark} \begin{remark} \label{rem_stability_2} Similar stability results are also derived in \cite{2013Zunino} to analyze XFEM for moving interface problems.
However their approach relies on certain assumptions on the interpolation errors between the extended finite element spaces and the standard continuous finite element spaces, see (11)-(15) in \cite{2013Zunino}. As mentioned in the article, the rigorous proof of those assumptions is still an open problem, and we think those assumptions are also difficult to prove even for the IFE spaces. \end{remark} Next we proceed to estimate the fully discrete errors. Given the IFE solution $U_h$ to the scheme \eqref{time_IFE_5} or \eqref{time_IFE_4}, we define the total error \begin{equation} \label{error} E_h := u - U_h \in \mathbb{W}_h. \end{equation} We follow the argument of \cite{2013Zunino} to show the following estimate on the consistency error. \begin{theorem} \label{thm_consist} Suppose the exact solution has the regularity $u\in L^2(0,T;\widetilde{H}^2(\Omega))\cap H^1(0,T;H^1(\Omega^-\cup \Omega^+))\cap H^2(0,T;L^2(\Omega))$, let $U_h$ be the IFE solution to \eqref{time_IFE_5} or \eqref{time_IFE_4}, and let $\mathcal{V}\in L^{\infty}(\Gamma(t))$ with $\|\mathcal{V}(t)\cdot{\bf n}\|_{L^{\infty}(\Gamma(t))}\le K$, $\forall t$, then for any $V_h\in \mathbb{S}_h$ and $\epsilon>0$ there holds \begin{equation} \begin{split} \label{thm_consist_eq0} A_h( E_h, V_h ) & \le C \tau^2\epsilon^{-1} \left( \| \partial_{tt}u \|^2_{L^2(0,T;L^2(\Omega))} + K \| \partial_t u \|^2_{L^2(0,T;H^1(\Omega))} \right) \\ &+ \frac{ \epsilon }{2} \left( \max_{n=1,...,N} \| V^n_h \|^2_{L^2(\Omega)} + K\tau \sum_{n=1}^N \| V^n_h \|^2_{H^1(\Omega)} \right) + C\epsilon^{-1}h^4 \| u_0 \|^2_{H^2(\Omega)}.
\end{split} \end{equation} \end{theorem} \begin{proof} By the assumption $f=0$, noticing the regularity of $u$, i.e., $[u]_e=0$, $\forall e\in\mathring{\mathcal{E}}_h$, $[[u]]_{n-1}=0$ for $n=2,...,N$, and using \eqref{time_IFE_5}-\eqref{time_IFE_7} we have \begin{equation} \begin{split} \label{thm_consist_eq1} & A_h(E_h,V_h) = A_h( u, V_h ) - A_h( U_h, V_h ) = A_h( u, V_h ) - ( U^0_h, V^1_h )_{L^2(\Omega)} \\ = & \sum_{n=1}^N \int_{J_n} \left( ( \partial_t u, V_h )_{L^2(\Omega)} + (\sqrt{\beta} \nabla u(t^-_{n}) ,\sqrt{\beta} \nabla V^n_h )_{L^2(\Omega)} - \sum_{e\in\mathring{\mathcal{E}}_h} ( {\beta \nabla u(t^-_{n}) \cdot {\bf n}}, [V^n_h]_e )_{L^2(e)} \right) dt \\ & + (u(t_0)-U^0_h, V^1_h)_{L^2(\Omega)} \end{split} \end{equation} where we denote the first summation above by $I$ and the second term $(u(t_0)-U^0_h,V^1_h)_{L^2(\Omega)}$ by $II$. Using the equation for $u$ and applying integration by parts we have \begin{equation} \label{thm_consist_eq2} I = \sum_{n=1}^N \left( \int_{J_n} ( \beta \triangle u(t), V_h )_{L^2(\Omega)} dt - \tau ( \beta \triangle u(t^-_{n}), V_h )_{L^2(\Omega)} \right). \end{equation} We introduce a function $\mathcal{G}(t)=(\beta \triangle u(t), V_h)_{L^2(\Omega)}$. By the mean value theorem there exists $z_{n}\in J_n=[t_{n-1},t_n]$ such that $I$ in \eqref{thm_consist_eq2} can be expressed as \begin{equation} \begin{split} \label{thm_consist_eq3} I = \sum_{n=1}^N \left( \int_{J_n} \mathcal{G}(t) dt - \tau \mathcal{G}(t^-_{n}) \right) = \sum_{n=1}^N \left( \tau \mathcal{G}(z_{n}) - \tau \mathcal{G}(t^-_{n}) \right) = \tau \sum_{n=1}^N \int_{t_{n}}^{z_n} \frac{d}{dt} \mathcal{G}(t) dt \le \tau \sum_{n=1}^N \int_{J_n} \verti{ \frac{d}{dt} \mathcal{G}(t) } dt. \end{split} \end{equation} Now we need to estimate $\verti{ \frac{d}{dt} \mathcal{G}(t) }$. For this purpose, we split the integral on $\Omega$ into the integrals on $\Omega^{\pm}(t)$ which are evolving with respect to time.
The temporal derivative of the domain integral is based on the formula \eqref{functional_2}: \begin{equation} \begin{split} \label{thm_consist_eq4} \verti{ \frac{d}{dt} \mathcal{G}(t) } & = \verti{ \sum_{s=\pm} \frac{d}{dt} \int_{\Omega^s(t)} \beta \triangle u V_h dX } \\ & = \verti{ \sum_{s=\pm} \int_{\Omega^s(t)} \partial_t \left( \beta \triangle u V_h \right) dX + \int_{\Gamma(t)} \beta \triangle u V_h \mathcal{V}\cdot{\bf n} ds } \\ & = \verti{ \sum_{s=\pm} \int_{\Omega^s(t)} \partial_{tt} u V_h dX + \int_{\Gamma(t)} \partial_t u V_h \mathcal{V}\cdot{\bf n} ds } \\ & \le \int_{\Omega} \verti{ \partial_{tt} u V_h } dX + K \int_{\Gamma(t)} \verti{ \partial_t u V_h } ds. \end{split} \end{equation} Putting \eqref{thm_consist_eq4} into \eqref{thm_consist_eq3} and applying the Young's inequality, we first have \begin{equation} \begin{split} \label{thm_consist_eq5} \tau \sum_{n=1}^N \int_{J_n}\int_{\Omega} \verti{ \partial_{tt} u V_h } dX dt & \le \tau \sum_{n=1}^N \int_{J_n} \vertii{ \partial_{tt} u }_{L^2(\Omega)} \|V_h\|_{L^2(\Omega)} dt \\ & = \tau \sum_{n=1}^N \|V^n_h\|_{L^2(\Omega)} \int_{J_n} \vertii{ \partial_{tt} u }_{L^2(\Omega)} dt \\ & \le T^{1/2}\max_{n=1,...,N} \|V^n_h\|_{L^2(\Omega)} \tau \vertii{ \partial_{tt} u }_{L^2(0,T;L^2(\Omega))} \\ & \le T \tau^2 \epsilon^{-1} \vertii{ \partial_{tt} u }^2_{L^2(0,T;L^2(\Omega))} + \frac{\epsilon}{4} \max_{n=1,...,N} \|V^n_h\|^2_{L^2(\Omega)} \end{split} \end{equation} where $T$ is the total time. 
Using the trace inequality from $\Gamma(t)$ to $\Omega$ given by Theorem \ref{thm_glob_trace} and the standard trace inequality, we can use a similar argument as above to get the bound \begin{equation} \begin{split} \label{thm_consist_eq6} K\tau \sum_{n=1}^N \int_{J_n} \int_{\Gamma(t)} \verti{ \partial_t u V_h } ds dt & \le K \sum_{n=1}^N \int_{J_n} \tau \vertii{ \partial_t u }_{L^2(\Gamma(t))} \vertii{ V_h }_{L^2(\Gamma(t))} dt \\ & \le K \sum_{n=1}^N \int_{J_n} C \tau \vertii{ \partial_t u }_{H^1(\Omega)} \vertii{ V_h }_{H^1(\Omega)} dt \\ & \le K \sum_{n=1}^N \int_{J_n} C \tau^2 \epsilon^{-1} \vertii{ \partial_t u }^2_{H^1(\Omega)} + \frac{ \epsilon }{4} \vertii{ V_h }^2_{H^1(\Omega)} dt \\ & = CK \tau^2 \epsilon^{-1} \| \partial_t u \|^2_{L^2(0,T;H^1(\Omega))} + \frac{ K \epsilon \tau}{4} \sum_{n=1}^N \| V^n_h \|^2_{H^1(\Omega)}. \end{split} \end{equation} \eqref{thm_consist_eq5} and \eqref{thm_consist_eq6} give the bound of $I$. In addition, the term $II$ can be directly bounded by the Young's inequality and Theorem \ref{thm_ellip_proj_err}: \begin{equation} \begin{split} \label{thm_consist_eq7} II & = (u_0 - U^0_h, V^1_h)_{L^2(\Omega)} = ( u_0 - \mathcal{R}^0_h u_0, V^1_h)_{L^2(\Omega)} \le \| u_0 - \mathcal{R}^0_h u_0 \|_{L^2(\Omega)} \| V^1_h \|_{L^2(\Omega)} \\ & \le \epsilon^{-1} \| u_0 - \mathcal{R}^0_h u_0 \|^2_{L^2(\Omega)} + \frac{\epsilon}{4} \| V^1_h \|^2_{L^2(\Omega)} \le C \epsilon^{-1} h^4 \| u_0 \|^2_{H^2(\Omega)} + \frac{\epsilon}{4} \| V^1_h \|^2_{L^2(\Omega)}. \end{split} \end{equation} Combining it with the bound of the term $I$ we have the desired result. \end{proof} An alternative expression of the bilinear form $A_h(\cdot,\cdot)$ to \eqref{time_IFE_6} is also needed.
\begin{lemma} \label{lemma_Ah} For every $U_h \in \mathbb{W}_h$ and $V_h\in \mathbb{S}_h$, there holds \begin{equation} \begin{split} \label{lemma_Ah_eq0} A_h(U_h,V_h) &= \tau \sum_{n=1}^N a_h(U_h(t^-_n),V_h(t^-_n)) - \sum_{n=1}^{N-1} ( U_h(t^-_n), [[V_h]]_n )_{L^2(\Omega)} + (U_h(t^-_N), V_h(t^-_N))_{L^2(\Omega)}. \end{split} \end{equation} \end{lemma} \begin{proof} Using the integration by parts for the temporal direction, we have \begin{equation} \begin{split} \label{lemma_Ah_eq1} & \int_{J_n} \int_{\Omega} \partial_t U_h V_h dX dt = \int_{\Omega} \int^{t_n}_{t_{n-1}} \partial_t U_h V_h dt dX \\ = & (U_h(t^-_n),V_h(t^-_n))_{L^2(\Omega)} - (U_h(t^+_{n-1}),V_h(t^+_{n-1}))_{L^2(\Omega)} - \int_{J_n} ( U_h, \partial_t V_h )_{L^2(\Omega)} dt \end{split} \end{equation} where the last term vanishes since $V_h\in \mathbb{S}_h$. Then we note the following identity \begin{equation} \begin{split} \label{lemma_Ah_eq2} & \sum_{n=1}^N (U_h(t^-_n),V_h(t^-_n))_{L^2(\Omega)} - (U_h(t^+_{n-1}),V_h(t^+_{n-1}))_{L^2(\Omega)} \\ = & (U_h(t^-_N),V_h(t^-_N))_{L^2(\Omega)} - (U_h(t^+_0),V_h(t^+_0))_{L^2(\Omega)} \\ - & \sum_{n=1}^{N-1} \left( (U_h(t^+_n), V_h(t^+_n))_{L^2(\Omega)} - (U_h(t^-_n), V_h(t^-_n))_{L^2(\Omega)} \right) \\ = & (U_h(t^-_N),V_h(t^-_N))_{L^2(\Omega)} - (U_h(t^+_0),V_h(t^+_0))_{L^2(\Omega)} \\ - & \sum_{n=1}^{N-1} \left( ( [[U_h]]_n, V_h(t^+_n))_{L^2(\Omega)} + (U_h(t^-_n), [[V_h]]_n)_{L^2(\Omega)} \right). \end{split} \end{equation} In \eqref{lemma_Ah_eq2} we further note $V_h(t^+_{n})=V_h(t^-_{n+1})$ since $V_h\in \mathbb{S}_h$. Putting \eqref{lemma_Ah_eq2} into \eqref{time_IFE_6} yields the desired result. \end{proof} Now based on the estimates prepared above, we can use the duality argument to analyze the solution errors. This idea was introduced in \cite{1991ErikssonJohnsonI,1995ErikssonClaes} for time-dependent adaptive mesh methods. 
\begin{theorem} \label{thm_label_err_L2} Under the conditions of Theorem \ref{thm_stab}, suppose the exact solution satisfies the regularity $u\in L^2(0,T;\widetilde{H}^2(\Omega)) \cap H^1(0,T;H^1(\Omega^-\cup \Omega^+))\cap H^2(0,T;L^2(\Omega)) \cap L^{\infty}(0,T; \widetilde{H}^2(\Omega))$, let $U_h$ be the IFE solution to \eqref{time_IFE_5} or \eqref{time_IFE_4} and let $\mathcal{V}\in L^{\infty}(\Gamma(t))$ with $\|\mathcal{V}(t)\cdot{\bf n}\|_{L^{\infty}(\Gamma(t))}\le K$, $\forall t$, then there holds \begin{equation} \begin{split} \label{thm_label_err_L2_eq0} \| u(t_N) - U^N_h \|_{L^2(\Omega)} + h| u(t_N) - U^N_h |_{H^1(\Omega)} & \le (1+ \sqrt{K}) C \tau \left( \| \partial_{tt}u \|_{L^2(0,T;L^2(\Omega))} + \| \partial_t u \|_{L^2(0,T;H^1(\Omega))} \right) \\ & + \sqrt{\log(1+N)} C h^2\| u \|_{L^{\infty}(0,T;H^2(\Omega))} . \end{split} \end{equation} \end{theorem} \begin{proof} First of all, we show the estimate in the $L^2$-norm. By the discrete duality argument to \eqref{time_IFE_4}, since $a_h(\cdot,\cdot)$ is coercive on every $S^n_h$, given each $Z^{N+1}_h\in W_h$ we can define a sequence $Z^n_h\in S^n_h$, $n=N,...,1$ such that \begin{equation} \label{thm_label_err_L2_eq1} \tau a_h(V^n_h,Z^n_h) + ( V^n_h, Z^{n}_h )_{L^2(\Omega)} = ( V^n_h, Z^{n+1}_h )_{L^2(\Omega)}, ~~~ \forall V^n_h\in S^n_h. \end{equation} Using the expression in Lemma \ref{lemma_Ah} we can write the equivalent format to \eqref{thm_label_err_L2_eq1} in terms of the bilinear form $A_h(\cdot,\cdot)$ by summing \eqref{thm_label_err_L2_eq1} from $n=N,...,1$, namely we need to find $Z_h \in \mathbb{S}_h$ such that \begin{equation} \label{thm_label_err_L2_eq2} A_h(V_h, Z_h ) = (V^N_h, Z^{N+1}_h)_{L^2(\Omega)}, ~~~~ \forall V_h \in \mathbb{S}_h. 
\end{equation} Let us employ the error decomposition similar to \eqref{err_decomp}, and define the corresponding functions $\xi_h\in \mathbb{S}_h$ with $\xi_h|_{J_n}=\xi^n_h$ and $\eta_h\in \mathbb{W}_h$ with $\eta_h|_{J_n}=u(t) - \mathcal{R}^n_hu$, $n=1,...,N$ which leads to $E_h=\xi_h + \eta_h$ with $E_h$ defined in \eqref{error}. In particular we also note that $\eta_h(t^-_{n})=u(t_{n}) - \mathcal{R}^n_hu = \eta^n_h$. Letting $Z^{N+1}_h = \xi^N_h$ and $V_h = \xi_h$ in \eqref{thm_label_err_L2_eq2} and using Lemma \ref{lemma_Ah} we have the function $Z_h$ satisfying \begin{equation} \begin{split} \label{thm_label_err_L2_eq4} &\| \xi^N_h \|^2_{L^2(\Omega)} = A_h( \xi_h, Z_h ) = A_h(E_h,Z_h) - A_h(\eta_h,Z_h) \\ = & A_h(E_h,Z_h) - \tau \sum_{n=1}^N a_h( \eta^n_h,Z^n_h) + \sum_{n=1}^{N-1} ( \eta^n_h , [[Z_h]]_{n} )_{L^2(\Omega)} - (\eta^N_h, Z^N_h)_{L^2(\Omega)}, \end{split} \end{equation} where the terms in the right hand side are denoted by $Q_i$, $i=1,2,3,4$, respectively. Now we proceed to estimate each term $Q_i$ individually. First of all, we have $\| V^n_h \|_{H^1(\Omega)}\le C \vertiii{V^n_h}_h$ by the discrete Poincar\'e inequality in Theorem \ref{thm_dis_poinc}. Then applying the counterpart of \eqref{thm_stab_eq01} in Theorem \ref{thm_stab} for the sequence $Z^n_h$ together with Theorem \ref{thm_consist}, we obtain \begin{equation} \begin{split} \label{thm_label_err_L2_eq5} Q_1 = A_h( E_h, Z_h ) \le & C \tau^2\epsilon^{-1} \left( \| \partial_{tt}u \|^2_{L^2(0,T;L^2(\Omega))} + K \| \partial_t u \|^2_{L^2(0,T;H^1(\Omega))} \right) \\ &+ \frac{\epsilon C(K+1)}{2} \| Z^{N+1}_h \|^2_{L^2(\Omega)} + C\epsilon^{-1}h^4 \| u_0 \|^2_{H^2(\Omega)}, \end{split} \end{equation} where $Z^{N+1}_h = \xi^N_h$. Next we note that $Z^n_h\in S^n_h$ and thus \begin{equation} \label{thm_label_err_L2_eq6} Q_2 = \tau\sum_{n=1}^N a_h(\eta^n_h,Z^n_h) = \tau\sum_{n=1}^N a_h( u(t_n) - \mathcal{R}^n_hu, Z^n_h ) = 0.
\end{equation} By Schwarz inequality, using the last term in \eqref{thm_stab_eq02} of Theorem \ref{thm_stab} (also the counterpart for the sequence $Z^n_h$) and applying the estimate for $\eta^n_h$, we can bound $Q_3$ by \begin{equation} \begin{split} \label{thm_label_err_L2_eq7} Q_3 & \le \sum_{n=1}^{N-1} \| \eta^n_h \|_{L^2(\Omega)} \| [[Z_h]]_n \|_{L^2(\Omega)} \\ & \le \left( \sum_{n=1}^{N-1} \tau t^{-1}_n \| \eta^n_h \|^2_{L^2(\Omega)} \right)^{1/2} \left( \sum_{n=1}^{N-1} \tau^{-1} t_n \| [[Z_h]]_n \|^2_{L^2(\Omega)}\right)^{1/2} \\ & \le \max_{n=1,...,N} \| \eta^n_h \|_{L^2(\Omega)}\left( \sum_{n=1}^{N-1} \tau \frac{1}{n \tau} \right)^{1/2} C \| Z^{N+1}_h \|_{L^2(\Omega)} \\ & \le C \sqrt{\log(1+N)} h^2 \| u \|_{L^{\infty}(0,T; H^2(\Omega))} \| \xi^N_h \|_{L^2(\Omega)} \\ & \le C \log(1+N) h^4 \epsilon^{-1} \| u \|^2_{L^{\infty}(0,T; H^2(\Omega))} + \frac{\epsilon}{4} \| \xi^N_h \|^2_{L^2(\Omega)}. \end{split} \end{equation} The last term $Q_4$ can be bounded by the estimate for $\eta^N_h$ and the first term in \eqref{thm_stab_eq01} of Theorem \ref{thm_stab}: \begin{equation} \begin{split} \label{thm_label_err_L2_eq8} Q_4 & \le \| \eta^N_h \|_{L^2(\Omega)} \| Z^N_h \|_{L^2(\Omega)} \le Ch^2 \| u \|_{L^{\infty}(0,T;H^2(\Omega))} \| \xi^{N}_h \|_{L^2(\Omega)} \le Ch^4 \epsilon^{-1} \| u \|^2_{L^{\infty}(0,T;H^2(\Omega))} + \frac{\epsilon}{4} \| \xi^{N}_h \|^2_{L^2(\Omega)} . \end{split} \end{equation} Substituting \eqref{thm_label_err_L2_eq5}-\eqref{thm_label_err_L2_eq8} into \eqref{thm_label_err_L2_eq4}, we finally obtain \begin{equation} \begin{split} \label{thm_label_err_L2_eq9} \left( 1 - C(K+1)\epsilon \right) \| \xi^N_h \|^2_{L^2(\Omega)} & \le C \tau^2\epsilon^{-1} \left( \| \partial_{tt}u \|^2_{L^2(0,T;L^2(\Omega))} + K \| \partial_t u \|^2_{L^2(0,T;H^1(\Omega))} \right) \\ &+ C \log(1+N) h^4 \epsilon^{-1} \| u \|^2_{L^{\infty}(0,T; H^2(\Omega))}. 
\end{split} \end{equation} Choosing $\epsilon$ sufficiently small such that $1-C(K+1)\epsilon>0$, we have the error bound for $\xi^N_h$. Combining it with the estimate for $\eta^N_h$, we have the desired estimate for the $L^2$-norm. Finally the $H^1$-norm estimate simply follows from the inverse estimate $|\xi^N_h|_{H^1(T)}\le Ch_T^{-1} \|\xi^N_h\|_{L^2(T)}$ by \eqref{inver_inequa} for each element $T$ since $\xi^N_h\in S^N_h$. \end{proof} \begin{remark} \label{rem_regularity} We compare the results of Theorem \ref{thm_label_err_L2} with those of the standard finite element method solving the stationary parabolic interface problem in \cite{1998ChenZou}. We first note that the regularity assumptions in Theorem \ref{thm_label_err_L2} on the temporal direction are stronger than Theorems 3.2 and 3.3 in \cite{1998ChenZou}. The regularity of parabolic interface problems with a stationary interface is discussed in \cite{2002HuangZou} which is also indeed weaker than those of Theorem \ref{thm_label_err_L2}. But we do not know of any literature where the regularity of parabolic equations with moving interface is studied. In addition it is interesting to note that there is also a ``log'' term in Theorems 3.2 and 3.3 in \cite{1998ChenZou} but on the spatial mesh size $h$, i.e., $\log(h)$, while the ``log'' term in Theorem \ref{thm_label_err_L2} appears to be on the temporal step size $\tau$, i.e., $|\log{(1+N)}|\approx |\log{(\tau)}|$. In general $\tau$ is taken in some order of $h$ to guarantee convergence, then these two results are actually comparable. However we should also read from these results that for the IFE method based on unfitted meshes the errors in the spatial direction and temporal direction are not completely decoupled due to the bound $\sqrt{\log(1+N)}h^2$, but as usual $\sqrt{\log(1+N)}$ has very limited effect on the total error.
\end{remark} \begin{remark} \label{rem_const} Note that the generic constant in Theorem \ref{thm_label_err_L2} is time-dependent, i.e., $C=C(T)$. The source of the time dependence comes from Theorems \ref{thm_ellip_proj_err}, \ref{thm_Rn_err} and \ref{thm_dis_poinc} based on the duality argument that involves the constants of the elliptic regularity \cite{2010ChuGrahamHou,2002HuangZou,1987Leguillon}. The result in \cite{2010ChuGrahamHou} states that the regularity constant depends on the distance from the interface to the domain boundary. Moreover, the analysis in \cite{1987Leguillon} shows singularity may occur if the interface touches the boundary. However, to the best of our knowledge, there is no work in the literature giving detailed analysis on how these regularity constants depend on the interface geometry. Since the geometry can be really arbitrary during the motion of interface which is different from the stationary interface problems, a rigorous geometric analysis on the regularity constants can be important and interesting. \end{remark} \section{Numerical Experiments} In this section, we present a group of numerical experiments to validate the theoretical analysis above. Note that some exploratory numerical experiments were given in \cite{2013HeLinLinZhang,2013LinLinZhang1}, but the IFE method they used does not include the penalties on interface edges which is then shown to only produce suboptimal convergent solutions for elliptic interface problems \cite{2015LinLinZhang} and thus cannot be expected to be a good choice for moving interface problems. 
Here we consider a domain $\Omega=(-1,1)\times(-1,1)$ with three types of moving interface: \begin{subequations} \label{interf} \begin{align} \text{(a translating line)}& ~ \Gamma_1(t): \varphi_1 =0 ~~ \text{with}~ \varphi_1 = x- (\pi/5+t ), \\ \text{(a moving circle)} &~ \Gamma_2(t): \varphi_2 =0 ~~ \text{with}~ \varphi_2= (x-0.3\cos(\pi t))^2 + (y-0.3\sin(\pi t))^2 - (\pi/6)^2, \\ \text{(a rotating ellipse)} &~ \Gamma_3(t): \varphi_3 =0 ~~ \text{with}~\varphi_3 = 16(\cos(\pi t)x + \sin(\pi t)y)^2 + 49(-\sin(\pi t)x + \cos(\pi t)y)^2 - \pi^2, \end{align} \end{subequations} which are illustrated in Figure \ref{fig:move_interf} where the red solid line is the interface curve and the blue dashed line denotes the trajectory of the center/focus. Here we mention that rotation motion widely appears in fluid-structure-interaction (FSI), for instance the vibration of turbine blades impacted by the fluid flow \cite{2020LanRamirezSun}. The considered situations can be all considered as large rotational/translational motions which in general can cause elements to become ill-shaped and thus reduce the accuracy of numerical solutions for some conventional moving mesh methods. 
For each of these interfaces and their motions, we define the subdomains $\Omega^+_i(t)=\{X\in \Omega:\varphi_i(X,t)>0\}$ and $\Omega^-_i(t)=\{X\in\Omega:\varphi_i(X,t)<0\}$, $i=1,2,3$, fix $\beta^-=1$ and $\beta^+=10$, and define the corresponding analytical solutions as \begin{subequations} \label{ana_solu} \begin{align} & u_1(X,t) = \sin(x- (\pi/5+t ))/\beta^{\pm}~~ \text{in} ~ \Omega^{\pm}_1(t), \\ & u_2(X,t) = \begin{cases} & \frac{((x-0.3\cos(\pi t))^2 + (y-0.3\sin(\pi t))^2)^{5/2}(\pi/6)^{-1}}{\beta^{-}} ~~ \text{in} ~ \Omega^{-}_2(t), \\ & \frac{((x-0.3\cos(\pi t))^2 + (y-0.3\sin(\pi t))^2)^{5/2}(\pi/6)^{-1}}{\beta^{+}} +(\pi/6)^{4}(\frac{1}{\beta^-} - \frac{1}{\beta^+} ) ~~ \text{in} ~ \Omega^{+}_2(t), \end{cases} \\ & u_3(X,t) = \begin{cases} & \frac{(\pi/4)^2(\pi/7)^2}{\beta^{-}}\left( \frac{(\cos(\pi t)x + \sin(\pi t)y)^2}{(\pi/4)^2} + \frac{(-\sin(\pi t)x + \cos(\pi t)y)^2}{(\pi/7)^2} \right)^{5/2}~~ \text{in} ~ \Omega^{-}_3(t), \\ &(\frac{\pi}{4})^2(\frac{\pi}{7})^2 \left( \frac{1}{\beta^{+}}\left( \frac{(\cos(\pi t)x + \sin(\pi t)y)^2}{(\pi/4)^2} + \frac{(-\sin(\pi t)x + \cos(\pi t)y)^2}{(\pi/7)^2} \right)^{5/2} + (\frac{1}{\beta^{-}} - \frac{1}{\beta^{+}}) \right) ~~ \text{in} ~ \Omega^{+}_3(t). 
\end{cases} \end{align} \end{subequations} \begin{figure}[h] \centering \begin{subfigure}{.3\textwidth} \includegraphics[width=2.1in]{line} \label{trans_line} \end{subfigure} ~ \begin{subfigure}{.3\textwidth} \includegraphics[width=2.1in]{cir} \label{move_cir} \end{subfigure} ~ \begin{subfigure}{.3\textwidth} \includegraphics[width=2.1in]{ell} \label{rot_elli} \end{subfigure} \caption{The interface and movement: a translating line (left), a moving circle (middle) and a rotating ellipse (right).} \label{fig:move_interf} \end{figure} \begin{figure}[h] \centering \begin{subfigure}{.32\textwidth} \includegraphics[width=2.2in]{fig_line_error-eps-converted-to} \label{trans_line_error} \end{subfigure} ~ \begin{subfigure}{.32\textwidth} \includegraphics[width=2.2in]{fig_cir_error-eps-converted-to} \label{move_cir_error} \end{subfigure} ~ \begin{subfigure}{.32\textwidth} \includegraphics[width=2.2in]{fig_rot_error-eps-converted-to} \label{rot_elli_error} \end{subfigure} \caption{Solution errors: a translating line (left), a moving circle (middle) and a rotating ellipse (right).} \label{fig:move_interf_err} \end{figure} \begin{figure}[h] \centering \begin{subfigure}{.32\textwidth} \includegraphics[width=2.2in]{fig_line_cond-eps-converted-to} \label{trans_line_cond} \end{subfigure} ~ \begin{subfigure}{.32\textwidth} \includegraphics[width=2.2in]{fig_cir_cond-eps-converted-to} \label{move_cir_con} \end{subfigure} ~ \begin{subfigure}{.32\textwidth} \includegraphics[width=2.2in]{fig_rot_cond-eps-converted-to} \label{rot_elli_cond} \end{subfigure} \caption{Condition numbers: a translating line (left), a moving circle (middle) and a rotating ellipse (right)} \label{fig:move_interf_cond} \end{figure} We generate the mesh by partitioning $\Omega$ into $N\times N$ squares and cutting each square into two triangles. The step size is chosen as $T/N^2$ where $T$ is the total time and $N=10,20,...,100$. 
In Figure \ref{fig:move_interf_err}, we plot the errors of the solutions at $T=1$ gauged by the $L^{\infty}$-, $L^2$- and $H^1$-norm. For each of the error curves, we also plot a reference line which matches the ending point of the error curve and has the expected ratio, i.e., $h^{-2}$, $h^{-2}$ and $h^{-1}$ for $L^{\infty}$-, $L^2$- and $H^1$ errors, respectively. From Figure \ref{fig:move_interf_err}, we can clearly see the optimal convergence, and especially even the error in $L^{\infty}$-norm converges optimally. In particular, for the linear interface, we can see the error curves almost overlap with the reference lines. These results certainly agree with the theoretical analysis. Furthermore, for unfitted mesh methods solving moving interface problems, since the interface can be really arbitrary relative to the mesh, it is critical that condition numbers of the methods are bounded regardless of the relative location. Here to investigate this issue we also plot the condition numbers of the matrices on $N=100$ associated with the weak form $\tau a_h(u_h,v_h) + (u_h,v_h)_{L^2(\Omega)}$, $\forall u_h,v_h\in S^n_h$, $n=1,2,\dots$, in Figure \ref{fig:move_interf_cond}. We can clearly see all the condition numbers are uniformly bounded during the dynamics. In particular we note that during the motion of the linear interface, at certain points the linear interface may cut all the elements with small subelements, but we can see from Figure \ref{fig:move_interf_cond} that this small-subelement issue does not cause blow-up of the condition numbers. Note that it has been theoretically addressed in \cite{2020AdjeridBabukaGuoLin} that IFE methods do not suffer from the small-subelement issue. \bibliographystyle{plain}
1,108,101,565,819
arxiv
\section{Introduction} The fractional Hankel transform was introduced in \cite{Namias1980b} (see also \cite{Kerr1991}). It arises as fundamental tool in different areas of sciences, see e.g. \cite{Krenk1982,Karp1994,SheppardLarkin1998,Bracewell1999,Uzun2015}. A quaternionic analogue has been investigated recently in \cite{ElkGhHa20}. It is realized there \`a la Bargmann by means of the hyperholomorphic second Bargmann transform \cite{ElkachGh2018} \begin{equation}\label{BargmannT} [\SHyperBTransR \varphi](q)= \frac{1}{\left( 1-q\right)^{\alpha +1}} \int_{0}^{+\infty} t^\alpha \exp\left(\frac{ t }{1-q}\right) \varphi(t) dt; \, \, \alpha>-1, \end{equation} defined on $L^{2,\alpha}_{\Hq}(\R^+):= L^{2}_{\Hq}(\R^+,x^{\alpha}e^{-x}dx)$, the right quaternionic Hilbert space of all $\Hq$-valued functions on the half real line $\R^+$ that are $x^{\alpha}e^{-x}dx$-square integrable. Such transform defines a unitary isometric transformation from $L^{2,\alpha}_{\Hq}(\R^+)$ onto the slice hyperholomorphic Bergman space $ A^{2,\alpha}_{slice} := \mathcal{SR}(\mathbb{B}) \cap L^{2,\alpha}_{\Hq}(\mathbb{B}_I), $ where $L^{2,\alpha}_{\Hq}(\mathbb{B}_I)$ denotes the Hilbert space of quaternionic-valued functions $f$ on the unit ball $\mathbb{B}$ whose restrictions $f|_{\C_I}$ to given slice $\C_I=\R+I\R$, with $I\in \Sq=\{q\in{\Hq};q^2=-1\}$, are square integrable with respect to the Bergman measure $d\lambda^{\alpha}_I(z) = \left(1-|z|^2 \right)^{\alpha-1} dxdy$; $z=x+Iy$, on the unit disc $\mathbb{B}_I:= \mathbb{B}\cap\C_I$, while, the space $ \mathcal{SR}(\mathbb{B})$ is formed of all slice regular functions on $\mathbb{B}$, i.e., those whose slice derivative \begin{align*} ( \overline{\partial_I} f)(x+Iy) := \dfrac{1}{2}\left(\frac{\partial }{\partial x}+I\frac{\partial }{\partial y}\right)f|_{\C_I}(x+yI) \end{align*} vanishes identically for every $I\in \Sq$. 
In the present paper, we will consider the one-parameter (left) integral transforms $S^{\alpha}_{y}$; $\alpha >-1$, defined on $L^{2,\alpha}_{\Hq}(\R^+)$ as the dual transform at $y\in (0,+\infty)$ of the quaternionic fractional Hankel transform $\mathcal{L}_\theta^\alpha$ defined in \cite{ElkGhHa20}. More precisely, we deal with \begin{align}\label{SIntT} S^{\alpha}_{y} \varphi (q) := \frac{1}{1-q} \int_{0}^\infty \exp\left(-\frac{x+\theta y}{1-q}\right) I_\alpha\left( \frac{2\sqrt{q xy }}{1-q} \right) \varphi (x) \, dx , \end{align} where $I_\alpha$ stands for the modified Bessel function \cite[p. 222, (4.12.2)]{AndrewsAskeyRoy1999} $$ I_\alpha (\xi) = \sum_{n=0}^{\infty} \frac{1}{n! \Gamma(\alpha+n+1)} \left( \frac{\xi}{2}\right)^{2n+\alpha} . $$ The motivation of considering $S^{\alpha}_{y} $ lies in the observation that the limiting case $y=0$ gives rise to the hyperholomorphic second Bargmann transform in \eqref{BargmannT}. Accordingly, the study of these new operators is required in order to generate hyperholomorphic-like Bergman spaces. Our first aim is to identify the null space and the range of $S^{\alpha}_{y}$ for arbitrary $y\geq 0$. We show that the range of $L^{2,\alpha}_{\Hq}(\R^+)$ by the transform $S^{\alpha}_{y}$ is contained in a reproducing kernel space of weighted slice hyperholomorphic functions with a suitable weight $\omega$ on the unit ball $\mathbb{B}$, extending $A^{2,\alpha}_{slice}$. The description of $S^{\alpha}_{y}(L^{2,\alpha}_{\Hq}(\R^+))$ is given by Proposition \ref{range} and makes appeal to the zeros of the Laguerre polynomials. We also study their boundedness (Proposition \ref{thmBound}) and compactness (Proposition \ref{corcomp}). Moreover, we determine their singular values (Proposition \ref{propsv}) and we discuss their membership in $p$-Schatten classes (Proposition \ref{thmSchayyen}). Explicit illustrations are given for a specific weight function $\omega=\omega_{\beta,\eta}$. 
\section{Weighted hyperholomorphic left Bergman Hilbert spaces} In order to identify the range of the integral transform $S^{\alpha}_{y}$ in \eqref{SIntT} when acting on $L^{2,\alpha}_{\Hq}(\R^+)$, we begin by examining a class of weighted hyperholomorphic left Bergman Hilbert spaces for which we provide a closed expression of their reproducing kernel in terms of a $\star$-regularization of the Gauss hypergeometric function. Let $\omega$ be a given positive measurable mapping on $(0,1)$ such that $\omega(t) dt$ is a finite measure. We extend $\omega$ to the whole unit ball $\mathbb{B}\subset \Hq$ by taking $\widetilde{\omega}(q):= \omega (|q|^2)$. We define the $\omega$-hyperholomorphic left Bergman Hilbert space $A^{2,\omega}_{slice} := \mathcal{SR}(\mathbb{B}) \cap L^{2,\omega}_{\Hq}(\mathbb{B}_I) $ as the space of all left slice regular functions $\varphi $ in $\mathbb{B}$ belonging to $L^{2,\omega}_{\Hq}(\mathbb{B}_I):=L^2\left( \mathbb{B}_I, \omega (|q|^2) dxdy\right)$ and endowed with the norm induced from the slice inner product on $\mathbb{B}_I=\mathbb{B}\cap \C_I$, $$\scal{f,g}_{\omega} =\int_{\mathbb{B}_I}\overline{f(x+Iy)} g(x+Iy) \omega (x^2+y^2) dxdy .$$ More explicitly, the Hilbert space $A^{2,\omega}_{slice}$ consists of all convergent power series $\varphi(q)=\sum_{n=0}^\infty q^n c_n$ on $\mathbb{B}$ for which the quaternionic sequence $(c_n)_n$ satisfies the growth condition $$ \sum_{n=0}^\infty \gamma_{n} |c_n|^2 <\infty ; \quad \gamma_n := \int_0^1 t^n \omega(t) dt . 
$$ The specification of the weight function $$\omega_{\beta,\eta}(t):=t^{\beta-1}(1-t)^{\eta-1}, \,\, \eta,\beta>0,$$ gives rise to the weighted hyperholomorphic Hilbert space \begin{align} \label{SeqCharGrgSpace} A^{2,\beta,\eta}_{slice}:= \left\{ \varphi(q)=\sum_{n=0}^\infty q^n c_n; \, \sum_{n=0}^\infty \gamma_n^{\beta,\eta} |c_n|^2 <\infty \right\}, \end{align} where $$ \gamma_n^{\beta,\eta} : = \frac{\Gamma(\eta)\Gamma(\beta+n)}{\Gamma(\beta+\eta+n)}.$$ It should be mentioned here that the monomials $e_n(q)= q^n$ form an orthogonal basis of $A^{2,\beta,\eta}_{slice}$ with square norm given by $$ \norm{e_n}_{\beta,\eta}^2 = \pi \gamma_n^{\beta,\eta}.$$ Moreover, appealing to the continuity of the evaluation linear form and the quaternionic version of the Riesz representation theorem, we claim that $A^{2,\beta,\eta}_{slice}$ is a reproducing kernel Hilbert space, whose kernel function is expressible in terms of the quaternionic Gauss hypergeometric function (of first kind) \begin{align} \label{SliceGauss} {_2F_1}^*\left( \begin{array}{c}a, b \\ c \end{array} \bigg | [p,q] \right) = \sum_{k=0}^\infty \frac{(a)_k (b)_k}{(c)_k} \frac{p^k q^k}{k!} \end{align} for $p,q\in \mathbb{B}$ and reals $a,b$ and $c$, where $(a)_k= a(a+1) \cdots (a+k-1)$ with $(a)_0=1$. The series in \eqref{SliceGauss} converges absolutely and uniformly on $K\times K'$ for any compact subsets $K,K'\subset \mathbb{B}$. The function ${_2F_1}^*$ can be seen as the slice regularization of the classical Gauss hypergeometric function with respect to the star product of slice functions, in order to get a left slice regular function in $p$ and a right slice regular one in $q$. Namely, we assert \begin{proposition}\label{kernel} The reproducing kernel of $A^{2,\beta,\eta}_{slice}$ is given by \begin{align} \label{RepKerSlice} K_{\beta,\eta}(p,q) = \frac{\Gamma(\beta+\eta)}{\pi \Gamma(\eta)\Gamma(\beta)} {_2F_1}^*\left( \begin{array}{c}1, \eta +\beta \\ \beta \end{array} \bigg | [p,\overline{q}] \right) . 
\end{align} \end{proposition} \begin{proof} The explicit expression of $K_{\beta,\eta}(p,q)$ follows easily since \begin{align}\label{RepKerExp} K_{\beta,\eta}(p,q) &= \frac{1}{\pi} \sum_{n=0}^\infty \frac{e_n(p) \overline{e_n(q)}}{\gamma^{\beta,\eta}_n} \nonumber \\& = \frac{\Gamma(\beta+\eta)}{\pi \Gamma(\eta)\Gamma(\beta)} \sum_{n=0}^\infty \frac{(\beta+\eta)_n}{(\beta)_n} e_n(p) \overline{e_n(q)}. \end{align} \end{proof} \begin{remark} For $\beta =1$, the space $A^{2,1,\eta}_{slice}$ is the one described in the introduction, $A^{2,1,\eta}_{slice} = A^{2,\eta}_{slice}$. Moreover, the kernel $K_{1,\eta}(p,q)$ reduces further to the reproducing kernel of the classical weighted Bergman space $A^{2,\eta}_{slice}$ given by \cite[Theorem 3.1]{ElkachGh2018} \begin{align} K_{1,\eta}(p,q) = \frac{\eta}{\pi} {_1F_0}^*\left( \begin{array}{c} \eta +1 \\ - \end{array} \bigg | [p,\overline{q}] \right) = \frac{\eta}{\pi} \left( 1 - 2 \Re(q) \overline{p} + |q|^2\overline{p}^2 \right)^{-\eta-1}. \end{align} The restriction of $K_{1,\eta}$ to $ \mathbb{B}_I$ coincides with the classical Bergman kernel $ K_{1,\eta}(z,w) = (\eta/\pi)( 1 - z\overline{w} )^{-\eta-1}$; $z,w\in \mathbb{B}_I$. \end{remark} \section{Boundedness of the dual transforms $S^{\alpha}_{y}$} We begin by noticing that the transform $ S^{\alpha}_{y}$ satisfies $ S^{\alpha}_{y}\varphi (pq) = S^{\alpha}_{y}\circ \mathcal{L}^\alpha_q (\varphi)(p)$ by means of the semi-group property $\mathcal{L}_p^\alpha\circ\mathcal{L}^\alpha_q =\mathcal{L}^\alpha_{pq} $ for the quaternionic fractional Hankel transform, as well as the eigenvalue equation $ S^{\alpha}_{y}( \varphi_n^{\alpha} ) = \varphi_n^{\alpha} (y) e_n$ since the normalized generalized Laguerre polynomials \begin{equation} \label{basisLaguerre} \varphi_n^{\alpha} (x) =\left( \frac{n!}{\Gamma(\alpha +n+1)}\right)^{1/2} L^{(\alpha)}_{n}(x) \end{equation} are solutions of $\mathcal{L}_q^\alpha(L ^{(\alpha)}_{n}) = q^{n}L^{(\alpha)}_{n}$. 
Moreover, the kernel function \begin{align}\label{KernelBessel} R_q^\alpha(x,y) &= \frac{1}{(1-q) \sqrt{q xy}^{\alpha}} \exp\left(-\frac{q(x+y)}{1-q}\right) I_\alpha\left( \frac{2\sqrt{qxy}}{1-q} \right), \end{align} for the transform $S^{\alpha}_{y}$ in \eqref{SIntT}, has the expansion series \cite{Namias1980b,ElkGhHa20}, \begin{align} \label{expKerRHH} R_p^\alpha(x,y)= \sum_{n=0}^\infty e_n(p) \varphi_n^{\alpha} (x) \varphi_n^{\alpha} (y) \end{align} which follows from the Hille--Hardy formula for the Laguerre polynomials \cite[(6.2.25) p. 288]{AndrewsAskeyRoy1999}. Such kernel function satisfies the following reproducing property. \begin{proposition}\label{KerKer} Let $K_{\beta,\eta}(p,q) $ be as in \eqref{RepKerSlice}. Then, for every $y\in (0,+\infty)$, we have \begin{align}\label{KernelRern} R_q^\alpha(x,y) = \int_{\mathbb{B}_I} \overline{K_{\beta,\eta}(p,q) } R_p^\alpha(x,y) \omega_{\beta,\eta}(|p|^2) d\lambda_I(p) \end{align} and \begin{align} \label{EqWD} R_{|q|^2}^\alpha(y,y) = \int_{\R^+} |R_q^\alpha(x,y) |^2 x^\alpha e^{-x} dx . \end{align} \end{proposition} \begin{proof} Both \eqref{KernelRern} and \eqref{EqWD} can be proved, at least formally, using the expansion series of the involved kernels given by \eqref{RepKerExp} and \eqref{expKerRHH}. \end{proof} Proposition \ref{KerKer} can be used to reprove the reproducing property satisfied by the functions in the range of $S^{\alpha}_{y}$ by means of the kernel $K_{\beta,\eta}$. 
Indeed, by rewriting $S^{\alpha}_{y}$ as $ S^{\alpha}_{y}\varphi (q) = \scal{\overline{R_q^\alpha(\cdot, y)},\varphi} _{L^{2,\alpha}_{\Hq}(\R^+)} $, inserting \eqref{KernelRern} and making use of Fubini theorem we obtain \begin{align*} S^{\alpha}_{y} ( \varphi)(q) &= \scal{ \scal{ K_{\bullet}^\alpha(\cdot,y) , K_{\beta,\eta}(\bullet ,q) }_{A^{2,\omega}_{slice}} ,\varphi (\cdot)}_{L^{2,\alpha}_{\Hq}(\R^+)} \\&= \scal{ K_{\beta,\eta}(\bullet ,q), \scal{ \overline{ K_{\bullet}^\alpha(\cdot,y) } ,\varphi(\cdot) }_{L^{2,\alpha}_{\Hq}(\R^+)} }_{A^{2,\omega}_{slice}} \\&= \scal{ K_{\beta,\eta}(\bullet ,q), S^{\alpha}_{y} ( \varphi)(\bullet) }_{A^{2,\omega}_{slice}} \end{align*} for every $\varphi \in L^{2,\alpha}_{\Hq}(\R^+)$. Furthermore, we get easily \begin{align} |S^{\alpha}_{y}\varphi (q)|^2 &\leq \scal{R_{q}^\alpha(\cdot,y), R_{q}^\alpha(\cdot,y)}_{L^{2,\alpha}_{\Hq}(\R^+)} \norm{\varphi}_{L^{2,\alpha}_{\Hq}(\R^+)}^2 \nonumber \leq R_{|q|^2}^\alpha(y,y) \norm{\varphi}_{L^{2,\alpha}_{\Hq}(\R^+)}^2 \label{ineqWD} \end{align} by means of Cauchy-Schwarz inequality and identity \eqref{EqWD}. This proves that the transform $S^{\alpha}_{y}$ is well defined on $L^{2,\alpha}_{\Hq}(\R^+)$. In addition, we have \begin{align*} \int_{\mathbb{B}_I} |S^{\alpha}_{y}\varphi (q)|^2 \omega (|q|^2) d\lambda_I(q) \leq \left( \int_{\mathbb{B}_I} R_{|q|^2}^\alpha(y,y) \omega (|q|^2) d\lambda_I(q)\right) \norm{\varphi}_{L^{2,\alpha}_{\Hq}(\R^+)}^2 . \end{align*} Accordingly, under the assumption that \begin{equation}\label{BoundeCond} \int_{\mathbb{B}_I} R_{|q|^2}^\alpha(y,y) \omega (|q|^2) dudv <+\infty; \quad q=u+Iv, \end{equation} the transform $S^{\alpha}_{y}$ is a bounded operator from $ L^{2,\alpha}_{\Hq}(\R^+)$ into $L^{2,\omega}_{\Hq}(\mathbb{B}_I)$. 
For $y=0$, the assumption \eqref{BoundeCond} reduces further to requiring that \begin{equation}\label{BoundeCondy0} \int_0^1 R_{t}^\alpha(0,0) \omega (t) dt = \frac{1}{2^\alpha \Gamma(\alpha+1)} \int_0^1 \frac{\omega (t)}{(1-t)^{\alpha+1}} dt \end{equation} be finite. The convergence of the integral in \eqref{BoundeCondy0} readily holds when $\eta > \alpha+1$ for the special case of $\omega(t) =\omega_{\beta,\eta}(t):=t^{\beta-1}(1-t)^{\eta-1}$ with $\alpha>-1$ and $\beta,\eta>0$. The next result extends this condition to include $y>0$. \begin{proposition}\label{thmBound} Let $\alpha>-1$, $\beta,\eta>0$ and $y\geq 0$. If in addition $\eta >\alpha +1$, then the integral operator $S^{\alpha}_{y}: L^{2,\alpha}_{\Hq}(\R^+) \longrightarrow L^{2,\omega_{\beta,\eta}}_{\Hq}(\mathbb{B}_I)$ is bounded. \end{proposition} \begin{proof} Let us denote the quantity in \eqref{BoundeCond} by $\ell_\alpha^{\beta,\eta}$ for $\omega =\omega_{\beta,\eta}$. Then, we have \begin{align*} \ell_\alpha^{\beta,\eta} &= \pi\int_0^1 R_{t}^\alpha(y,y) \omega_{\beta,\eta} (t) dt\\ &=\pi\int_0^1 t^{\beta-1} (1-t)^{\eta-\alpha-2} i_\alpha\left( \frac{2y\sqrt{t}}{1-t}\right) \exp\left( -\frac{2yt}{1-t}\right) dt \end{align*} where $i_\alpha(x) := (x/2)^{-\alpha}I_\alpha(x)$ is nonnegative and bounded on $\R^+$ by some constant $c_\alpha$. Thus, we have \begin{align*} \ell_\alpha^{\beta,\eta} &\leq c_\alpha \int_0^{1} t^{\beta-1} (1-t)^{\eta-\alpha-2} \exp\left( -\frac{2yt}{1-t}\right) dt \\& \leq c_\alpha \int_0^{+\infty} u^{\beta-1} (1+u)^{\alpha-\beta-\eta+1} \exp\left( -2y u\right) du. \end{align*} The last integral follows by making the change of variable $u= t/(1-t)$. It is clearly convergent when $\eta>\alpha+1$ and $\beta >0$. \end{proof} The next result refines the boundedness condition of $S^{\alpha}_{y}$ provided in the previous assertion. It shows that $\eta > \alpha+1$ can be relaxed. To this end, we distinguish the two cases $y=0$ and $y>0$. 
\begin{proposition}\label{thmBoundRefined} Let $\alpha>-1$, $\beta,\eta>0$ and $y\geq 0$. Then, the integral operator $S^{\alpha}_{y}: L^{2,\alpha}_{\Hq}(\R^+) \longrightarrow L^{2,\omega_{\beta,\eta}}_{\Hq}(\mathbb{B}_I)$ is bounded for any $y>0$. The boundedness of $S^{\alpha}_{y}$ at $y=0$ holds when $\eta\geq \alpha$. \end{proposition} \begin{proof} Set $$c^{\alpha,\beta,\eta}_n(y) := \pi \gamma_n^{\beta,\eta}\left| \varphi_n^{\alpha}(y)\right|^2 = \pi \frac{\Gamma(\eta)\Gamma(\beta+n)}{\Gamma(\beta+\eta+n)} \left| \varphi_n^{\alpha}(y)\right|^2 . $$ Then, for every $\varphi = \sum_{n=0}^\infty a_n \varphi_n^{\alpha} \in L^{2,\alpha}_{\Hq}(\R^+)$, we have $\norm{\varphi}_{L^{2,\alpha}_{\Hq}(\R^+)}^2 = \sum_{n=0}^\infty |a_n|^2 <+\infty$ and by means of \eqref{NormSalpha} we get \begin{align*} \norm{S^{\alpha}_{y} \varphi}_{\omega_{\beta,\eta}}^2 \leq \pi \sup_{n}\left( \gamma_n^{\beta,\eta} \left| \varphi_n^{\alpha}(y)\right|^2\right) \sum_{n=0}^\infty \left|a_n\right| ^2 \leq \sup_{n}\left( c^{\alpha,\beta,\eta}_n(y)\right) \norm{\varphi}_{L^{2,\alpha}_{\Hq}(\R^+)}^2. \end{align*} Subsequently, $ S^{\alpha}_{y} $ is bounded, as an operator from $ L^{2,\alpha}_{\Hq}(\R^+)$ into $L^{2,\omega_{\beta,\eta}}_{\Hq}(\mathbb{B}_I)$, if $\sup_{n} \left( c^{\alpha,\beta,\eta}_n(y)\right) $ is finite. For the special case of $y=0$ we have \begin{align*} c^{\alpha,\beta,\eta}_n(0) &= \frac{\pi\Gamma(\eta)}{\Gamma^2(\alpha+1)} \frac{\Gamma(\alpha+n+1)}{\Gamma(n+1)} \frac{ \Gamma(\beta+n)}{\Gamma(\beta+\eta+n)} \sim \frac{\pi\Gamma(\eta)}{\Gamma^2(\alpha+1)} n^{\alpha-\eta} \end{align*} for $n$ large enough, and therefore, its supremum is finite if and only if $\eta\geq\alpha$. Thus, $S^{\alpha}_{0}$ is bounded when $\eta\geq \alpha$. 
For arbitrary fixed $y>0$, there exists some positive constant $ M^{\alpha,\eta}(y)$, depending only on $\alpha,\eta$ and $y$, such that \begin{align}\label{asymcny} c^{\alpha,\beta,\eta}_n(y) \leq M^{\alpha,\eta}(y) n^{-\eta-1/2} \end{align} holds true for large $n$. This follows by making use of the asymptotic behavior for the gamma function as well as the one for generalized Laguerre polynomials \cite[p.245]{MagnusOberhettingerSoni1966} \begin{align}\label{asymcnLaguerre} L^{(\alpha)}_n(y) &= \frac{e^{y/2}}{\sqrt{\pi} y^{(2\alpha+1)/4}} n^{(2\alpha-1)/4} \cos\left( 2 \sqrt{n y} - \pi \frac{2\alpha+1}{4} \right) + O \left(n^{(2\alpha-3)/4} \right) . \end{align} Therefore, the quantity $\sup_{n} ( c^{\alpha,\beta,\eta}_n(y)) $ is clearly finite if $\eta\geq -1/2$ is assumed, which is satisfied since $\eta >0$. \end{proof} \section{The null space and the range of $S^{\alpha}_{y}$} Apparently, the description of the null space and the range of $S^{\alpha}_{y}$ depends on the set $ \mathcal{Z} \{L^{(\alpha)}_{n};\, n\}:= \cup_n \mathcal{Z} (L^{(\alpha)}_{n})$, where $\mathcal{Z} (L^{(\alpha)}_{n})$ denotes the zero set of $L^{(\alpha)}_{n}$. \begin{proposition} The null space of $S^{\alpha}_{y}$ in $L^{2,\alpha}_{\Hq}(\R^+)$ is spanned by $\varphi_n^{\alpha}$ with $n\in N_y^\alpha =\{ n; L^{(\alpha)}_{n}(y) = 0\}$, $ \ker(S^{\alpha}_{y})= span\{\varphi_n^{\alpha} ; \, n\in N_y^\alpha \}$. \end{proposition} \begin{proof} It is clear that $ span\{\varphi_n^{\alpha} ; \, n\in N_y^\alpha \} \subset \ker(S^{\alpha}_{y}) $, since $S^{\alpha}_{y} \varphi_n^{\alpha} =0$ for any $n\in N_y^\alpha$. Conversely, let $\varphi \in \ker(S^{\alpha}_{y}) = \{ \varphi \in L^{2,\alpha}_{\Hq}(\R^+) ; S^{\alpha}_{y}(\varphi)=0\}$ which we can expand as $ \varphi=\sum_{n=0}^\infty a_n \varphi_n^{\alpha} $, since the Laguerre functions in \eqref{basisLaguerre} form an orthonormal basis of $L^{2,\alpha}_{\Hq}(\R^+)$. 
Then, keeping in mind that $S^{\alpha}_{y} \varphi_n^{\alpha} = \varphi_n^{\alpha}(y)e_n $, we get $ S^{\alpha}_{y}(\varphi)= \sum_{n=0}^\infty a_n \varphi_n^{\alpha} (y) e_n $ and hence $0=\scal{ S^{\alpha}_{y}(\varphi), e_k} = \pi \overline{a_k} \varphi_k^{\alpha} (y) \gamma_k$. Hence, $a_k =0$ for any $k\notin N_y^\alpha$ and therefore $\varphi =\sum_{n\in N_y^\alpha} a_n \varphi_n^{\alpha} $. Moreover, the dimension of $\ker(S^{\alpha}_{y})$ is clearly given by $\dim(\ker(S^{\alpha}_{y}))= card(N_y^\alpha)$. \end{proof} \begin{remark} We notice that $\dim(\ker(S^{\alpha}_{y}))$ depends on $y$ and $\alpha$ and characterizes the number (finite or infinite) of generalized Laguerre polynomials that have $y$ as a common zero. Thus, for $y=0$, the set $N_0^\alpha$ is empty since $L^{(\alpha)}_{n}(0)\ne 0$ for any nonnegative integer $n$, so that $\dim(\ker(S^{\alpha}_{y}))=0$. By regarding the graphs of the generalized Laguerre polynomials we conjecture that $card(N_y^\alpha)$ (and hence $\dim(\ker(S^{\alpha}_{y}))$) is finite. \end{remark} The next result shows that the Hilbert space $A^{2,\omega}_{slice}$ shelters the range ${^cA}^{2,\omega}_{slice}: = S^{\alpha}_{y} (L^{2,\alpha}_{\Hq}(\R^+))$ of $S^{\alpha}_{y}$ acting on $L^{2,\alpha}_{\Hq}(\R^+)$. \begin{proposition}\label{range} Assume that \eqref{BoundeCond} holds, then for every $y\geq 0$ we have $ {^cA}^{2,\omega}_{slice} \subset A^{2,\omega}_{slice}$ and $\{e_n(q)=q^n; n\notin N_y^\alpha\}$ defines a complete orthogonal system in ${^cA}^{2,\omega}_{slice}$. 
\end{proposition} \begin{proof} Starting from $ S^{\alpha}_{y}(\varphi)= \sum_{n=0}^\infty a_n \varphi_n^{\alpha} (y) e_n $ for given $\varphi \in L^{2,\alpha}_{\Hq}(\R^+)$ and using the fact that $\scal{e_m,e_n}_{\omega}= \pi \gamma_{n} \delta_{m,n}$, we get easily \begin{align} \label{NormSalpha} \norm{S^{\alpha}_{y} \varphi}_{\omega}^2 = \pi \sum_{n=0}^\infty \gamma_{n} \left| \varphi_n^{\alpha}(y) a_n\right| ^2 <+\infty \end{align} under the assumption that $S^{\alpha}_{y}$ is bounded. Therefore, \begin{align} \label{Inclusion} S^{\alpha}_{y} (L^{2,\alpha}_{\Hq}(\R^+)) \subset \left\{ \sum_{n=0}^\infty q^n c_n; \, c_n \in \Hq ; q\in \mathbb{B}, \, \sum_{n=0}^\infty \gamma_{n} |c_n|^2 <\infty \right\} . \end{align} The right-hand side in \eqref{Inclusion} is exactly the sequential characterization of the weighted hyperholomorphic Bergman space $A^{2,\omega}_{slice}$ discussed in Section 2. \end{proof} \begin{remark}\label{RemIncomp} If $y$ is a positive zero of some Laguerre polynomial, then $N_y^\alpha$ is not empty and the corresponding monomials $e_n$; $n\in N_y^\alpha$, do not belong to ${^cA}^{2,\omega}_{slice}$. This shows that, in this case, ${^cA}^{2,\omega}_{slice}$ is strictly contained in $A^{2,\omega}_{slice}$. \end{remark} \begin{remark}\label{Remy0} For $y=0$, we have $\varphi_n^{\alpha}(0)\ne 0$ and then $N_0^\alpha$ is an empty set. Thus, we can show that for $\omega=\omega_{\beta,\eta}$ with $\beta=1$ and $\eta=\alpha$, we have $ {^cA}^{2,\omega}_{slice} = A^{2,\beta,\eta}_{slice}=A^{2,\alpha}_{slice}$ and hence $S^{\alpha}_{0}: L^{2,\alpha}_{\Hq}(\R^+) \longrightarrow A^{2,\beta,\eta}_{slice}$ is onto and is exactly the second Bargmann transform $\SHyperBTransR$ in \eqref{BargmannT} defining a unitary isometric transformation from $L^{2,\alpha}_{\Hq}(\R^+)$ onto the slice hyperholomorphic Bergman space $A^{2,\alpha}_{slice}$. 
\end{remark} \begin{remark}\label{Weightassump} For the general case, the converse inclusion in \eqref{Inclusion} requires a further assumption on the weight function. \end{remark} \section{Compactness and membership in $p$-Schatten class} \begin{proposition}\label{corcomp} The operator $ S^{\alpha}_{y} :L^{2,\alpha}_{\Hq}(\R^+) \longrightarrow A^{2,\beta,\eta}_{slice}$ is compact for any $y>0$. \end{proposition} \begin{proof} The compactness of $ S^{\alpha}_{y} $; $y>0$, follows by appealing to the spectral theorem \cite[Theorem 4.3.5]{HsingEubank2015}. In fact, $ S^{\alpha}_{y} $ is bounded and can be expanded as \begin{align*} S^{\alpha}_{y} \varphi &= \sum_{n=0}^\infty s_{n,y}^{\alpha} \frac{e_n }{\sqrt{\pi\gamma_n} } \scal{\varphi , \varphi_n^{\alpha} }_{L^{2,\alpha}_{\Hq}(\R^+)} \end{align*} where $(\varphi_n^{\alpha} )_n$ and $\left( {e_n }/{\sqrt{\pi\gamma_n} }\right)_n$ are orthonormal bases of $L^{2,\alpha}_{\Hq}(\R^+)$ and $A^{2,\omega}_{slice}$, respectively, and $s_{n,y}^{\alpha}:= \sqrt{\pi\gamma_n}\varphi_n^{\alpha}(y)$ tends to $0$ when the weight function $\omega_{\beta,\eta}$ is specified. This readily follows by making use of \eqref{asymcnLaguerre} thanks to \eqref{asymcny} since $$ |s_{n,y}^{\alpha}| = \sqrt{c^{\alpha,\beta,\eta}_n(y)} .$$ \end{proof} The membership of $ S^{\alpha}_{y} $ in the $p$-Schatten class is a direct consequence of the next two results. Recall for instance that a bounded operator $S$ is said to be a Schatten operator of class $p$ for $p\geq 1$ if its Schatten $p$-norm $$ \norm{S}_{p}:= \mbox{Tr} (|S|^{p})^{1/p} $$ is finite. \begin{lemma} \label{adjointS} The adjoint of $S^{\alpha}_{y}$ is given by \begin{equation}\label{adjointSexp} (S^{\alpha}_{y})^* G (x) = \int_{\mathbb{B}_I} K_{\overline{q} }^\alpha(x,y) G(q) (1-|q|^2)^{\alpha-1}d\lambda_I(q). 
\end{equation} \end{lemma} \begin{proof} For every $\varphi \in L^{2,\alpha}_{\Hq}(\R^+) $ and $ G \in A^{2,\omega}_{slice}$ we have \begin{align*} \scal{ S^{\alpha}_{y}\varphi,G}_{A^{2,\omega}_{slice}} & = \sum_{n=0}^\infty \varphi_n^{\alpha} (y) \scal{\varphi , \varphi_n^{\alpha} }_{L^{2,\alpha}_{\Hq}(\R^+) } \scal{ e_n , G }_{A^{2,\omega}_{slice}} = \scal{\varphi , (S^{\alpha}_{y})^* G}_{L^{2,\alpha}_{\Hq}(\R^+) }. \end{align*} This readily follows by applying Fubini's theorem. Therefore, the adjoint of $S^{\alpha}_{y}$ is given by \begin{align*} (S^{\alpha}_{y})^* G(x) = \scal{ \sum_{n=0}^\infty \varphi_n^{\alpha} (y) \varphi_n^{\alpha} e_n , G }_{A^{2,\omega}_{slice}} = \scal{ K(x,y| \cdot ) , G }_{A^{2,\omega}_{slice}} \end{align*} which reduces further to \eqref{adjointSexp} since $K(x,y| q)$ is exactly the kernel function in \eqref{KernelBessel}. \end{proof} \begin{proposition} \label{propsv} If $ S^{\alpha}_{y}$ is bounded, then its singular values are given by \begin{align}\label{ingeigenvS} |s_{n,y}^{\alpha}| = \left( \pi \gamma_n\right) ^{1/2} |\varphi_n^{\alpha} (y)|. \end{align} \end{proposition} \begin{proof} By Lemma \ref{adjointS}, we have $ (S^{\alpha}_{y})^* e_k = \pi \gamma_k \varphi^\alpha_k(y) \varphi^\alpha_k $, and therefore, the operator $(S^{\alpha}_{y})^* S^{\alpha}_{y} : L^{2,\alpha}_{\Hq}(\R^+) \longrightarrow L^{2,\alpha}_{\Hq}(\R^+)$ satisfies $$ (S^{\alpha}_{y})^* S^{\alpha}_{y} \varphi_n^{\alpha} = (S^{\alpha}_{y})^* \left( \varphi_n^{\alpha} (y) e_n\right) = \pi \gamma_n \left( \varphi_n^{\alpha} (y) \right)^2 \varphi_n^{\alpha} .$$ This is to say that the Laguerre functions $\varphi^{\alpha}_{n}$ in \eqref{basisLaguerre} constitute an orthogonal basis of $L^2$-eigenfunctions for $(S^{\alpha}_{y})^* S^{\alpha}_{y}$ with $\pi \gamma_n (\varphi_n^{\alpha} (y) )^2$ as corresponding eigenvalues. 
Therefore, the eigenvalues of $|S^{\alpha}_{y}|:= ( (S^{\alpha}_{y})^*S^{\alpha}_{y})^{1/2}$ are exactly those given through \eqref{ingeigenvS}. \end{proof} \begin{proposition}\label{thmSchayyen} Let $y>0$ and $p>4/(1+2\eta)$. Then, $ S^{\alpha}_{y} : L^{2,\alpha}_{\Hq}(\R^+) \longrightarrow A^{2,\beta,\eta}_{slice}$ is a Schatten operator of class $p$. \end{proposition} \begin{proof} The operator $S^{\alpha}_{y}$, $y>0$, is compact thanks to Proposition \ref{corcomp}. To conclude we need only to apply Proposition \ref{propsv} keeping in mind \eqref{asymcny} for large $n$. Thus, the singular values $s_{n,y}^{\alpha}$ satisfy \begin{align} |s_{n,y}^{\alpha}|=\sqrt{ c^{\alpha,\beta,\eta}_n(y)} = O(n^{-(2\eta+1)/4}). \end{align} Therefore, if $(1+2\eta)p>4$, then the series $\sum_{n=0}^\infty |s_{n,y}^{\alpha}| ^p$ converges and therefore $$\norm{S^{\alpha}_{y}}_{p}:= \mbox{Tr} (|S^{\alpha}_{y}|^{p})^{1/p} = \left( \sum_{n=0}^\infty |s_{n,y}^{\alpha}| ^p \right)^{1/p} <+\infty.$$ The second equality follows since $S^{\alpha}_{y}$ is compact from/into separable Hilbert spaces. \end{proof} \begin{remark} For $y=0$ and large $n$, we have \begin{align*} |s^\alpha_n(0)|=\sqrt{c^{\alpha,\beta,\eta}_n(0)}& \sim \frac{\sqrt{\pi\Gamma(\eta)}}{\Gamma(\alpha+1)}n^{(\alpha-\eta)/2} . \end{align*} Consequently, the transform $S^{\alpha}_{0}: L^{2,\alpha}_{\Hq}(\R^+) \longrightarrow A^{2,\beta,\eta}_{slice}$ is compact if and only if $\eta>\alpha$, since in this case $\lim\limits_{n \to +\infty} |s^\alpha_n(0)|=0$. Moreover, it is in the $p$-Schatten class if in addition $p>2/(\eta-\alpha)$. \end{remark} We conclude by providing the singular value decomposition of $S^{\alpha}_{y}$. Thus, we consider the mapping defined by $U^{\alpha}_{y}(\varphi_n^{\alpha} ) = 0$ for $n\in N_y^\alpha$ and $$U^{\alpha}_{y}(\varphi_n^{\alpha} ) = \frac{\varphi_n^{\alpha} (y)}{\sqrt{\pi \gamma_n} |\varphi_n^{\alpha} (y)|} e_n $$ otherwise. 
We extend $U^{\alpha}_{y}$ in a natural way to a linear mapping on the whole $ L^{2,\alpha}_{\Hq}(\R^+)$. By means of $ \norm{ e_n}_{L^{2,\omega}_{\Hq}(\mathbb{B}_I)}^2 = \pi \gamma_n $, we get $$\norm{U^{\alpha}_{y}(\varphi_n^{\alpha} )}_{L^{2,\omega}_{\Hq}(\mathbb{B}_I)} = \left|\frac{\varphi_n^{\alpha} (y)}{\sqrt{\pi \gamma_n} |\varphi_n^{\alpha} (y)|} \right| \norm{ e_n}_{L^{2,\omega}_{\Hq}(\mathbb{B}_I)} = 1. $$ Then, we claim that the following assertions hold true: \begin{enumerate} \item[i)] $U^{\alpha}_{y} : L^{2,\alpha}_{\Hq}(\R^+) \longrightarrow L^{2,\omega}_{\Hq}(\mathbb{B}_I) $ is a partial isometry. \item[ii)] We have $ \ker(S^{\alpha}_{y}) = \ker(U^{\alpha}_{y}) = span\{\varphi_n^{\alpha}; \, n\in N_y^\alpha \}$. \end{enumerate} Moreover, the singular value decomposition of the linear operator $S^{\alpha}_{y}$ is given by $$S^{\alpha}_{y} = U^{\alpha}_{y} |S^{\alpha}_{y} |.$$ This readily follows by direct computation. \bibliographystyle{amsplain}
1,108,101,565,820
arxiv
\section{Conclusions} \label{sec:conclusion} \begin{figure}[tbph] \begin{subfigure}[tbph]{0.5\textwidth} \includegraphics[width=\textwidth]{Figures/dx_compare.pdf} \end{subfigure} \begin{subfigure}[tbph]{0.5\textwidth} \includegraphics[width=\textwidth]{Figures/df_compare.pdf} \end{subfigure} \caption{Comparison of the measurements in this work with measurements from other reference cavity experiments, both in terms of length noise (top) and frequency noise (bottom). The traces show measurements from Numata~et~al.~\cite{Numata2003}, Black~et~al.~\cite{Black2004_1}, Ludlow~et~al.~\cite{Ludlow2007}, Alnis~et~al.~\cite{Alnis2008}, Webster~et~al.~\cite{Webster2007b}, Cole~et~al.~\cite{Cole2013}, Kessler~et~al.~\cite{Kessler2012b}, and the two measurements presented in this work.} \label{fig:comparisonplots} \end{figure} In summary, we have demonstrated a high-sensitivity system for measuring thermal noise from high-reflectivity silica/tantala coatings. The cavity spacers, the isolation system, the laser stabilization system, and the beat readout have been designed to push down known technical and environmental noise sources to below the expected thermal noise level. Indeed, in the band from 10\,Hz to 1\,kHz, the measured beat spectrum produces length fluctuation consistent with Brownian noise from the mirror coatings, with a loss angle of $\phi_\text{c} = 4\times10^{-4}$. The estimated loss angles $\phi_\text{L}$ and $\phi_\text{H}$ are high compared to other measurements given in the literature. For example, ringdown measurements by Penn et~al.~\cite{Penn2003}, Crooks et~al.~\cite{Crooks2004, Crooks2006}, and Li et~al.~\cite{Li2014} found $\phi_\text{L} < 1\times10^{-4}$ and $\phi_\text{H} \sim 4\times10^{-4}$. It is possible that these newer coatings were manufactured with better fabrication techniques, as manufacturers became more aware of coating thermal noise. 
In Figure~\ref{fig:comparisonplots}, we plot our measurements, along with measurements from other reference cavities, in both displacement and frequency noise. While other measurements have focused on attaining thermally limited noise performance at low frequencies (100\,Hz down to less than 10\,mHz) or high frequencies (100\,Hz up to 100\,kHz), the measurements presented in this paper are consistent with the thermal noise limit in an intermediate frequency band, from 10\,Hz to 1\,kHz, which is of direct interest to the current and future generations of gravitational wave detectors. Brownian noise in optical coatings is a significant limit in precision optical measurements because of the high mechanical loss angle in the amorphous coatings. Recent efforts have now begun to focus on other coating materials, such as monocrystalline Al$_x$Ga$_{1-x}$As heterostructure (AlGaAs). Measurements by Cole et~al.~\cite{Cole2013} on AlGaAs coatings with quarter-wavelength structures indicate the potential for a smaller thermal noise by almost an order of magnitude compared to that of silica/tantala coatings. The current sensitivity of the testbed mentioned in this paper will need several improvements to be able to measure thermal noise in AlGaAs coating with better SNR. The RIN suppression servo will need to be upgraded to reduce RIN-induced photothermal noise from DC to 30\,Hz. To overcome the readout noise from the PLL at frequencies above 1\,kHz, another means of frequency noise detection (for example, a homodyne detection system~\cite{Eichholz2013, hartman2014measurement}) may need to be considered. Since AlGaAs coatings may be used in third-generation gravitational-wave detectors~\cite{Rana:RMP2014}, it is important to characterize all fundamental noises associated with the coatings to thoroughly estimate the detector's sensitivity. There are still several issues regarding thermal noise calculation in AlGaAs coatings to be explored. 
First, the current theoretical calculations~\cite{Hong2013} of coating Brownian noise may have to be revised to include tensorial components in the elasticity equations; the current calculations assume that coating properties are isotropic in the amorphous thin films. Second, thermo-optic noise in AlGaAs coatings is predicted to be significant due to its high thermo-refractive coefficient. Since GaAs and AlAs have a high thermal conductivity compared to silica/tantala coatings, the assumptions used by Evans et~al.~\cite{Evans2008} to compute thermo-optic noise will no longer be accurate: corrections for a small spot size and low frequencies will be required. In addition, it seems possible to minimize thermo-optic noise by adjusting the crystalline coating structure~\cite{AlGaAs:TO}, so that the limiting noise floor of the coating can be further reduced. \section{Description of experiment} \label{sec:experiment} In this section we describe the testbed we have developed to measure the beat note fluctuation $S_{\hat{\nu}}(f)$ of our cavities. \subsection{Cavity as a frequency reference} Figure~\ref{fig:PDHblock} shows a block diagram of a laser that is frequency-locked to a reference cavity using Pound--Drever--Hall (PDH) locking~\cite{DH1983}. \begin{figure}[tbp] \centering \includegraphics[width = 0.4\textwidth]{./Figures/PDH_block_fixed.pdf} \caption{Block diagram of the PDH setup used for laser frequency locking. $\delta\nu$ is the free-running frequency noise of the laser, and $\delta\nu_\text{s}$ is the suppressed frequency noise, or the frequency noise of the transmitted beam behind the cavity. $\delta L$ is the cavity's length fluctuation, which is converted to frequency noise via the PDH lock. $D$ is the frequency discriminator, which uses an RF photodiode and associated demodulation electronics to convert frequency fluctuation into an error signal with voltage $N_\text{e}$. $G$ is the electronic gain of the servo. 
$A$ is the actuator, which takes the control signal voltage $N_\text{a}$ and actuates on the laser frequency. The fact that $A$ is summed with a minus sign indicates that negative feedback is occurring. The minus sign from $\delta L$ means the displacement noise of the cavity is compared to the laser frequency.} \label{fig:PDHblock} \end{figure} The laser has free-running noise $\delta\nu$. The frequency discriminator $D$, electronic servo gain $G$, and actuator response $A$ combine to produce the open-loop gain $H = DGA$. When the loop is engaged, the suppressed frequency noise $\delta\nu_\text{s}$ of the laser becomes \begin{subequations} \begin{align} \delta\nu_\text{s} &= \frac{\delta\nu}{1+H} + \frac{H}{1+H} \times \frac{c}{L\lambda} \delta L \\ &\approx \frac{\delta\nu}{H} + \frac{c}{L\lambda} \delta L \hspace{5em} \text{for } |H| \gg 1. \end{align} \label{eq:PDHnoise} \end{subequations} Within the loop bandwidth, where the magnitude $|H|$ of the open-loop gain is large, the displacement noise $\delta L$ of the cavity is impressed onto the frequency noise of the laser: $\delta\nu_\text{s} \approx (c/L\lambda) \delta L$. The power spectral density of the frequency noise is given by $S_\nu(f) = |\delta\nu_\text{s}|^2$. To measure the frequency noise of the laser when locked to the cavity, we compare the transmitted beam with another transmitted beam from a similar cavity with an independent, frequency-stabilized laser. Because of the slightly different lengths of the two cavities, the two beams have different frequencies, $\nu_1$ and $\nu_2$. When directed onto an RF photodiode, the combined beam results in a beat note with frequency $\hat{\nu} = \nu_1 - \nu_2$. The frequency noise of this beat note has a PSD $S_{\hat{\nu}} = S_{\nu_1} + S_{\nu_2}$. As described below, we read out this beat note using a phase-locked loop (PLL). \subsection{Setup} In this section, we describe two experimental setups used for observing coating thermal noise. 
The first setup measures the noise from two 20.3~cm reference cavities. The second setup, which is conceptually similar to the first one, measures coating thermal noise from two 3.68~cm reference cavities. \begin{figure}[tbp] \centering \includegraphics[width = 3.5in]{./Figures/setup1.pdf} \caption{The prototype one-laser setup for measuring the coating thermal noise of LIGO reference cavities. An Nd:YAG laser is stabilized to one reference cavity. The second beam is split from the main beam and locked to the second cavity after being double-passed through an acousto-optic modulator (AOM). The transmitted beams are used to measure the length noise by measuring their beat signal.} \label{fig:setup1} \end{figure} \subsubsection{One-laser setup} A diagram of the one-laser setup is shown in Figure~\ref{fig:setup1}. In this setup, both interrogation beams were provided by a single Nd:YAG non-planar ring oscillator (NPRO) laser with a vacuum wavelength of $\lambda = 1064$~nm. Approximately 1~mW of light was incident on each cavity, with visibility $\eta$ of more than 0.95. The main beam was frequency-locked to one of the cavities by actuating on the NPRO crystal with a piezo-electric transducer (PZT), as well as actuating on a broadband electro-optic modulator (EOM) placed in the optical path. For the second cavity, part of the laser beam was sent through a double-pass acousto-optic modulator (AOM) in order to frequency-shift the light before entering the cavity. Frequency locking to the second cavity was achieved by actuating on the AOM. Laser light was injected through the vacuum chamber windows and into the cavities, where it was kept resonant via the PDH locking technique. The photodiodes and electronics used to implement the frequency stabilization were designed to achieve a loop with unity-gain frequency (UGF) of nearly 1\,MHz, and to have a noise floor below the frequency noise of the cavities. 
The transmitted beams were recombined and directed onto an RF photodiode, producing an RF beat note measured with a PLL and a spectrum analyzer. \subsubsection{Test cavities} The reference cavities are formed by optically contacting laser mirrors to cylindrical fused-silica spacers. The mirror substrates are commercially available fused silica with a 25.4\,mm diameter and 6.4\,mm thickness, and with a 0.5\,m radius of curvature (ROC). The coatings were fabricated by Research Electro-Optics via ion-beam sputtering. They consist of 28 alternating layers of silica (SiO$_2$) and tantala (Ta$_2$O$_5$). The first 27 layers are each deposited to a thickness of $\lambda/4n$, where $n$ is the refractive index of the layer material. The final layer is silica, and in order to give the appropriate interference condition it is deposited to a thickness of $\lambda/2n$. The transmission of each mirror is approximately 300~ppm. Using these mirrors, we initially constructed two symmetric cavities using fused-silica spacers with length $L = 20.3$\,cm. Both substrates and spacers are made of fused silica because of its low mechanical loss and small thermal expansion coefficients. Each cavity is fitted with a pair of O-rings close to the cavity's Airy points. Each cavity sits on a pair of teflon blocks with a semicircular cut, and each block has a transverse V-shaped groove to keep an O-ring in place. The cavities are placed side by side on a double-stack seismic isolation platform. The resonances of this platform all lie below 10\,Hz. The cavities and the platform are housed inside a temperature-stabilized vacuum chamber with the pressure below $10^{-7}$ torr. The use of a single platform and chamber endows the beat measurement with some amount of common-mode rejection of seismic and ambient temperature noise. The optical table for the setup sits on pneumatic legs which have a resonant frequency around 1 Hz. 
\begin{figure*}[tbp] \centering \includegraphics[width=\textwidth]{./Figures/2laser_setup_ISS.pdf} \caption{Setup for measuring the coating thermal noise using two independent lasers. Each laser is stabilized to one of the two identical cavities. The readout scheme is the same as for the one-laser setup.} \label{fig:setup2} \end{figure*} \subsubsection{Two-laser setup} We found that the one-laser setup discussed above produced a measurement with a low signal-to-noise ratio. Therefore, we subsequently constructed two shorter cavities using similar mirrors from the same coating run and developed a two-laser setup shown in Figure~\ref{fig:setup2}. The use of shorter cavities increases the observed frequency noise, since $\delta\nu / \nu = \delta L / L$ for small cavity length fluctuations. There were several considerations that placed a lower limit for the allowable length of the new cavities. First, it should be possible to use a heater to tune each cavity length by half of a free spectral range, so that the beat note $\hat{\nu}$ can be brought within the bandwidth of the readout photodiode. A cavity that is too short would require excessive heating in order to achieve this. Additionally, the cavity must form a stable optical resonator. Finally, the length must be chosen so that no low-order transverse laser modes resonate simultaneously with the TEM$_{00}$ mode. With these considerations in mind, we chose a cavity length of 3.68\,cm. In addition, compared to the previous, longer cavities, these cavities have a smaller spot size. The combined effects of shorter length and smaller spot size mean that the observed coating Brownian noise should increase by a factor of 9, in accordance with eq.~\ref{eq:Nakagawa_BR_coat}. This setup is symmetric; the PDH error signal from each cavity is used to actuate on an independent NPRO and on a broadband EOM. 
The use of two lasers also allows a larger possible range for the beat frequency; in the previous setup, this was constrained by the operational range of the AOM. For each path, 1\,mW of light is incident on each cavity. The visibilities of both cavities exceed 0.9, indicating that the incident beams have a nearly Gaussian spatial mode and that the cavities are close to critically coupled. In this setup, the relative intensity noise (RIN) in both cavities becomes uncorrelated, and so an intensity stabilization system (ISS) is required. In each path, an electro-optic amplitude modulator (EOAM) is used to suppress the laser's RIN, and thereby decrease the photothermal noise to below the estimated thermal noise of the coatings. \subsubsection{Beat note frequency readout} To read out the beat note frequency, we use a phase-locked loop (PLL). A block diagram for the PLL is shown in fig.~\ref{fig:PLLblock}. The two transmitted beams are directed onto a single RF photodiode, where they beat against each other to produce an RF signal at approximately 100\,MHz. This signal is mixed with a voltage-controlled oscillator (VCO) of similar frequency and then low-passed at several megahertz, producing a baseband signal. We then amplify this signal (labeled $V_\text{fb}$ in the diagram) and use it as a control signal to actuate on the VCO, thereby forming a phase-locked loop. This control signal gives a linear readout of the frequency noise of the beat note, which is the incoherent sum of the displacement noise from the two cavities. The calibration to convert the voltage $V_\text{fb}$ to frequency fluctuation is measured by observing the output frequency of the VCO while varying the input voltage. The open loop gain of our PLL has a UGF of 50\,kHz. \begin{figure}[tbp] \centering \includegraphics[width=0.4\textwidth]{Figures/PLLsetup2.pdf} \caption{Block diagram of the phase-locked loop (PLL) used to read out the beat note fluctuation. 
The main noise sources associated with the PLL are photocurrent shot noise, $\delta s$; photodiode amplifier noise, $\delta n$; and VCO frequency noise, $\delta\nu$. Generally, $\delta s$ and $\delta n$ have flat spectral densities in terms of current and voltage, respectively. However, since the PLL is a phase detector whose output is then used to actuate on frequency, these noises contribute a frequency noise which rises with Fourier frequency $f$.} \label{fig:PLLblock} \end{figure} \subsection{Technical and environmental noise sources} \label{sec:tech_noise} Both setups discussed in the previous section have similar technical and environmental noise sources. \begin{figure}[tbp] \begin{subfigure}[tbp]{0.22\textwidth} \includegraphics[width=\textwidth]{Figures/cav_support.pdf} \caption{Top-down view} \label{fig:supports_top} \end{subfigure} \begin{subfigure}[tbp]{0.22\textwidth} \includegraphics[width=\textwidth]{Figures/cav_support_axial.pdf} \caption{Axial view} \label{fig:supports_axial} \end{subfigure} \caption{Cavity mounting and supports for 3.7\,cm cavities. The locations of the four contacts were chosen for superior rejection of vertical seismic noise, as determined by FEA simulation. In the axial view, the red circle is the thermal shield used for temperature control. In the top-down view, this shield is not shown.} \label{fig:supports} \end{figure} \subsubsection{Seismic and vibrational noise} Cavity bending due to vibration is known to cause significant displacement noise in a reference cavity. To minimize this effect, some groups have explored different methods for supporting laser reference cavities---for example, by cutting or drilling support points into the spacer, or by holding the cavity vertically~\cite{Nazarova2006, Webster2007a, Millo2009}. Based on our previous experience and FEA of the seismic coupling, we determined that the direct seismic coupling could be kept small enough with horizontal cavities with nodal supports. 
In the two-laser setup, each cavity is mounted on four supports cut from cylindrical rods and placed orthogonally to the spacer to achieve approximately a point contact. The support geometry is shown in Fig.~\ref{fig:supports}. The rods are made from polyether ether ketone (PEEK) because of its compatibility with high vacuum. The support positions were chosen based on ease of machining and on FEA of the susceptibility of the cavity to seismic noise. At the chosen spot, if we take mounting errors ($\pm\,0.5$\,mm) and common mode rejection into account, the coupling from acceleration into cavity strain is estimated to be $6\times10^{-12}\,\text{ m}^{-1}\,\text{s}^2$. \subsubsection{PDH shot noise} For each cavity, the ultimate lower limit to the laser's frequency noise suppression is set by the shot noise of the light falling on the RF photodiode when the cavity is on resonance. The PSD of this lower limit is~\cite{Fritschel:1998gr, Rana2004} \begin{equation} S_P^{(\text{PDHshot})}(f) = 2h\nu P_0 \bigl[J_0(\Gamma)^2(1-\eta) + 3J_1(\Gamma)^2\bigr], \label{eq:pdh_shot} \end{equation} where $h$ is the Planck constant, $\Gamma$ is the phase modulation index ($\Gamma \approx 0.2$~rad for our system), $\eta$ is the visibility, and $J_0$ and $J_1$ are the zeroth and first Bessel functions of the first kind, respectively. \subsubsection{Residual (RF) Amplitude Modulation} The EOM used to perform the PDH modulation was temperature-stabilized with insulation and a heater, and then the polarization of the beam was adjusted to minimize any residual amplitude modulation (RAM), which can add a false offset to the PDH error signal (see, e.g., the discussion by Ishibashi et~al.~\cite{Hall2002}) \subsubsection{Photothermal Noise} As discussed in section \ref{sec:noise_budget}, fluctuation in laser power changes the effective cavity length via the thermoelastic and thermorefractive coefficients. 
In the case of a beam whose intensity fluctuation is shot-noise limited, the photothermal noise is negligible compared to Brownian thermal noise and thermoelastic noise~\cite{BGV1999}. However, for a laser with significant intensity noise above the shot-noise limit, the photothermal noise can be much higher. In the case of the one-laser setup, this excess photothermal effect appears in both cavities as a common-mode noise. However, this is not the case for the two-laser setup, and so the photothermal effect has to be carefully characterized and factored into the noise budget. By using the EOAM in each path to modulate the input power (see Fig.~\ref{fig:setup2}), we can observe the corresponding modulation in the beat note frequency using the PLL readout. As shown in Figure~\ref{fig:farsi}, the results are comparable with the calculations given in Farsi et~al.~\cite{Farsi2012} with the assumption of 5\,ppm absorption on each mirror. Together with the measured RIN in the transmitted cavity beams, the estimated frequency noise due to RIN-induced photothermal noise can be added to the noise budget. \begin{figure}[tbp] \centering \includegraphics[width = 3.7in]{Figures/farsi_2.pdf} \caption{A swept sine measurement of beat note frequency fluctuation in response to RIN-induced photothermal noise. Both the amplitude (top) and phase response (bottom) agree with the calculations from Farsi et~al. For our coatings, the greatest effects are thermal expansion from substrate and coating.} \label{fig:farsi} \end{figure} \subsubsection{PLL noise} Noise sources add into the PLL at several points in the loop, as shown in Figure~\ref{fig:PLLblock}. In the photodiode, there is shot noise from the photocurrent ($\delta s$) and electronic noise from the internal amplifier ($\delta n$). Additionally, there is frequency noise from the VCO ($\delta\nu$). We have measured these noises and included them in the noise budget. 
\section{Introduction} Thermal noise is an important fundamental noise source in precision experiments. In the field of gravitational wave (GW) detection, thermal noise affects instruments such as Advanced LIGO, a large-scale Michelson interferometer with Fabry--P\'erot arm cavities 4\,km in length. Advanced LIGO will attempt to measure GW-induced spacetime fluctuations with a sensitivity of $1.4\times10^{-20}\text{ m/Hz}^{1/2}$ in the most sensitive band, around 200--500\,Hz. It is predicted that this sensitivity will be limited in part by thermal noise in the high-reflectivity coating of the mirrors~\cite{Harry2010}. Many groups have developed mathematical models to calculate coating thermal noise~\cite{Nakagawa2002, Harry2002, Somiya2009, Hong2013}. However, due to these coatings' multilayer structure and uncertainties in the thin film material parameters (e.g., Young's moduli, Poisson ratios, and mechanical loss angles), thermal noise in coatings has not yet been thoroughly understood. For this reason, an experiment which can measure coating thermal noise with high signal-to-noise ratio across a wide frequency band is necessary for a comprehensive verification of their performance. Previously, direct measurements of thermal noise have been carried out with free-space cavities formed from large, suspended mirrors (e.g., Numata et~al.~\cite{Numata2003} and Black et~al.~\cite{Black2004_1}). The nature of these suspensions is such that thermal noise can be observed only above a few hundred hertz; seismic motion becomes a limiting noise source at frequencies below 100\,Hz. On the other hand, in the field of optical frequency metrology, a fixed spacer Fabry--P\'erot cavity is typically used as a stable reference for laser frequency. 
By designing the shape of the spacer, and searching for vibration-insensitive support points, several groups have demonstrated that the total displacement noise of a rigid cavity can be very close to the thermal noise limit at frequencies around 0.01--1\,Hz~\cite{Ludlow2007, Alnis2008, Webster2007b}. However, none have reported Brownian thermal noise in the frequency band relevant to ground based GW detectors. These motivations have led us to develop an experiment that uses fixed-spacer cavities to directly observe thermal noise in mirror coatings from 10\,Hz to 1\,kHz. We demonstrate a method that can be used to measure thermal noise in SiO$_2$/Ta$_2$O$_5$ quarter-wavelength (QWL) coatings over two decades in frequency. \input{Theory} \input{NoiseBudget} \input{Experiment} \input{Results} \input{Conclusion} \begin{comment} \checkme{[We should say some words about how well the testbed will be able to perform with AlGaAs coatings, and what obvious upgrades are necessary to reduce technical noise to acceptable level.]} In summary, we have demonstrated a high sensitivity system for measuring the relative length fluctuation between two fused-silica Fabry--P\'erot cavities with high-reflectivity silica/tantala coatings. The cavity spacers, the isolation system, the laser stabilization system, and the beat readout have been designed to push down known technical and environmental noise sources to below the expected thermal noise level. Indeed, in the band from 10\,Hz to 1\,kHz, the measured beat spectrum produces length fluctuation consistent with Brownian noise from the mirror coatings, with a loss angle of $\phi_\text{c} = 4\times10^{-4}$. \emphb{Further directions} It is now recognized that silica/tantala coatings present a significant limitation on precision optical measurement. Recent efforts have now begun to focus on other coating materials, such as alternating layers of gallium arsenide (GaAs) and aluminum-doped gallium arsenide (AlGaAs). 
Measurements by Cole et~al.~\cite{Cole2013} on GaAs/AlGaAs coatings indicate a loss angle that is an order of magnitude lower than that of silica/tantala coatings. It is hoped that the testbed described in this paper will be used to characterize AlGaAs coatings for use in third-generation gravitational-wave detectors~\cite{Rana:RMP2014}. \end{comment} \begin{acknowledgments} The authors wish to thank Rich Abbott, Daniel Sigg, David Yeaton-Massey, Larry Price, Peter King, Matt Abernathy, Megan Daily, Raphael Cervantes, Sarah Terry, and Nicolas Smith-Lefebvre for useful discussions and technical help. We thank Borja Sorazu for careful reading of the manuscript. LIGO was constructed by the California Institute of Technology and Massachusetts Institute of Technology with funding from the National Science Foundation and operates under cooperative agreement PHY-0757058. This paper has LIGO Document Number LIGO-P1400072. \end{acknowledgments} \nocite{Bondu1996, Kessler2012b} \section{Noise budget for fixed-spacer Fabry--P\'erot cavities} \label{sec:noise_budget} \begin{table}[tbp] \centering \begin{tabular}{l l c c} \toprule Symb. & Description & Initial cav. & Short cav. \\ \colrule $L$ & Nominal spacer length & 20.3~cm & 3.68(3)~cm\footnote{Machining specification was $L = 1.45 \pm 0.01$ inches.} \\ $R_\text{sp}$ & Outer spacer radius & 25.4~mm\footnote{\label{fn:longspacer}LIGO internal document D980670.} & 19.0~mm \\ $r_\text{sp}$ & Inner spacer radius & 6.4~mm\textsuperscript{\ref{fn:longspacer}} & 5.1~mm \\ $R_\text{s}$ & Mirror substrate radius & \multicolumn{2}{c}{12.7~mm} \\ $\mathcal{R}$ & Mirror ROC\footnote{Uncertainty taken as 0.5\% of the nominal.} & \multicolumn{2}{c}{500(3) mm} \\ $\lambda$ & Laser wavelength & \multicolumn{2}{c}{1064~nm} \\ $w$ & Spot size on mirrors\footnote{Defined as the radius for which the intensity has fallen by $1/\mathrm{e}^2$ relative to the maximum intensity. 
Computed as $w = (\lambda\mathcal{R}/\pi)^{1/2}/(2\mathcal{R}/L-1)^{1/4}$.} & 290~{\textmu}m & 182.0(4)~{\textmu}m \\ $\mathcal{F}$ & Finesse & \multicolumn{2}{c}{$10\,000$} \\ $\mathcal{T}$ & Power transmission (per mirror) & \multicolumn{2}{c}{300~ppm} \\ $T$ & Cavity temperature & \multicolumn{2}{c}{306(1)~K} \\ \colrule $E_\text{s}$ & Substrate Young modulus\footnote{The quantities $E_\text{sp}$, $\sigma_\text{sp}$, etc., for the spacer are taken to be identical to the quantities for the substrate.} & \multicolumn{2}{c}{72(1)~GPa} \\ $\sigma_\text{s}$ & Substrate Poisson ratio & \multicolumn{2}{c}{0.170(5)} \\ $\phi_\text{s}$ & Substrate loss angle & \multicolumn{2}{c}{$1\times10^{-7}$} \\ $\kappa_\text{s}$ & Subst. therm. conduct. & \multicolumn{2}{c}{1.38~W/(m~K)} \\ $C_\text{s}$ & Substrate heat capacity & \multicolumn{2}{c}{$1.6\times10^6$~J/(K~m$^3$)} \\ $\alpha_\text{s}$ & Substrate CTE & \multicolumn{2}{c}{$5.1\times10^{-7}$~K$^{-1}$} \\ \colrule $E_\text{L}$ & Young modulus of silica & \multicolumn{2}{c}{72(1)\,GPa} \\ $E_\text{H}$ & Young modulus of tantala\footnote{Nominal value and uncertainty from Crooks et~al.~\cite[tab.~6]{Crooks2006}.} & \multicolumn{2}{c}{144(42)\,GPa} \\ $n_\text{L}$ & Silica index of refraction\footnote{\label{fn:index_ref}Values from Evans et~al.~\cite[tab.~II]{Evans2008}.} & \multicolumn{2}{c}{$1.45(1)$} \\ $n_\text{H}$ & Tantala index of refraction\textsuperscript{\ref{fn:index_ref}} & \multicolumn{2}{c}{$2.06(1)$} \\ $N$ & Number of coating layers\footnote{The first 27 layers are quarter-wavelength, and the top layer is a half-wavelength silica cap.} & \multicolumn{2}{c}{28} \\ $d$ & Coat. 
total thickness\footnote{Calculated as $d = 14 \lambda/4n_\text{Ta$_2$O$_5$} + (13+2)\lambda/4n_\text{SiO$_2$}$.} & \multicolumn{2}{c}{4.53(7)~{\textmu}m} \\ \botrule \end{tabular} \caption{Parameters for test cavities.} \label{tab:cavity_params} \end{table} In this section we present the assumptions and formulas used to generate the thermal noise contributions to the noise budget. Numerical values of the relevant parameters and symbols are given in table \ref{tab:cavity_params}. \subsection{Mirror substrate noise} \subsubsection{Substrate Brownian noise} Levin~\cite[eq.~2]{Levin1998} computed the Brownian noise for a mirror substrate in the limit that the spot size $w$ is much smaller than the radius $R_\text{s}$ of the mirror: \begin{equation} S_x^{(\text{subBr})}(f) = \frac{2 k_\text{B} T}{\pi^{3/2} f} \frac{\bigl(1-\sigma_\text{s}^2\bigr)\phi_\text{s}}{w E_\text{s}}. \label{eq:substrate_brownian} \end{equation} The spot size is defined as the $1/\mathrm{e}^2$ falloff in intensity. $E_\text{s}$, $\sigma_\text{s}$, and $\phi_\text{s}$ are, respectively, the Young modulus, Poisson ratio, and loss angle of the substrate. Later, Bondu et~al.~\cite[eq.~14]{Bondu1998} computed corrections to the above formula for the case when $w$ is not much smaller than $R_\text{s}$, but we have found that these corrections are not necessary for our system. \subsubsection{Substrate thermoelastic noise} The thermoelastic noise for a mirror substrate was computed by Braginsky et~al.~\cite{BGV1999} for the case of a half-infinite substrate in the adiabatic limit $\ell_\text{th} \ll w$, where $\ell_\text{th} = \sqrt{\kappa_\text{s}/(2 \pi C_\text{s} f)}$ is the thermal diffusion length at frequency $f$, and $\kappa_\text{s}$ and $C_\text{s}$ are, respectively, the thermal conductivity and the heat capacity per unit volume of the substrate. 
Non-adiabatic corrections for low frequencies and small beam sizes were computed by Cerdonio et~al.~\cite[eq.~20]{Cerdonio2001}: \begin{equation} S_x^{(\text{subTE})}(f) = \frac{4 k_\text{B} T^2}{\sqrt{\pi}} \frac{\alpha_\text{s}^2 \bigl(1+\sigma_\text{s}\bigr)^2 w}{\kappa_\text{s}} J\bigl(f/f_\text{T}\bigr), \label{eq:substrate_TE} \end{equation} where $f_\text{T} = \kappa_\text{s}/\pi w^2 C_\text{s}$, and $J(f/f_\text{T})$ is a non-elementary function whose asymptotes are $2/\bigl(3\sqrt{\pi f/f_T}\bigr)$ for $f/f_\text{T} \ll 1$ and $1/\bigl(f/f_T\bigr)^2$ for $f/f_\text{T} \gg 1$; the full expression is \begin{equation} J(f/f_\text{T}) = \left(\frac{2}{\pi}\right)^{1/2} \int\limits_0^\infty \mathrm{d}u \int\limits_{-\infty}^\infty \mathrm{d}v \; \frac{u^3 \mathrm{e}^{-u^2/2}}{(u^2 + v^2)\bigl[(u^2 + v^2)^2 + (f/f_\text{T})^2\bigr]}. \label{eq:Jint} \end{equation} \subsection{Noise in mirror coatings} \subsubsection{Coating Brownian noise} The Brownian thermal noise contribution of a thin film on a half-infinite substrate can be expressed as~\cite{Nakagawa2002} \begin{equation} \label{eq:Nakagawa_BR_coat} S_x^{\text{(cBR)}}(f) = \frac{4 k_B T}{\pi^2 f} \frac{(1 + \sigma_s)(1 - 2\sigma_s)}{E_s} \frac{d}{w^2} \phi_{c}, \end{equation} where $d$ is the total thickness of the coating, and $\phi_\text{c}$ is the coating's loss angle. This equation assumes that the elastic properties of substrate and the thin coating are the same, and that all the coating properties are isotropic. Due to the multilayer structure of the amorphous materials, the coating loss and elastic properties may be anisotropic. For this reason, authors such as Harry et~al.~\cite{Harry2002} decompose coating loss and elastic deformation into parallel ($\parallel$) and perpendicular ($\perp$) directions relative to the mirror normal. 
Then, in accordance with eq.~\ref{eq:elasticU}, the total dissipated energy can be written as $W_\text{diss} = 2\pi f (U_{\perp} \phi_{\perp} + U_{\parallel} \phi_{\parallel})$. However, as argued by Hong et al.~\cite{Hong2013}, $\phi_{\perp}$ and $\phi_{\parallel}$ are not a suitable choice to be consistently used as the loss angles of a material, since the corresponding energies $U_{\perp}$ and $U_{\parallel}$ can sometimes be negative. Instead, $W_\text{diss}$ should be decomposed into bulk (``B'') and shear (``S'') contributions: $W_\text{diss} = 2\pi f( U_\text{B} \phi_\text{B} + U_\text{S} \phi_\text{S})$. For SiO$_2$/Ta$_2$O$_5$ coatings, the individual loss angles (either $\phi_{\perp}$ and $\phi_{\parallel}$, or $\phi_\text{B}$ and $\phi_\text{S}$) are not well known, and knowledge of the individual material properties is also limited. These uncertainties will propagate forward toward the estimate of the loss angle~\cite{Hong2013}. In this work, we assume the equality of $\phi_\text{B}$ and $\phi_\text{S}$, but we stress that there is no fundamental reason to assume this, nor indeed is there reason to assume equality of the elastic parameters of the substrate and the coating. Nevertheless, if we assume that the coating is described by a single loss angle $\phi_\text{c}$, and that the elastic properties of the coating and substrate are similar, then the results of Harry et~al.~\cite{Harry2002} and Hong et~al.~\cite{Hong2013} reduce to eq.~\ref{eq:Nakagawa_BR_coat}. The ``coating loss angle'' $\phi_\text{c}$ as defined in equation~\ref{eq:Nakagawa_BR_coat} should be viewed not as a physical parameter, but as a figure of merit which is related to the various loss angles and material parameters of each coating material. 
\subsubsection{Coating thermo-optic noise} An expression for thermo-optic noise in coatings is given by Evans et~al.~\cite[eq.~4]{Evans2008}: \begin{equation} S_x^{(\text{cTO})}(f) = S_T(f) \, \Gamma_\text{tc} \, \left[\bar{\alpha}_\text{c} d - \bar{\beta}\lambda - \bar{\alpha}_\text{s} d C_\text{c} /C_\text{s}\right]^2. \label{eq:coating_TO} \end{equation} Here $S_T(f)$ is the temperature fluctuation of a bare substrate as sensed by an interrogating beam. In the adiabatic regime, it is given by~\cite{Levin2008} \begin{equation} S_T(f) = \frac{2 k_\text{B} T^2}{\pi^{3/2} w^2 \sqrt{\kappa_\text{s} C_s f}}. \label{eq:TO_spectrum1} \end{equation} $\Gamma_\text{tc}$ is a correction for $S_T(f)$ in the presence of a coating layer. The term in brackets in eq.~\ref{eq:coating_TO} determines how temperature fluctuation $S_T$ is converted into displacement fluctuation $S_x$. $\bar{\alpha}_\text{c}$, $\bar{\beta}$, and $C_\text{c}$ are the effective thermal expansion coefficient, effective thermorefractive coefficient, and heat capacity per unit volume of the coating. The quantities $\bar{\alpha}_\text{s}$ and $C_\text{s}$ are the thermal expansion coefficient and heat capacity per unit volume of the substrate. The complete formalism for computing the various thermal expansion and thermorefractive coefficients is summarized by Evans et~al.~\cite[appx.~A~and~B]{Evans2008}. 
\begin{comment} Here we only reproduce the approximate expression for $\bar{\beta}$ for the case of a highly reflective multilayer QWL coating with a $\lambda/2$ cap of the low-refractive-index material: \begin{equation} \label{eq:eff_beta} \bar{\beta} \approx \frac{B_\textsub{h} + B_\textsub{l}\bigl[2(n_\textsub{h}/n_\textsub{l})^2-1\bigr]}{4\bigl(n_\textsub{h}^2 - n_\textsub{l}^2\bigr)}, \end{equation} with $B_\textsub{x} = \beta_\textsub{x} + \bar{\alpha}_\textsub{x} n_\textsub{x}$ \footnote{As emphasized by Evans et~al.~\cite{Evans2008}, the term ``thermorefractive'' refers to the phase change inside the coating due to the change in the optical path length experienced by the beam. This length is affected by both thermal expansion (via $\alpha$) and thermorefraction (via $\beta$).}. In general, $\bar{\beta}$ can be calculated numerically for an arbitrary coating structure~\cite{Evans2008}. \end{comment} Similar to substrate thermoelastic noise, the temperature fluctuation in eq.~\ref{eq:TO_spectrum1} can be corrected for small beam size and low frequencies by extending the calculation by Braginsky et~al.~\cite{BGV2000}. The result is given by Martin~\cite[\S 3.3.2]{Mike2013}: \begin{equation} S_T(f) = \frac{2\sqrt{2} k_\text{B} T^2}{\pi \kappa_\text{s} w} M\bigl(f/f_\text{T}\bigr). \label{eq:TO_spectrum} \end{equation} $M(f/f_T)$ is a non-elementary function whose asymptotes are $\sqrt{\pi/2}$ for $f/f_T \ll 1$ and $(2f/f_\text{T})^{-1/2}$ for $f/f_T \gg 1$. The full expression is \begin{equation} M(f/f_\text{T}) = \real\left[ \int\limits_0^\infty \mathrm{d}u\; u\, \mathrm{e}^{-u^2/2} \sqrt{\frac{u^2 + i f / f_\text{T}}{u^4 + (f / f_\text{T})^2}}\right]. \label{eq:Mint} \end{equation} Note that $\Gamma_\text{tc}$ in Evans et~al.~\cite{Evans2008} is calculated assuming that $\ell_\text{th} \ll w$. 
For SiO$_2$/Ta$_2$O$_5$ QWL coatings, $\ell_\text{th} = (44 \text{ \textmu m}) \times \sqrt{(100 \text{ Hz})/f}$, as calculated using the material parameters of silica and tantala, along with the formalism described by Evans et~al.~\cite{Evans2008}. For a beam with spot size $w = 200 \text{ \textmu m}$, this correction factor should still be valid above 25~Hz. However, a thorough calculation has yet to be done. \begin{figure}[tbp] \centering \includegraphics[width = 3.5in]{./Figures/COMSOL_SP_BR.jpg} \caption{Image of FEA model used to predict thermal noise from the fused-silica cavity spacers. Left, 1/8 of the model is used with a symmetric boundary condition on three planes to reduce the computation time. Since most of the deformation will occur close to the applied force, to further minimize the calculation time, only the small volume at the center of the mirror has very fine mesh size while the mesh size is larger far away from the beam. Right, the deformation on the spacer due to the applied force on the mirror (not shown). This model can be used to calculate the elastic energy stored in the spacer.} \label{fig:comsolbr} \end{figure} \subsection{Noise in spacer} \subsubsection{Spacer Brownian noise} The length fluctuation due to Brownian noise in a cylindrically symmetric spacer of outer radius $R_\text{sp}$ and inner radius $r_\text{sp}$ was worked out by Kessler et~al.~\cite{Kessler2012a}, building on earlier work by Numata et~al.~\cite{Numata2004}: \begin{equation} S_x^{(\text{spBr})}(f) = \frac{4k_\text{B}T}{\pi f} \frac{L \phi_\text{sp}}{2\pi E_\text{sp} \bigl(R_\text{sp}^2 - r_\text{sp}^2\bigr)}. \label{eq:spacer_brownian} \end{equation} However, this formula assumes that the outer radii of the mirror and the spacer are the same, and are fully contacted. In general, the outer radius of the spacer is larger than the mirror radius, and only a thin annulus on the outer edge of the mirror is optically contacted to the spacer. 
To estimate the Brownian noise more accurately, an FEA simulation along with the direct approach is used to calculate the stored elastic energy (see fig.~\ref{fig:comsolbr}). Then, using eq.~\ref{eq:FDT2}, we obtain the displacement noise. The power spectral density of the displacement noise computed from the FEA is about a factor of 2 larger than that of eq.~\ref{eq:spacer_brownian}. \subsubsection{Spacer thermoelastic noise} To estimate the level of thermoelastic noise in the spacer, we follow the method outlined by Liu and Thorne~\cite[eq.~13]{Liu2000}: \begin{equation} S_x^{(\text{spTE})}(f) = \frac{2 k_\text{B} T}{\pi^2 f^2} \kappa_\text{sp} T \left[\frac{E_\text{sp} \alpha_\text{sp}}{(1-2\sigma_\text{sp}) C_\text{sp}}\right]^2 \int \frac{\bigl[\boldsymbol{\nabla} (\boldsymbol{\nabla}\cdot\mathbf{u}) \bigr]^2}{F_0^2} \; \mathrm{d}^3 r, \label{eq:spacer_TE} \end{equation} where $\mathbf{u}(\mathbf{r})$ is the displacement field of the spacer in response to a static pressure from a force $F_0$ applied to the mirror faces. To evaluate the integral in eq.~\ref{eq:spacer_TE}, we use the same FEA model as described above for computing the spacer Brownian noise. The calculation is performed under the adiabatic approximation, since the diffusion length $\ell_\text{th}$ is much smaller than the width of the contact area between the spacer and the mirror. For an annulus with a thickness of 2\,mm, the assumption $\ell_\text{th} \ll w$ should be valid down to a few millihertz. At very low frequencies, where the assumption on $\ell_\text{th}$ is not satisfied, the expected thermoelastic noise is smaller than the adiabatic prediction~\cite{Cerdonio2001}. \subsection{Photothermal Noise} Fluctuation in laser power, either from shot noise or from classical intensity noise, induces a local temperature change in both coating and substrate. 
Because of the thermal expansion and thermorefractive coefficients of the mirror substrate and the coating, the temperature gradient caused by the absorbed laser power couples into the cavity's displacement noise. This is called photothermal noise. As with thermo-optic noise, the effect in the substrate is mostly thermoelastic. This noise source was first considered in a restricted regime by Braginsky et~al.~\cite{BGV1999}. The full expression for photothermal noise in a mirror substrate, valid for small beam size and low frequencies, is~\cite{Cerdonio2001} \begin{equation} \label{eq:sub_photo} S_x^{\text{(PT)}}(f) = \frac{2}{\pi^2} \frac{(1+\sigma_\text{s})^2}{\kappa_\text{s}^2} \mathcal{S}_\text{abs} K(f/f_T), \end{equation} where \begin{equation} \label{eq:sub_photo_K} K(f/f_T) = \left| \frac{1}{\pi} \int\limits_0^\infty \mathrm{d}u \int\limits_{-\infty}^\infty \mathrm{d}v\, \frac{u^2 \mathrm{e}^{-u^2 / 2}}{(u^2 + v^2) (u^2 + v^2 + if/f_\text{T})}\right|^2 \end{equation} and \begin{equation} \mathcal{S}_\text{abs} = \delta P(f) \frac{2\mathcal{F}/\pi}{ 1 + (f/f_\text{cav})^2} \chi_\text{abs}. \end{equation} $\delta P(f)$ is the input power fluctuation, $\chi_\text{abs}$ is the absorption coefficient of the mirror, and $f_\text{cav} = f_\text{FSR} / (2\mathcal{F})$ is the cavity pole frequency. The effects from the coating (both thermoelastic and thermorefractive) were later included in the work of Farsi et~al.~\cite[appendix]{Farsi2012}, who treat all the contributions from substrate and coating coherently. We do not reproduce their formulas here. The effect can be measured directly by modulating the power of the laser and observing the corresponding length change of the cavity. Generally, relative intensity noise (RIN) in a laser is much higher than its shot noise limit and causes excessive photothermal noise. This will be discussed in section \ref{sec:tech_noise}. Here, $S_x^\text{(PT)}$ is the RIN-induced photothermal noise for a single mirror of a cavity. 
The noise on the two mirrors is coherent, and so the total effect on the cavity is $4S_x^\text{(PT)}(f)$. We do not consider photothermal effects in the cavity spacer, since these effects only occur at frequencies below our measurement band. \subsection{Total thermal noise in cavities} Finally, we note that the length noise $S_L$ of a Fabry--P\'erot cavity involves the sum of the contributions from two mirrors and a single spacer: \begin{align} S_L &= 2 S_x^{(\text{cBr})} + 2 S_x^{(\text{cTO})} + 2 S_x^{(\text{subBr})} + 2 S_x^{(\text{subTE})} \nonumber \\ &\hspace{2em} + S_x^{(\text{spBr})} + S_x^{(\text{spTE})} + 4S_x^{(\text{PT})}. \label{eq:cavity_length_noise} \end{align} In the subsequent sections, we consider a number of technical and environmental noise sources which must be added to $S_L$ in order to arrive at the experimentally measured noise spectrum. \section{Results} \label{sec:results} The measured PSD of the beat note frequency fluctuation $S_{\hat{\nu}}(f)$ is given by the sum of the cavity length noise $S_L(f)$ from both cavities, as well as the technical frequency noises: \begin{equation} S_{\hat{\nu}} = 2\left(\frac{c}{L\lambda}\right)^2 S_L(f) + S_{\hat{\nu}}^{(\text{tech})}(f) \label{eq:length_to_beat} \end{equation} where $S_{\hat{\nu}}^{(\text{tech})}$ contains the contributions from the residual frequency noise, PLL readout noise, and seismic noise. \begin{figure*}[tbp] \centering \includegraphics[width=0.85\textwidth]{./Figures/nb_beat_long.pdf} \caption{Amplitude spectral density $\sqrt{S_{\hat{\nu}}(f)}$ of beat note from 20.3~cm cavities using one-laser setup.} \label{fig:nb_beat_long} \end{figure*} \begin{figure*}[tbp] \centering \includegraphics[width=0.85\textwidth]{./Figures/nb_beat_short.pdf} \caption{Amplitude spectral density $\sqrt{S_{\hat{\nu}}(f)}$ of beat note from two 3.7~cm cavities using the two-laser setup. 
In the band from 10~Hz to 1~kHz, the ASD has a $1/f^{1/2}$ slope with an amplitude consistent with coating Brownian noise.} \label{fig:nb_beat_short} \end{figure*} \begin{figure*}[tbp] \centering \includegraphics[width=0.85\textwidth]{./Figures/beat_short_resid.pdf} \caption{Measured and estimated amplitude spectral density $\sqrt{S_{\hat{\nu}}(f)}$ of beat note from 3.7\,cm cavities using the two-laser setup, along with the residual. With the exception of isolated peaks, above 10\,Hz the residual lies about a factor of two lower than the measured noise.} \label{fig:setup2_resid} \end{figure*} The beat note fluctuation of the 20.3\,cm cavities using the one-laser setup is shown in Figure~\ref{fig:nb_beat_long}. In the band from 90\,--\,300\,Hz, the beat frequency noise has an amplitude spectral density $\sqrt{S_{\hat{\nu}}(f)}$ that is approximately $\bigl(8\times10^{-2} \text{ Hz}\bigr)/f^{1/2}$, although it is heavily contaminated by peaks. Conversion into single-cavity length noise via $\sqrt{2}c/L\lambda$ gives $\sqrt{S_L(f)} = \bigl(4\times10^{-17} \text{ m}\bigr)/f^{1/2}$. The beat note fluctuation of the 3.7\,cm cavities using the two-laser setup is shown in Figure~\ref{fig:nb_beat_short}, along with all the expected noise terms. In the region from 10\,--\,1000\,Hz, the beat fluctuation has an ASD of approximately $\sqrt{S_{\hat{\nu}}(f)} = \bigl(0.5 \text{ Hz}\bigr)/f^{1/2}$, which is equivalent to $\sqrt{S_L(f)} = \bigl(5\times10^{-17} \text{ m}\bigr)/f^{1/2}$. \subsubsection{Estimate of $\phi_\text{c}$} We perform a fit for $\phi_\text{c}$ (defined in equation \ref{eq:Nakagawa_BR_coat}) in the region from 50\,--\,500\,Hz, where the measured ASD appears to be dominated by coating thermal noise. We exclude bins near 60\,Hz and its harmonics. We write the total estimated noise as $S_{\hat{\nu}}^{(\text{est})} = S_{\hat{\nu}}^{(\text{cBr})} + S_{\hat{\nu}}^{(\text{other})}$, where $S_{\hat{\nu}}^{(\text{cBr})}$ is determined from eq.~\ref{eq:Nakagawa_BR_coat}. 
Then we perform a least squares fit of $S_{\hat{\nu}}^{(\text{meas})} - S_{\hat{\nu}}^{(\text{other})}$ to the functional form $A f^a$, for constant $A$ and $a$. We find $A = (0.261\pm 0.015)\,\text{Hz}^2$ and $a = -1.004\pm0.011$. Then from eq.~\ref{eq:Nakagawa_BR_coat}, we find $\phi_\text{c} = (4.43\pm0.25)\times10^{-4}$. In Figure~\ref{fig:setup2_resid}, we plot the measured length noise, the total noise predicted from the noise budget, and the residual, found by performing the quadrature subtraction $\sqrt{S_{\hat{\nu}}^{(\text{resid})}} = \bigl|S_{\hat{\nu}}^{(\text{meas})} - S_{\hat{\nu}}^{(\text{est})}\bigr|^{1/2}$. With the fitted loss angle, we calculate the coating thermal noise in the 20.3\,cm cavity, and plot it on the noise budget. This is shown in Figure~\ref{fig:nb_beat_long}. The measurement and the estimated total noise are in good agreement. This is strong evidence that both measurements are dominated by coating thermal noise, since the amplitude of the PSD scales correctly with the spot size. Additionally, our fitted loss angle $\phi_\text{c}$ is in good agreement with the results of Numata et~al.~\cite{Numata2003}, who found $\phi_\text{c} = 4\times10^{-4}$. Finally, the shape of the beat note ASD for our two-cavity measurement is close to $f^{-1/2}$, as predicted by Eq.~\ref{eq:Nakagawa_BR_coat}. 
\begin{figure*} \begin{subfigure}[tbp]{0.33\textwidth} \includegraphics[width=\textwidth]{Figures/prior_harry.pdf} \caption{Prior PDF, after Harry et~al.~\cite{Harry2002}} \label{fig:prior} \end{subfigure} \begin{subfigure}[tbp]{0.33\textwidth} \includegraphics[width=\textwidth]{Figures/likelihood.pdf} \caption{Likelihood} \label{fig:likelihood} \end{subfigure} \begin{subfigure}[tbp]{0.33\textwidth} \includegraphics[width=\textwidth]{Figures/posterior_joint_harry.pdf} \caption{Posterior PDF} \label{fig:posterior} \end{subfigure} \caption{Prior PDF, likelihood, and posterior PDF used for Bayesian estimation of the loss angles of silica and tantala.} \label{fig:bayesian} \end{figure*} \begin{figure} \centering \includegraphics[width=0.45\textwidth]{Figures/marginalized_posteriors.pdf} \caption{Marginalized posterior PDFs for $\phi_\text{L}$ (silica) and $\phi_\text{H}$ (tantala), with shaded regions demarcating the 16th, 50th, and 84th percentiles.} \label{fig:marginalized} \end{figure} \subsubsection{Estimate of $\phi_\text{L}$ and $\phi_\text{H}$} Given $\phi_\text{c}$, a knowledge of the parameters of our coatings, and prior observations of coating loss angles, we can make a Bayesian estimate of $\phi_\text{L}$ and $\phi_\text{H}$. To do this, we first write down a formula relating $\phi_\text{c}$, $\phi_\text{L}$ and $\phi_\text{H}$: \begin{equation} \mathcal{M} \phi_\text{c} = \Xi_\text{L} N_\text{L} d_\text{L} \phi_\text{L} + \Xi_\text{H} N_\text{H} d_\text{H} \phi_\text{H}. \label{eq:numata_hong_relation} \end{equation} Here $\mathcal{M} = (1 + \sigma_\text{s})(1 - 2\sigma_\text{s}) d / E_\text{s}$, $N_\text{L} = 15$, $N_\text{H} = 14$, $d_\text{L} = \lambda/4n_\text{L}$, and $d_\text{H} = \lambda/4n_\text{H}$. 
The coefficients $\Xi_\text{L}$ and $\Xi_\text{H}$ are found by combining Table~1, and Eqs.~94 and 96 from Hong et~al.~\cite{Hong2013}, assuming zero light penetration into the coating \footnote{For silica/tantala QWL coatings, most of the light penetrates only into the first few doublets}. These coefficients depend only on the coating parameters. Next we write down Bayes's theorem~\cite{vonToussaint2011}: \begin{equation} p(\phi_\text{L}, \phi_\text{H} | \hat{\phi}_\text{c}) = \ \frac{1}{Z}~\mathcal{L}(\phi_\text{L}, \phi_\text{H} | \hat{\phi}_\text{c})~p(\phi_\text{L}, \phi_\text{H}), \label{eq:bayes} \end{equation} where $Z$ is a normalization. As a prior, we use data from the ringdown measurements in Harry et~al.~\cite{Harry2002}, since these measurements were performed on coatings from the same manufacturer as in our experiment, and were made during a similar time period. Since Harry~et~al. performed a ringdown measurement, their quoted quantity $\phi_\parallel$ is distinct from $\phi_\text{c}$, and is related to the material loss angles $\phi_\text{L}$ and $\phi_\text{H}$ via \begin{equation} (E_\text{L} d_\text{L} + E_\text{H} d_\text{H}) \phi_\parallel = E_\text{L} d_\text{L} \phi_\text{L} + E_\text{H} d_\text{H} \phi_\text{H}. \label{eq:phipara} \end{equation} We use $\hat{\phi}_\parallel \pm \sigma_{\hat{\phi}_\parallel} = (5.2 \pm 0.8)\times10^{-4}$ as the value measured by Harry et~al.~\footnote{Harry originally determined $\hat{\phi}_\parallel \pm \sigma_{\hat{\phi}_\parallel} = (1.0 \pm 0.3) \times 10^{-4}$ using a coating thickness that was 5 times the actual value. Taking into account the correction given in Penn et~al.~\cite{Penn2003}, we reanalyze Harry's ringdown data to arrive at $\hat{\phi}_\parallel \pm \sigma_{\hat{\phi}_\parallel} = (5.2\pm0.8)\times10^{-4}$.}. 
We then construct the prior \begin{equation} p(\phi_\text{L}, \phi_\text{H}) = \frac{1}{Z_0}\exp\left[-\frac{1}{2} \frac{(\hat{\phi}_\parallel - \phi_\parallel)^2}{\sigma_{\hat{\phi}_\parallel}^2 + \sigma_{\phi_\parallel}^2}\right], \label{eq:prior} \end{equation} where $Z_0$ is a normalization, $\phi_\parallel$ is related to $\phi_\text{L}$ and $\phi_\text{H}$ via eq.~\ref{eq:phipara}, and $\sigma_{\phi_\parallel}$ is found by propagating forward the uncertainties on the material parameters as given in our Table~\ref{tab:cavity_params}. As a likelihood we take \begin{equation} \mathcal{L}(\phi_\text{L}, \phi_\text{H} | \hat{\phi}_\text{c}) = \exp\left[-\frac{1}{2} \frac{(\hat{\phi}_\text{c} - \phi_\text{c})^2}{\sigma_{\hat{\phi}_\text{c}}^2 + \sigma_{\phi_\text{c}}^2}\right] \label{eq:likelihood} \end{equation} with $\hat{\phi}_c$ given by our measurement, and $\phi_c$ given by Equation~\ref{eq:numata_hong_relation}. The prior, the likelihood, and the resulting posterior are shown in Figures~\ref{fig:prior}--\ref{fig:posterior}. In Figure~\ref{fig:marginalized}, we show the marginalized posterior PDFs for each loss angle. For silica, we find the maximum \emph{a posteriori} (MAP) estimate for the loss angle $\phi_\text{L}$ is $1.1\times10^{-4}$, and the values for the 16th, 50th, and 84th percentiles are $0.7\times10^{-4}$, $2.2\times10^{-4}$, and $4.1\times10^{-4}$, respectively. Likewise, for tantala, the MAP estimate for the loss angle $\phi_\text{H}$ is $7.8\times10^{-4}$, and the 16th, 50th, and 84th percentile values are $4.9\times10^{-4}$, $7.2\times10^{-4}$, and $9.2\times10^{-4}$, respectively. 
The median (50th percentile) estimates for $\phi_\text{L}$ and $\phi_\text{H}$ are in agreement with the values that result from treating eqs.~\ref{eq:numata_hong_relation} and \ref{eq:phipara} as a system of two equations in two unknowns and solving for $\phi_\text{L}$ and $\phi_\text{H}$ (and propagating uncertainties accordingly); the results are $(2.0\pm2.2)\times10^{-4}$ and $(7.4\pm2.7)\times10^{-4}$, respectively. \section{Theory of Thermal Noise} \label{sec:theory} In this section we describe the fluctuation-dissipation theorem and its use in calculating thermal noise. \subsection{Fluctuation-Dissipation Theorem} Analysis of thermal noise begins with the fluctuation-dissipation theorem (FDT)~\cite{Callen1952}, which states that the more heavily damped a system is when driven by an external force, the noisier it is when sitting in its quiescent state. The single-sided PSD of the system's generalized displacement $x(t)$ is given by \begin{equation} S_x(f) = \frac{k_\text{B} T}{\pi^2 f^2} \bigl|\real[Y(f)]\bigr|, \label{eq:fdt} \end{equation} where $Y = 1 / Z$ is the mechanical admittance. We define the system's mechanical impedance $Z$ as the complex frequency-domain response $F(f)/\dot{x}(f)$, where $F$ is the generalized force conjugate to $x$~\cite{Saulson1990}. In considering the Brownian noise of a LIGO mirror, Saulson~\cite{Saulson1990} found an expression for $S_x(f)$ by computing $\bigl|\real[Y(f)]\bigr|$ separately for each of the normal modes contributing to the strain of the mirror. However, this method is computationally expensive, and the result is not guaranteed to converge \cite{Levin1998}. Instead of using modal expansion, one can use the so-called ``direct approach'' to compute $\bigl|\real[Y(f)]\bigr|$. This was introduced by Gonz\'{a}lez and Saulson~\cite{Gonzalez1994} for computing thermal noise in suspensions, and was later applied to a laser mirror by Levin~\cite{Levin1998}. 
In this approach, one calculates the thermal noise by applying a cyclic force, which causes power dissipation in a lossy system. With the FDT, the dissipated power $W_\text{diss}$ and the PSD $S_x$ are related by \begin{equation} \label{eq:FDT2} S_x(f) = \frac{2 k_B T}{\pi^2 f^2} \frac{W_\text{diss}}{F_0^2}, \end{equation} where $F_0$ is the magnitude of the applied force used to calculate the dissipated power. In the case of a mirror whose position is interrogated by a laser beam, the cyclic ``force'' applied is a pressure with the same profile as the intensity of the beam. \subsection{Types of Thermal Noise} There are two known sources of thermal noise present in extended solid systems: mechanical loss and thermal dissipation. Mechanical loss is responsible for \emph{Brownian noise}. Thermal dissipation leads to temperature fluctuation, which in an optical system is converted to position fluctuation via the optic's coefficient of thermal expansion (CTE) $\alpha = (1/L)\partial L/\partial T$ and its thermorefractive coefficient $\beta = \partial n/\partial T$. The noise of this position fluctuation is called \emph{thermo-optic noise}. \subsubsection{Brownian noise} Mechanical loss arises from the microscopic structure of a material, such as impurities or dislocations. It is represented by introducing an imaginary part to the Young's modulus of the material: $E = E_0(1+i\phi)$. The quantity $\phi$ is referred to as the \emph{loss angle}, and in general may have a frequency dependence. When a sinusoidal force is applied to a system with mechanical loss, the dissipated power due to the applied force is \begin{equation} \label{eq:elasticU} W_\text{diss} = 2\pi f U_0 \phi, \end{equation} where $U_0$ is the maximum energy of elastic deformation~\cite{Levin1998}. 
If one is interested only in frequencies $f$ below the first mechanical resonance frequency of the system (as is the case with our reference cavities), it is sufficient to compute the stored energy $U_0$ in the presence of a static force. The problem of evaluating $W_\text{diss}$ then reduces to a single elastostatics computation, which can be carried out using finite-element analysis (FEA) if necessary. Together with eq.~\ref{eq:FDT2}, one can then calculate the Brownian contribution to the apparent position fluctuation of the mirror as sensed by a laser beam interrogating the mirror surface. \subsubsection{Thermo-optic noise} In contrast to Brownian noise, thermo-optic noise is related to thermal, rather than mechanical, dissipation; it arises from fluctuation in the temperature field $T(\mathbf{r},t)$ throughout the mirror~\cite{Zener1938}. To compute thermo-optic noise using the direct approach, one can apply either an imaginary force~\cite{Liu2000, Somiya2009} or imaginary heat~\cite{Levin2008, Evans2008} to the mirror's surface; the results will be the same if the stress inside the coating is uniform~\cite{Somiya2009}. The applied force will cause temperature gradients inside the mirror through the equation of static stress balance. Then, the temperature perturbation evolves according to the thermal diffusion equation (see, e.g., the treatment by Liu and Thorne~\cite{Liu2000} or Cerdonio et~al.~\cite{Cerdonio2001}). Finally, the power dissipation due to the heat flow caused by the temperature gradient is given by the expression \cite[eq.~35.1]{L&L} \begin{equation} \label{eq:LL} W_\text{diss} = \left\langle T\frac{dS}{dt} \right\rangle = \left\langle \int \frac{\kappa}{T} (\boldsymbol{\nabla} \delta T)^2 \mathrm{d}^3 r \right\rangle. \end{equation} Here $T$ is the unperturbed temperature of the system and $\delta T$ is the temperature perturbation due to the applied force $F_0$. 
The entropy $S$ of the system changes due to the heat flux $-\kappa \boldsymbol{\nabla}(\delta T)$, and $\langle \cdots \rangle$ denotes an average over the period of oscillation of the force. By substituting eq.~\ref{eq:LL} into eq.~\ref{eq:FDT2}, we can obtain the temperature fluctuation on the mirror sensed by a Gaussian laser beam. This fluctuation couples into the electromagnetic response of the mirror via the CTE and $\partial n/\partial T$. In the literature, the term ``thermoelastic noise'' refers to the effect from the change in position of the mirror surface due to thermal expansion of a substrate and coating \cite{BGV1999, Liu2000, Cerdonio2001, Fejer2004}. On the other hand, ``thermorefractive noise'' refers to the phase fluctuation of the beam as it propagates through or reflects off the mirror, and it is a combined effect of both the CTE and $\partial n/\partial T$. For a Fabry--P\'{e}rot cavity with mirrors fabricated from multilayer dielectric coatings, thermorefractive noise in the substrate is much smaller than that in the coating~\cite{Heinert2011}: the beam passes through each substrate only once, but it reflects off the multilayer coating multiple times as it circulates inside the cavity. Thus, for our experiment, we take thermorefractive noise into account only in the coating. Since both thermoelastic and thermorefractive noises have a common origin, they are computed in a coherent fashion and the combined effect is called thermo-optic noise~\cite{Evans2008}. For substrates and spacers, only thermoelastic noise will be considered. \begin{comment} In contrast to Brownian noise, thermo-optic noise is thermodynamical in origin. It arises from fluctuation in the temperature field $T(\mathbf{r},t)$ throughout the solid~\cite{BGV1999}. 
This fluctuation couples into the electromagnetic response of the solid in two ways: through the coefficient of thermal expansion (CTE) $\alpha = (1/L)\partial L/\partial T$, and through the temperature coefficient $\beta = \partial n/\partial T$ of the refractive index. For Fabry--P\'{e}rot cavities and similar systems, thermorefractive noise is expected to be an important source of noise only in the mirror coatings, where the transmissivity is a sensitive function of the refractive indices of the quarter-wavelength layers. Typically (\checkme{?}), the thermoelastic and thermorefractive contributions to $S_x$ are computed independently, with no consideration for possible interaction between the two. However, these two effects can be treated in coherent fashion, in which case one anticipates that some cancellation occurs (see, e.g., the work of Evans et~al. \cite{Evans2008}). In computing thermo-optic noise, the FDT is not applied by computing fluctuations in $x$ in response to the force $F$; rather, one takes as a generalized coordinate the mirror's temperature field, $T$, and the corresponding generalized force is the entropy, $S$. The FDT is used to compute the spectrum $S_T(f)$ of the mirror's temperature, which is then propagated forward to the readout variable $x$ via $\alpha$ and $\beta$, as described by Evans et~al.~\cite{Evans2008}. \end{comment}
1,108,101,565,821
arxiv
\section{Introduction} This paper is about the link between automorphic forms and infinite dimensional algebras. It is primarily an exposition of joint work \cite{HVEaccepted} of the author with R. Heluani, which specifically relates certain automorphic forms on the group $SL_2(\Z) \ltimes \Z^2$ called Jacobi forms to vertex algebras equipped with an $N=2$ structure. Being of an expository nature, we have taken the opportunity to make some digressions; in particular to discuss an interpretation (Section \ref{Section.Ramanujan}) of Ramanujan's differential equations as an expression of the `Virasoro uniformisation' of the moduli space of elliptic curves with local coordinate. The first instance of the aforementioned link was uncovered by Kac and Peterson \cite{KP84}, who used the Weyl-Kac character formula to express characters of integrable modules over affine Kac-Moody algebras in terms of theta functions. Another perspective was later adopted by Zhu \cite{Zhu96}, who proved that characters of suitable conformal vertex algebras are classical modular forms on the group $SL_2(\Z)$. Zhu proceeded by analysing $\CD$-modules associated with the vertex algebra on families of elliptic curves, establishing in particular a certain $SL_2(\Z)$-equivariance. We study vertex algebras which admit the richer structure of $N=2$ superconformal symmetry. These give rise to $\CD$-modules on families of elliptic supercurves, and we show these $\CD$-modules to be equivariant under a certain $SL_2(\Z) \ltimes \Z^2$-action. In referring to the $N=2$ superconformal symmetry algebra, whose origin lies in theoretical physics, we mean the Lie superalgebra $\widehat{W}^{1|1}$ with explicit basis and relations as given in Section \ref{Section.SUSY.VA}. To it there is an associated family $L(\widehat{W}^{1|1})_c$ of simple vertex algebras (see Example \ref{Example.SUSY}) depending on an auxiliary parameter $c$ called the central charge. 
For generic $c$ the representation theory of $L(\widehat{W}^{1|1})_c$ is rather complicated. However for $c(u) = 3 - 6/u$, where $u \in \Z_{\geq 2}$, it turns out that $L(\widehat{W}^{1|1})_{c(u)}$ has precisely $u(u-1)/2$ irreducible modules $L_u(j, k)$, parameterised by the set of pairs $(j, k) \in \Z^2$ where $j \geq 0$, $k \geq 1$ and $j+k < u$. There is an explicit formula for the graded superdimensions of these modules too \cite{Matsuo} \cite{KRW03}, viz. \begin{align}\label{N2.char.explicit} \str_{L_u(j, k)} q^{L_0} y^{J_0} = q^{\frac{jk}{u}} y^{\frac{j-k+1}{u}} P_{j, k}^{(u)} / P_{1/2, 1/2}^{(2)}, \end{align} where \begin{align*} P_{j, k}^{(u)} = \prod_{n=1}^\infty \frac{(1-q^{u(n-1)+j+k})(1-q^{un-j-k})(1-q^{un})^2}{(1-q^{un-j}y)(1-q^{u(n-1)+j}y^{-1})(1-q^{un-k}y^{-1})(1-q^{u(n-1)+k}y)}. \end{align*} Now the normalised functions $y^{c(u)/6} \str_{L_u(j, k)} q^{L_0} y^{J_0}$ span a vector space which turns out to be invariant under an action of the group $SL_2(\Z) \ltimes \Z^2$ (specifically the weight $0$ index $c/6$ Jacobi action (\ref{Jacobi.action})). In other words the span of the normalised graded superdimensions is a vector valued Jacobi form. The question is to explain this fact conceptually. In \cite{HVEaccepted} we showed that the picture outlined above relates to a general phenomenon: for any vertex algebra equipped with an `$N=2$ superconformal structure' (of which $L(\widehat{W}^{1|1})_c$ above is an example) the normalised graded superdimensions satisfy $SL_2(\Z) \ltimes \Z^2$-invariant differential equations. The key observations are as follows. \begin{enumerate} \item Jacobi forms are essentially sections of vector bundles on the moduli space of pairs $(E, \LL)$, where $E$ is an elliptic curve and $\LL$ a holomorphic line bundle on $E$. \item Such pairs can be reinterpreted as certain special $1|1$-dimensional supercurves. 
\item A vertex algebra $V$ equipped with a suitable $N=2$ superconformal structure `localises' nicely to give a $\CD$-module $\CC$ (known as `conformal blocks') on the moduli space of such supercurves. \item If $V$ is well-behaved, the normalised graded superdimensions of $V$-modules (generalising the left of (\ref{N2.char.explicit})) converge in the analytic topology and yield horizontal sections of $\CC$. \end{enumerate} The issue of convergence is technical. In the appendix to \cite{HVEaccepted} we establish the convergence subject to the well known (to vertex algebraists) condition of $C_2$-cofiniteness. The proof involves analysis of the coefficients of the differential equations corresponding to $\CC$. The key points are to show that these coefficients lie in a certain ring of quasi-Jacobi forms, and to establish that this ring is Noetherian. For careful statements of results and complete proofs we refer the reader to \cite{HVEaccepted}. \emph{Acknowledgements.} I would like to thank the organisers of the 2014 intensive period `Perspectives in Lie Theory' at CRM Ennio De Giorgi, where this work was presented, and CAPES-Brazil for financial support. \section{Notation} In addition to standard symbols such as $\C$, $\Z_+ = \{0, 1, 2, \ldots\}$, $\partial_z = \frac{\partial}{\partial z}$, etc., we shall use the following notation without further comment: $\OO = \C[[z]]$ the ring of formal power series in one variable, $\mathfrak{m} = z\C[[z]]$ its maximal ideal, and $\CK = \C((z))$ the ring of Laurent series. The supercommutative algebra $\OO^{1|1}$ is by definition $\OO \otimes \bigwedge[\theta]$, i.e., is obtained by adjoining to $\OO$ a single odd variable $\theta$ satisfying $\theta^2=0$. Similarly we have $\mathfrak{m}^{1|1} = \mathfrak{m} \otimes \bigwedge[\theta]$ and $\CK^{1|1} = \CK \otimes \bigwedge[\theta]$. 
The structure sheaf, tangent sheaf, cotangent sheaf, and sheaf of differential operators of a (super)scheme $X$ are denoted $\OO_X$, $\Theta_X$, $\Om_X$, and $\CD_X$, respectively. \section{Superschemes and Elliptic Supercurves}\label{Section.Curves} The picture to keep in mind of a complex supermanifold (of dimension $m|n$) is that of a space on which the Taylor expansion of a function in terms of local coordinates $z_1, \ldots, z_m$, $\theta_1, \ldots, \theta_n$ lies in the supercommutative ring $\C[[z_i]] \otimes \bigwedge[\theta_j]$. We refer the reader to {\cite{Manin.Gauge}} for background on superalgebra and supergeometry. A superscheme is formally defined {\cite[Chapter 4]{Manin.Gauge}} to be a topological space $X_\text{top}$ together with a sheaf $\OO_X$ of supercommutative local rings, such that the even part $(X_\text{top}, \OO_{X, 0})$ is a scheme. Morphisms are required to be $\Z/2\Z$-graded. The bulk $X_\text{rd}$ of a superscheme $X$ is the scheme $(X_\text{top}, \OO_X / \mathcal{J})$ where $\mathcal{J} = \OO_{X, 1} + \OO_{X, 1}^2$. A (complex) supercurve is a smooth superscheme over $\spec{\C}$ of dimension $1|n$. In this article we shall concern ourselves with $1|1$-dimensional complex supercurves, and we shall generally work in the analytic topology. Let $X_0$ be a smooth curve and $\LL$ a holomorphic line bundle on $X_0$. We may construct a $1|1$-dimensional supercurve $X$ from this data by putting \[ \OO_X = \bigwedge \LL[-1] = \OO_{X_0} \oplus \LL, \] with $\Z/2\Z$-grading induced by cohomological degree. Any even family of $1|1$-dimensional complex supercurves is of the above form. 
Indeed, for a $1|1$-dimensional supercurve defined over a base superscheme $\spec{R}$, transformations between coordinate charts take the general form \begin{align}\label{gen.11.xform} \begin{split} z' &= f_{11}(z) + f_{12}(z) \theta, \\ \theta' &= f_{21}(z) + f_{22}(z) \theta, \end{split} \end{align} where $f_{11}$, $f_{22}$ are power series whose coefficients are even elements of $R$, and $f_{12}$, $f_{21}$ are power series whose coefficients are odd elements of $R$. If the base ring $R$ contains no odd elements then $f_{12}$ and $f_{21}$ vanish, (\ref{gen.11.xform}) is linear in $\theta$ and comprises the \v{C}ech cocycle description of a line bundle $\LL$, and the supercurve is consequently of the form $\bigwedge{\LL[-1]}$. Recall the set $\Pic(X)$ of isomorphism classes of holomorphic line bundles on a smooth curve $X$, and its subset $\Pic_0(X)$ of line bundles of degree $0$. As is well known {\cite[Appendix B.5]{Hartshorne}} there is a natural bijection $\Pic(X) \cong H^1(X, \OO_X^*)$, and the exponential exact sequence \[ 0 \rightarrow \underline{\Z} \rightarrow \OO_X \rightarrow \OO_X^* \rightarrow 0 \] yields the following morphisms in cohomology \[ H^1(X, \underline{\Z}) \rightarrow H^1(X, \OO_X) \rightarrow H^1(X, \OO_X^*) \rightarrow H^2(X, \underline{\Z}). \] The last map here assigns a line bundle its degree, and the kernel $\Pic_0(X)$ is identified with the quotient \[ H^1(X, \OO_X) / H^1(X, \underline{\Z}) \] which is a complex torus of dimension $g$, where $g$ is the genus of $X$. An elliptic curve is a smooth complex curve of genus $1$, together with a marked point. We shall define an elliptic supercurve to be a supercurve $X$ of dimension $1|1$ whose bulk $X_\text{rd}$ has genus $1$, together with a marked point. Let $\HH$ denote the complex upper half plane, and let $z$ be the standard coordinate on $\C$ which we fix once and for all.
The trivial family $\HH \times \C \rightarrow \HH$ carries the action $(m, n) : (z, \tau) \mapsto (z+m\tau+n, \tau)$ of $\Z^2$ and the quotient together with marked point $z=0$ is a family of elliptic curves, which we denote $E \rightarrow \HH$. Quite generally {\cite[Appendix to \S 2]{Mumford.abelian.var}}, for $X$ a topological space with a free discontinuous action of a discrete group $G$, and $\CF$ a sheaf on the quotient space $X/G$ (and with $\pi : X \rightarrow X/G$ the quotient), there is a natural map \[ H^\bullet(G, \G(X, \pi^* \CF)) \rightarrow H^\bullet(X/G, \CF), \] from group cohomology to sheaf cohomology. In case $X$ is a fibre $\C_\tau$ of the trivial family above, this map is an isomorphism. An element $\al \in \C$ defines a group $1$-cocycle $c_\al : \Z^2 \rightarrow \G(\C_\tau, \OO^*)$ by $(m, n) \mapsto e^{2\pi i m \al}$. We denote by $\LL_\al \in \Pic_0(E_\tau)$ the corresponding line bundle on $E_\tau$. Let $S^\circ = \HH \times \C$. We denote by $E^\circ \rightarrow S^\circ$ the family whose fibre over $(\tau, \al)$ is the elliptic supercurve corresponding to $(E_\tau, \LL_\al)$. The group $SL_2(\Z)$ acts on $E \rightarrow \HH$ in such a way as to identify fibres isomorphic as elliptic curves. We now have the following $1|1$-dimensional analogue. \begin{prop}\label{Jacobi.action.on.family} The formulas \begin{align*} A : (t, \zeta, \tau, \al) &\mapsto \left( \frac{t}{c\tau+d}, e^{-2\pi i t \frac{c\al}{c\tau+d}}\zeta, \frac{a\tau+b}{c\tau+d}, \frac{\al}{c\tau+d} \right) \\ (m, n) : (t, \zeta, \tau, \al) &\mapsto (t, e^{2\pi i m t} \zeta, \tau, \al+m\tau+n), \end{align*} where $A \in SL_2(\Z)$ and $m, n \in \Z$, extend to a left action on $E^\circ \rightarrow S^\circ$ of the semidirect product group \[ SL_2(\Z) \ltimes \Z^2 \quad \text{where} \quad (A, x) \cdot (A', x') = (AA', xA' + x'). 
\] The restriction of the action of $g \in SL_2(\Z) \ltimes \Z^2$ to the fibre $E_{(\tau, \al)}$ is an isomorphism $E_{(\tau, \al)} \cong E_{g \cdot (\tau, \al)}$ of supercurves. \end{prop} Every elliptic supercurve associated to an elliptic curve $E$ and its degree $0$ line bundle $\LL$, appears as a fibre of $E^\circ \rightarrow S^\circ$. However $E^\circ \rightarrow S^\circ$ is not a universal family in the sense that it does not `see' families over odd base schemes. We denote by $\mathbb{A}^{1|1}$ the superscheme whose set of $R$-points is $\spec{R[z, \theta]}$. In fact we distort convention a little by fixing a choice $z, \theta$ of coordinates, in particular our $\mathbb{A}^{1|1}$ has a distinguished origin, and we denote by $(\mathbb{A}^{1|1})^\times$ the subscheme with this origin removed. We then have the algebraic supergroup $GL(1|1)$ of linear automorphisms acting on $(\bA^{1|1})^\times$. The trivial family $(\bA^{1|1})^\times \times GL(1|1) \rightarrow GL(1|1)$ carries the action $n : (x, \mathbf{q}) \mapsto (\mathbf{q}^n x, \mathbf{q})$ of $\Z$. We restrict to the subscheme $S^\bullet \subset GL(1|1)$ consisting of automorphisms with nonzero even reduction, then the quotient by $\Z$ is a family $E^\bullet \rightarrow S^\bullet$ of elliptic supercurves. The distinguished point is $(z, \theta) = (1, 0)$. We introduce the morphism $\sexp : E^\circ \rightarrow E^\bullet(\C)$ of $\C$-schemes defined by \begin{align}\label{sex.defined} (t, \zeta, \tau, \al) \mapsto \left( e^{2\pi i t}, e^{2\pi i t} \zeta, \twobytwo{q}{0}{0}{qy} \right). \end{align} The notation $q = e^{2\pi i \tau}$, $y = e^{2\pi i \al}$ used here will be in force throughout the paper. \begin{rem} There is a quite distinct notion of supercurve, which we recall here for the sake of avoiding confusion. 
A $\text{SUSY}_n$ curve {\cite[Chapter 2, Definition 1.10]{Manin.noncomm}} consists of a $1|n$-dimensional supercurve $X$ together with the extra data of a rank $0|n$ subbundle $T \subset \Theta_X$ such that the alternating form \[ \varphi : T \otimes T \xrightarrow{[\cdot, \cdot]} \Theta_X \longrightarrow \Theta_X / T \] is nondegenerate and split. There is a forgetful functor from the category of $\text{SUSY}_n$ curves to that of $1|n$-dimensional supercurves. On the other hand, there turns out to be a nontrivial \emph{equivalence} (due to Deligne {\cite[p.~47]{Manin.noncomm}}) between the category of all $1|1$-dimensional supercurves, and the category of `orientable' $\text{SUSY}_2$ curves. We describe the correspondence briefly. Let $(X, T)$ be a $\text{SUSY}_2$ curve. Locally there is a splitting of $T$ as a direct sum of rank $0|1$-subbundles, each isotropic with respect to $\varphi$. If this can be extended to a global splitting, then we say $(X, T)$ is orientable. Suppose this is the case, and let $T_1 \subset T$ be an isotropic subbundle. Set $\ov{X}$ to be the superscheme $(X_\text{top}, \OO_X / T_1 \cdot \OO_X)$. Then $\ov{X}$ is a $1|1$-dimensional supermanifold, and $X$ can be recovered uniquely from $\ov{X}$. Much of the theory discussed below extends straightforwardly to $1|n$-dimensional supercurves, and to $\text{SUSY}_n$ curves. \end{rem} \section{The Bundle of Coordinates} In this section and the two subsequent ones we outline the basics of `formal geometry'. This theory, which goes back to \cite{GeK}, provides a bridge between representation theory of infinite dimensional algebras and geometry of algebraic varieties. The book \cite{FBZ} contains a good introduction for the case of curves. We focus on the case of $1|1$-dimensional supercurves. The basic object of formal geometry is the `set of all coordinates' on a variety $X$, denoted here by $\coord_X$.
It may be defined precisely either as the subscheme of the jet scheme \cite{EM05} consisting of jets with nonzero differential, or as the fibre bundle with fibre at $x \in X$ the set of choices of generator of $\mathfrak{m}_x$ (where $\mathfrak{m}_x$ is the unique maximal ideal of the local ring $\OO_x$ at $x$). For the case of $X$ a supercurve of dimension $1|1$ we have the noncanonical isomorphism $\OO_x \cong \OO^{1|1}$ at each point $x \in X$. Each fibre therefore carries a simply transitive action of the supergroup $\aut{\OO^{1|1}}$ by changes of coordinates, in other words $\coord_X$ is a principal $\aut{\OO^{1|1}}$-bundle. This supergroup consists of transformations \begin{align*} z &\mapsto a_{0, 1} \theta + a_{1, 0} z + a_{1, 1} z \theta + a_{2, 0} z^2 + a_{2, 1} z^2 \theta + \cdots, \\ \theta &\mapsto b_{0, 1} \theta + b_{1, 0} z + b_{1, 1} z \theta + b_{2, 0} z^2 + b_{2, 1} z^2 \theta + \cdots \end{align*} where $\twobytwo{a_{0, 1}}{a_{1, 0}}{b_{0, 1}}{b_{1, 0}} \in GL(1|1)$. As such the corresponding Lie superalgebra $\der_0{\OO^{1|1}}$ of derivations preserving $\mathfrak{m}^{1|1}$ has basis \begin{equation} \label{n2.plus.basis} \begin{aligned} L_n &= -z^{n+1}\partial_z - (n+1)z^n \theta \partial_\theta, & J_n &= -z^n \theta \partial_\theta, \\ Q_n &= -z^{n+1} \partial_\theta, & H_n &= z^n \theta \partial_\theta, \end{aligned} \end{equation} where $n \in \Z_+$. The Lie bracket is the usual bracket of vector fields \cite{H07}. \section{Superconformal Algebras and SUSY Vertex Algebras}\label{Section.SUSY.VA} The Lie superalgebra $\der_0{\OO^{1|1}}$ embeds into $\der{\CK^{1|1}}$, and we obtain a basis of the latter by extending (\ref{n2.plus.basis}) to $n \in \Z$. This algebra admits a central extension \[ 0 \rightarrow \C C \rightarrow \widehat{W}^{1|1} \rightarrow \der \CK^{1|1} \rightarrow 0, \] which splits over $\der_0{\OO^{1|1}}$. Several distinct bases of $\widehat{W}^{1|1}$ appear in the literature. 
Relative to our choice the explicit relations are as follows {\cite[(2.5.1c)]{HK07}}. \begin{align*} [L_m, L_n] &= (m-n) L_{m+n}, & [L_m, J_n] &= -n J_{m+n} + \delta_{m, -n} \frac{m^2+m}{6} C, & \\ [L_m, H_n] &= -nH_{m+n}, & [L_m, Q_n] &= (m-n)Q_{m+n}, & \\ [J_m, J_n] &= \delta_{m, -n} \frac{m}{3} C, & [J_m, Q_n] &= Q_{m+n}, & \\ [J_m, H_n] &= -H_{m+n}, & [H_m, Q_n] &= L_{m+n} - m J_{m+n} + \delta_{m, -n} \frac{m^2-m}{6} C. \end{align*} \begin{rem} In the $1|0$-dimensional setting we have the analogous Virasoro extension \[ 0 \rightarrow \C C \rightarrow \vir \rightarrow \der{\CK} \rightarrow 0, \] with relations \[ [L_m, L_n] = (m-n) L_{m+n} + \delta_{m, -n} \frac{m^3-m}{12}C. \] The naive map $L_n \mapsto L_n$, $C \mapsto C$ is not an embedding of Lie algebras $\vir \hookrightarrow \widehat{W}^{1|1}$, but the map $L_n \mapsto L_n - \frac{1}{2}(n+1)J_n$, $C \mapsto C$ is. \end{rem} Though we will not be using vertex algebras until Section \ref{section.cb}, this is a convenient place to give their definition. To avoid clutter we present only the definition of `$N_W=1$ SUSY vertex algebra', which is the variant relevant for us. See \cite{KacVA} and {\cite{HK07}} for the general picture. \begin{defn}\label{def.SUSY.va} An $N_W=1$ SUSY vertex algebra is a vector superspace $V$, a vector $\vac \in V$, linear operators $S, T : V \rightarrow V$, and an even linear map $V \otimes V \rightarrow V \widehat{\otimes} \CK^{1|1}$ which is denoted \[ a \otimes b \mapsto Y(a, Z)b = Y(a, z, \theta)b \] These structures are to satisfy the following axioms. \begin{enumerate} \item $Y(\vac, Z) = \text{Id}_V$, and $Y(a, Z)\vac = a \bmod{(V \widehat{\otimes} \mathfrak{m}^{1|1})}$. \item The series \[ Y(a, Z) Y(b, W)c, \quad (-1)^{p(a)p(b)} Y(b, W) Y(a, Z)c, \quad \text{and} \quad Y(Y(a, Z-W)b, W)c \] are expansions of a single element of $V \widehat{\otimes} \CK^{1|1} \otimes_{\C[z, w]} \C[(z-w)^{-1}]$. 
\item $[T, Y(a, Z)] = \partial_z Y(a, Z)$ and $[S, Y(a, Z)] = \partial_\theta Y(a, Z)$. \end{enumerate} \end{defn} The notion of conformal structure (i.e., compatible $\vir$-action) on a vertex algebra permits connection with the geometry of algebraic curves via formal geometry {\cite[Chapter 6]{FBZ}}. Similarly important in the context of $1|1$-dimensional supercurves is the notion of superconformal structure on a SUSY vertex algebra \cite{H07}. \begin{defn}[{\cite{HK07}}]\label{def.superconf} A superconformal structure on the $N_W=1$ SUSY vertex algebra $V$ is a pair of vectors $j$ and $h$ (even and odd respectively) such that the following associations furnish $V$ with a $\widehat{W}^{1|1}$-module structure: \begin{align*} Y(j, Z) = J(z) - \theta Q(z), \quad Y(h, Z) = H(z) + \theta [L(z) + \partial_z J(z)], \end{align*} and \begin{align*} J(z) &= \sum_{n \in \Z} J_n z^{-n-1}, & Q(z) &= \sum_{n \in \Z} Q_n z^{-n-2}, \\ H(z) &= \sum_{n \in \Z} H_n z^{-n-1}, & L(z) &= \sum_{n \in \Z} L_n z^{-n-2}. \end{align*} It is further required that $T = L_{-1}$, $S = Q_{-1}$, and that $V$ be graded by finite dimensional eigenspaces of $L_0, J_0$, with integral eigenvalues bounded below. For the vector $b \in V$ satisfying $L_0b = \Delta b$, we write $o(b) \in \en{V}$ for the $z^{-\Delta}\theta$ coefficient of $Y(b, z, \theta)$. \end{defn} There is a natural notion of module over a SUSY vertex algebra. In the superconformal case we include in the definition $L_0$- and $J_0$-grading conditions analogous to those that appear in Definition \ref{def.superconf}. \begin{exmp}\label{Example.SUSY} Let $M(h, m, c)$ denote the Verma module $U(\widehat{W}^{1|1}) \otimes_{U(\widehat{W}^{1|1}_+)} \C{v}$, where the action on $v$ is by $C = c$, $L_0 = h$, $J_0 = m$, $Q_0 = 0$, and all positive modes act by $0$. Let $L(h, m, c)$ denote the unique irreducible quotient of $M(h, m, c)$. 
Then $M(0, 0, c)$ and $L(\widehat{W}^{1|1})_c = L(0, 0, c)$ have unique superconformal vertex algebra structures such that $v = \vac$, $j = J_{-1}v$, $h = H_{-1}v$. \end{exmp} \section{Harish-Chandra Localisation}\label{section.HC} Let $K$ be a Lie group, $Z$ a principal $K$-bundle on a smooth manifold $S$, and $V$ a left $K$-module. The familiar associated bundle construction produces a vector bundle $\V = Z \times_K V$ on $S$ (recall by definition $Z \times_K V$ is $Z \times V$ modulo the relation $(zg, v) = (z, gv)$). If $\dim S = n$ then $S$ carries a canonical $GL(n)$-bundle, namely the frame bundle, whose fibre at $s \in S$ is the set of all bases of the tangent space $T_sS$. Associated with the defining $GL(n)$-module $\R^n$ is the tangent bundle $\Theta_S$, and with its dual $(\R^n)^*$ the cotangent bundle $\Om_S$. The functor of Harish-Chandra localisation extends the associated bundle construction, enabling the construction of vector bundles with connection (more properly $\CD$-modules) from $K$-modules with the action of an additional Lie algebra. See {\cite[Chapter 17]{FBZ}} and {\cite[Section 1.2]{BD.Hitchin}} for the general theory. \begin{defn} A Harish-Chandra pair $(\g, K)$ consists of a Lie algebra $\g$, a Lie group $K$, an action $\text{Ad}$ of $K$ on $\g$, and a Lie algebra embedding $\lie{K} \hookrightarrow \g$ compatible with $\text{Ad}$. A $(\g, K)$-module is a vector space with compatible left $\g$- and $K$-module structures. A $(\g, K)$-structure on a space $S$ is a principal $K$-bundle $Z \rightarrow S$ together with a transitive action $\g \rightarrow \Theta_Z$ satisfying certain compatibilities. \end{defn} Let $Z \rightarrow S$ be a $(\g, K)$-structure, and $V$ a $(\g, K)$-module. The fibre $\V_s$ of the associated bundle $\V = Z \times_K V$ over the point $s \in S$ carries an action of the Lie algebra $\g_s = Z_s \times_K \g$. Inside $\g_s$ we have the pointwise stabiliser $\g_s^0$ of $Z_s$.
We denote by $\Delta(V)$ the sheaf whose fibre over $s$ is the space of coinvariants $\V_s / \g_s^0 \cdot \V_s$. The $\g$-action on $V$ translates into a flat connection (more precisely a left $\CD_S$-module structure) on $\Delta(V)$. Now let $\widehat{\g}$ be a central extension of $\g$ split over $\lie{K} \subset \g$. If $V$ is a $\widehat{\g}$-module then a variation on the construction above yields $\Delta(V)$ a twisted $\CD_S$-module. That is to say, there is a certain sheaf $\CF$ on $S$ (which depends on the central extension $\widehat{\g}$ of $\g$) such that $\Delta(V)$ is a $\CD_\CF$-module, where $\CD_\CF$ is the sheaf of differential operators on $\CF$. The Harish-Chandra pairs of particular importance in our context are $(\vir, \aut{\OO})$ and $(\widehat{W}^{1|1}, \aut{\OO^{1|1}})$. Their relevance stems from the fact that moduli spaces of curves and $1|1$-dimensional supercurves carry natural $(\g, K)$-structures for these respective pairs {\cite{ADKP}} {\cite{BS88}} (see also {\cite[Chapter 17]{FBZ}} for an overview). This fact frequently goes by the name `Virasoro Uniformisation'. Let $\pi : X \rightarrow S$ be a morphism of schemes in general. In the sequence \[ 0 \rightarrow \Theta_{X/S} \rightarrow \Theta_X \rightarrow \pi^* \Theta_S \rightarrow 0 \] (which defines the relative tangent bundle $\Theta_{X/S}$), we denote by $\Theta_\pi$ the preimage of $\pi^{-1} \Theta_S$ in $\Theta_X$. Intuitively $\Theta_\pi$ consists of vector fields on $X$ of the shape $f(s) \partial_s + g(s, x) \partial_x$. Let $\widehat{\M}$ denote the moduli space of triples $(X, x, t)$ consisting of a smooth algebraic curve $X$ (of genus $g \geq 1$), a point $x \in X$, and a local coordinate $t \in \coord_{X, x}$, and let $\M$ denote the moduli space of pairs $(X, x)$. Let $\pi : \widehat{X} \rightarrow \widehat{\M}$ be the universal curve, and $Y \subset \widehat{X}$ the section of points $(X, x, t; x)$. 
The following theorem can be viewed as a refinement of the Kodaira-Spencer isomorphism. \begin{thm}[{\cite[Lemma 4.1.1]{BS88}}] There is a canonical $(\der \CK, \aut \OO)$-structure on $\widehat{\M} \rightarrow \M$. It is induced by the $\OO_{\widehat{\M}}$-module isomorphism \[ \Theta_\pi(\widehat{X} \backslash Y) \rightarrow \OO_{\widehat{\M}} \otimes \der{\CK} \] which acts at $(X, x, t)$ by sending a vector field to the expansion at $x$ in powers of $t$ of its vertical component \textup{(}along $X$\textup{)}. \end{thm} The $1|1$-dimensional analogue (along with other cases) is {\cite[Theorem 6.1]{Vai95}}. It follows that any $(\vir, \aut{\OO})$-module gives rise to a twisted $\CD$-module on $\M$ (or on any family of smooth curves). Similarly any $(\widehat{W}^{1|1}, \aut{\OO^{1|1}})$-module gives rise to a twisted $\CD$-module on any family of smooth $1|1$-dimensional supercurves. \section{Elliptic Curves and Ramanujan Differential Equations}\label{Section.Ramanujan} It is instructive to flesh out the construction of the previous section a little in the case of elliptic curves. Let $E \rightarrow \HH$ be the family of elliptic curves introduced in Section \ref{Section.Curves}, and let $V$ be a $(\der{\CK}, \aut{\OO})$-module, so that we obtain a $\CD$-module $\Delta(V)$ on $\HH$. We recall some standard functions from number theory \cite{Apostol}. The Bernoulli numbers $B_n$, $n \geq 0$ are defined by \[ \frac{x}{e^x-1} = \sum_{n=0}^\infty B_n \frac{x^n}{n!}. \] The Eisenstein series $G_{2k}$, $k \geq 1$ are defined by \[ G_{2k} = \frac{(-1)^{k+1}B_{2k}}{(2k)!} (2\pi)^{2k} E_{2k}, \quad \text{where} \quad E_{2k} = 1 - \frac{4k}{B_{2k}} \sum_{n=1}^\infty \frac{n^{2k-1} q^n}{1-q^n}. \] The Weierstrass elliptic function $\wp$, and quasielliptic function $\ov{\zeta}$, are defined by \begin{align*} \wp(z, \tau) &= z^{-2} + \sum_{k \in \Z_{>0}} (2k-1) z^{2k-2} G_{2k} \quad \text{and} \quad -2\pi i \ov{\zeta}(z, \tau) = z^{-1} - \sum_{k \in \Z_{>0}} z^{2k-1} G_{2k}.
\end{align*} The Weierstrass function $\wp$ is elliptic, i.e., \[ \wp(z+1, \tau) = \wp(z+\tau, \tau) = \wp(z, \tau). \] The nonstandard normalisation of $\ov{\zeta}$ is chosen so that \begin{align}\label{zeta.trans} \ov{\zeta}(z+1, \tau) = \ov{\zeta}(z, \tau) \quad \text{and} \quad \ov{\zeta}(z+\tau, \tau) = \ov{\zeta}(z, \tau)+1. \end{align} We have the following result. \begin{lemma}\label{global.vec.field} Flat sections $s$ of $\Delta(V)$ satisfy the differential equation \[ \frac{\partial s}{\partial \tau} + \left(\res_t \ov{\zeta}(t, \tau) L(t) dt\right) \cdot s = 0. \] \end{lemma} \begin{proof} The proof is an exercise in unwinding the definitions of Section \ref{section.HC}, applied to $E \rightarrow \HH$. All that needs to be checked is that the vector field $\partial_\tau + \ov{\zeta}(z, \tau) \partial_z$ is well defined on $E$ (being \emph{a priori} well defined only on its universal cover, since $\ov{\zeta}$ is not elliptic). Under the transformation $(z', \tau') = (z+\tau, \tau)$ we have $\partial_{\tau'} = \partial_\tau - \partial_z$ and $\partial_{z'} = \partial_z$. This together with (\ref{zeta.trans}) shows that $\partial_\tau + \ov{\zeta}(z, \tau) \partial_z$ is well defined. The same check on the transformation $(z, \tau) \mapsto (z+1, \tau)$ is immediate. \end{proof} Another incarnation of Lemma \ref{global.vec.field} is the following partial differential equation satisfied by the Weierstrass function $\wp$. \begin{prop} The Weierstrass functions satisfy \begin{align}\label{Weier.DE} \frac{\partial}{\partial \tau} \wp + \ov{\zeta} \frac{\partial}{\partial z}\wp = \frac{1}{2\pi i}(2\wp^2 - 2 G_2 \wp - 20 G_4). \end{align} \end{prop} \begin{proof} Differentiating \[ \wp(z+\tau, \tau) - \wp(z, \tau) = 0 \] with respect to $\tau$ yields \[ \dot{\wp}(z+\tau, \tau) - \dot{\wp}(z, \tau) = -\wp'(z+\tau, \tau) \] (where $\wp'$ and $\dot{\wp}$ are the derivatives with respect to the first and second entries). 
Similarly $\dot{\wp}(z+1, \tau) - \dot{\wp}(z, \tau) = 0$. It is clear then that $\dot{\wp} + \ov{\zeta} \wp'$ is an elliptic function with pole of order $4$ at $z=0$, hence a polynomial in $\wp$. Comparing leading coefficients yields the result. \end{proof} Equating coefficients of (\ref{Weier.DE}) yields an infinite list of differential equations on Eisenstein series. The first three of these, viz. \begin{align*} q \partial{E_2}/\partial q &= (E_2^2 - E_4)/12, \\ q \partial{E_4}/\partial q &= (E_2 E_4 - E_6)/3, \\ q \partial{E_6}/\partial q &= (E_2 E_6 - E_4^2)/2, \end{align*} were discovered by Ramanujan \cite{ramanujan} (see also \cite{vdP} and \cite{Movasati2012}). \section{Conformal Blocks and Trace Functions}\label{section.cb} A conformal vertex algebra carries a $(\vir, \aut{\OO})$-module structure, so the machinery of Section \ref{section.HC} can be applied. Let $V$ be a conformal vertex algebra and $X$ a smooth algebraic curve, we obtain an associated bundle $\V = \coord_X \times_{\aut{\OO}} V$ on $X$. Put $\A = \V \otimes \Om_X$. The vertex operation on $V$ has not yet been used, it translates into the following structure on $\A$: for each $x \in X$ an action $\mu$ of the space of sections $\G(D_x^\times, \A)$ on the fibre $\A_x$ (here $D_x^\times = \spec{\CK_x}$ is the punctured infinitesimal disc at $x$). In fact this structure makes $\A$ into a chiral algebra on $X$, in the sense of \cite{BD} (see also {\cite[Theorem 19.3.3]{FBZ}}). Underlying this construction is the following formula due to Huang {\cite{HuangCFT}} \begin{align}\label{Huang.lemma} R(\rho) Y(a, z) R(\rho)^{-1} = Y(R(\rho_z) a, \rho(z)), \end{align} valid for all $\rho \in \aut{\OO}$. Here $\rho_z \in \aut{\OO}$ is the automorphism defined by $\rho_z(t) = \rho(z+t)-\rho(t)$, and $R(\rho)$ is the action of $\rho$ on the conformal vertex algebra $V$ (obtained by exponentiating $\der_0{\OO} \subset \vir$). 
Applying the Harish-Chandra formalism to $V$ and to a family $X, x$ of pointed curves over base $S$ yields the $\CD_S$-module $\Delta(V)$, with fibres \[ \frac{\A_x}{\G(X \backslash x, \A) \cdot \A_x}. \] The dual of this fibre is called the vector space of conformal blocks associated with $X, x, V$, and is denoted $\CC(X, x, V)$. A superconformal SUSY vertex algebra carries a $(\widehat{W}^{1|1}, \aut{\OO^{1|1}})$-module structure, and can therefore be similarly localised on $1|1$-dimensional supercurves. These sheaves are again chiral algebras, using {\cite[Theorem 3.4]{H07}} which is a general SUSY analogue of (\ref{Huang.lemma}) above. The theorems of this section and the next concern construction of horizontal sections of the conformal blocks bundle $\CC$ for elliptic supercurves, and the modular properties of these sections. They are super-analogues of fundamental results of Zhu {\cite{Zhu96}}. \begin{thm}[{\cite[Proposition 7.10]{HVEaccepted}}]\label{triscb} Let $V$ be a superconformal vertex algebra and $M$ its module. Let $X = (\mathbb{A}^{1|1})^\times / \mathbf{q}$ be an elliptic supercurve with marked point $x = (z, \theta) = (1, 0)$ as in Section \ref{Section.Curves}. Then the element of $V^*$ defined by \[ \varphi_M : b \mapsto \str_M o(b) R(\mathbf{q}) \] is a conformal block, i.e., $\varphi_M \in \CC(X, x, V)$. \end{thm} \begin{proof}[Sketch] Let $a, b$ be sections of a chiral algebra $\A, \mu$ on $(\mathbb{A}^{1|1})^\times$. Huang's formula (\ref{Huang.lemma}) can be written schematically as \[ \rho \mu(a) \rho^{-1} = \mu(\rho \cdot a). \] Item (2) of Definition {\ref{def.SUSY.va}} may be reformulated {\cite[Theorem 3.3.17]{HK07}} as the relation \[ \mu(a) \mu(b) - \mu(b) \mu(a) = \mu(\mu(a)b) \] (again expressed only schematically). Let $\mathbf{q} \in GL(1|1)$, and suppose $a$ is $\mathbf{q}$-equivariant. 
Then the relations above combine with (super)symmetry of the (super)trace, and equivariance of $a$, to yield \begin{align}\label{main.loop} \begin{split} \tr \mu(\mu(a)b) \mathbf{q} &= \tr \left[\mu(a) \mu(b) - \mu(b) \mu(a)\right] \mathbf{q} \\ &= \tr \left[\mu(a) \mu(b) \mathbf{q} - \mu(b) \mathbf{q} \mu(\mathbf{q} \cdot a)\right] \\ &= \tr \left[\mu(a) \mu(b) \mathbf{q} - \mu(b) \mathbf{q} \mu(a)\right] \\ &= \tr \left[\mu(a) \mu(b) \mathbf{q} - \mu(a) \mu(b) \mathbf{q}\right] \\ &= 0. \end{split} \end{align} In other words $b \mapsto \str \mu(b) \mathbf{q}$ annihilates the action of global $\mathbf{q}$-equivariant sections, and hence is a conformal block on $(\mathbb{A}^{1|1})^\times / \mathbf{q}$. This sketch can be made precise either in the language of chiral algebras or of vertex algebras (and is done so in {\cite{HVEaccepted}} Sections 7.10 and 7.11, respectively). \end{proof} In \cite{Zhu96} the convergence in the analytic topology of the series defining $\varphi_M$ is important, and is derived from a finiteness condition on $V$ called $C_2$-cofiniteness. The superconformal analogue is proved in {\cite[Appendix A]{HVEaccepted}}, also using $C_2$-cofiniteness. We may now regard the element $\varphi_M \in \CC(E^\bullet(\mathbf{q}), V)$ as a section of the sheaf $\CC$ of conformal blocks on $S^\bullet$. As we have seen this sheaf is a twisted $\CD$-module. The $\varphi_M$ are flat sections of $\CC$, as we shall see in the next theorem via a variation on the proof of Theorem \ref{triscb}. Though the argument applies generally, we restrict attention to even $\mathbf{q} = \twobytwo{q}{0}{0}{qy}$ for the sake of clarity. In this case the operator $R(\mathbf{q})$ on $V$ is simply $q^{L_0} y^{J_0}$ and we recover the supercharacter \begin{align}\label{supercharacter} \varphi_M(b) = \str_M o(b) q^{L_0} y^{J_0}. 
\end{align} Expressed in terms of $x = e^{2\pi i t}$ we have the following expression for the Weierstrass function \begin{align}\label{x.express.zeta} \ov{\zeta}(t, \tau) = \xi(x, q) = \frac{1}{2} + \frac{1}{x-1} + \sum_{n \in \Z \backslash 0} \left( \frac{1}{q^n x - 1} - \frac{1}{q^n-1} \right). \end{align} We remark that the relation $\xi(qx, q) = \xi(x, q)+1$ is easily deduced from (\ref{x.express.zeta}) via a telescoping sum argument. \begin{thm}[{\cite[Theorem 8.15]{HVEaccepted}}]\label{trhasode} The function $\varphi_M$ satisfies the following \textup{(}in general infinite\textup{)} system of PDEs: \begin{align*} q \frac{\partial}{\partial q} \varphi_M(b) &= \varphi_M(\res_{x=1} x \xi(x, q) L(x-1) b), \\ y \frac{\partial}{\partial y} \varphi_M(b) &= \varphi_M(\res_{x=1} \xi(x, q) J(x-1) b). \end{align*} \end{thm} \begin{proof}[Sketch] As in Theorem \ref{triscb} we work on $(\mathbb{A}^{1|1})^\times$ with coordinates $(z, \theta)$ fixed. We let $a$ be a section of $\A$ no longer $\mathbf{q}$-equivariant, but satisfying instead $\mathbf{q} \cdot a = a - s$, where $s$ will be one of the explicit sections $h z$ or $j \theta$. We repeat the calculation (\ref{main.loop}) to obtain \begin{align*} \str \mu(\mu(a)b) \mathbf{q} = \str \mu(b) \mathbf{q} \mu(s) \end{align*} in general. The function $\xi$ may be used to construct the appropriate section $a$ because of the key relation $\xi(qx) = \xi(x)+1$. A precise calculation (in, for instance, the case $s = hz$) yields \begin{align*} \varphi_M(\res_{x=1} x \xi(x, q) L(x-1) b) = \str_M o(b) q^{L_0} y^{J_0} L_0 = q \frac{\partial}{\partial q} \varphi_M(b). \end{align*} The other relation derives in the same way from $s = j \theta$. \end{proof} \begin{rem}\label{connec.de} By the same reasoning as in Lemma \ref{global.vec.field}, we see that the differential equations of Theorem \ref{trhasode} are essentially the explicit expressions of the canonical Harish-Chandra connection. 
\end{rem} \section{Jacobi Modular Invariance} We now study the pullbacks of the sections $\varphi_M$ via the morphism $\sexp$ defined by formula (\ref{sex.defined}). We show that (after a normalisation) they are horizontal with respect to a certain $SL_2(\Z) \ltimes \Z^2$-equivariant connection. Explicitly we prove the following result. \begin{thm}[{\cite[Theorem 9.10]{HVEaccepted}}]\label{equivar.of.sections} The normalised section \[ \widetilde{\varphi}_M = e^{2\pi i \al \cdot (C/6)} \sexp^*(\varphi_M) \] is flat with respect to the connection \[ \nabla = d + \left( \res_t \ov{\zeta}(t, \tau) J(t) dt \right) d\al + \frac{1}{2\pi i} \left( \res_z \ov{\zeta}(z, \tau) \left[ L(z) + \partial_z J(z) \right] \right) d\tau. \] Furthermore $\nabla$ is equivariant with respect to the $SL_2(\Z) \ltimes \Z^2$-action on $E^\circ \rightarrow S^\circ$ of Proposition \ref{Jacobi.action.on.family}. \end{thm} This theorem is proved by analysing the behaviour of the partial differential equations of Theorem \ref{trhasode} under $SL_2(\Z) \ltimes \Z^2$ transformations, which is an explicit computation. It is possible to write the (projective) $SL_2(\Z) \ltimes \Z^2$-action on flat sections $\widetilde{\varphi}$ of $\CC$ explicitly {\cite[Theorem 1.2 (c)]{HVEaccepted}}. The specialisation to $b = \vac$ is \begin{align}\label{Jacobi.action} \begin{split} [\widetilde{\varphi} \cdot (m, n)](\vac, \tau, \al) &= \exp{2\pi i \frac{C}{6} \left[ m^2 \tau + 2m\al + 2n \right]} \widetilde{\varphi}(\vac, \tau, \al+m\tau+n) \\ [\widetilde{\varphi} \cdot \twobytwo abcd](\vac, \tau, \al) &= \exp{2\pi i \frac{C}{6} \left[ \frac{-c\al^2}{c\tau+d} \right]} \widetilde{\varphi}\left( \vac, \frac{a\tau+b}{c\tau+d}, \frac{\al}{c\tau+d} \right). \end{split} \end{align} This recovers the well known transformation law {\cite[Theorem 1.4]{EZ}} for Jacobi forms of weight $0$ and index $C/6$. 
Evaluation at other elements $b \in V$ yields Jacobi forms of higher weight, as well as more complicated `quasi-Jacobi' forms. In order to deduce Jacobi invariance of the (normalised) supercharacters (\ref{supercharacter}) it suffices to show that they span the fibre of $\CC$. This can presumably be done following the method of Zhu \cite[Section 5]{Zhu96}, assuming $V$ is a rational vertex algebra. Alternatively Jacobi invariance can be proved by extending the calculations of \cite{KM13} to the supersymmetric case. \bibliographystyle{plain} \def\cprime{$'$}
1,108,101,565,822
arxiv
\section{Introduction} The idea of grammar-based compression is based on the fact that in many cases a word $w$ can be succinctly represented by a context-free grammar that produces exactly $w$. Such a grammar is called a {\em straight-line program} (SLP) for $w$. In the best case, one gets an SLP of size $O(\log n)$ for a word of length $n$, where the size of an SLP is the total length of all right-hand sides of the rules of the grammar. A {\em grammar-based compressor} is an algorithm that produces for a given word $w$ an SLP $\mathbb A$ for $w$, where, of course, $\mathbb A$ should be smaller than $w$. Grammar-based compressors can be found at many places in the literature. Probably the best known example is the classical {\sf LZ78}-compressor of Lempel and Ziv \cite{ZiLe78}. Indeed, it is straightforward to transform the {\sf LZ78}-representation of a word $w$ into an SLP for $w$. Other well-known grammar-based compressors are {\sf BISECTION} \cite{KiefferYNC00}, {\sf SEQUITUR} \cite{Nevill-ManningW97}, and {\sf RePair} \cite{LarssonM99}, just to mention a few. One of the first appearances of straight-line programs in the literature are \cite{BerstelB87,Diw86}, where they are called {\em word chains} (since they generalize addition chains from numbers to words). In \cite{BerstelB87}, Berstel and Brlek prove that the function $g(k,n) = \max \{ g(w) \mid w \in \{1,\ldots,k\}^n \}$, where $g(w)$ is the size of a smallest SLP for the word $w$, is in $\Theta(n/\log_k n)$. Note that $g(k,n)$ measures the worst case SLP-compression over all words of length $n$ over a $k$-letter alphabet. The first systematic investigations of grammar-based compressors are \cite{CLLLPPSS05,KiYa00}. Whereas in \cite{KiYa00}, grammar-based compressors are used for universal lossless compression (in the information-theoretic sense), Charikar et al.~study in \cite{CLLLPPSS05} the worst case approximation ratio of grammar-based compressors. 
For a given grammar-based compressor $\mathcal{C}$ that computes from a given word $w$ an SLP $\mathcal{C}(w)$ for $w$ one defines the approximation ratio of $\mathcal{C}$ on $w$ as the quotient of the size of $\mathcal{C}(w)$ and the size $g(w)$ of a smallest SLP for $w$. The approximation ratio $\alpha_{\mathcal{C}}(n)$ is the maximal approximation ratio of $\mathcal{C}$ among all words of length $n$ over any alphabet. In \cite{CLLLPPSS05} the authors compute upper and lower bounds for the approximation ratios of several grammar-based compressors (among them are the compressors mentioned above). The contribution of this paper is the improvement of the lower bound for {\sf RePair} from $\Omega(\sqrt{\log n})$ to $\Omega(\log n/\log\log n)$. While in \cite{CLLLPPSS05} the lower bound needs an unbounded alphabet (the alphabet grows logarithmically in the length of the presented words) our family of words is defined over a binary alphabet. {\sf RePair} works by repeatedly searching for a digram $d$ (a string of length two) with the maximal number of non-overlapping occurrences in the current text and replacing all these occurrences by a new nonterminal $A$. Moreover, the rule $A \to d$ is added to the grammar. {\sf RePair} is one of the so-called global grammar-based compressor from \cite{CLLLPPSS05} for which the approximation ratio seems to be very hard to analyze. Charikar et al. prove for all global grammar-based compressors an upper bound of $\mathcal{O}\left((n/\log n)^{2/3}\right)$ for the approximation ratio. Note that the gap to our improved lower bound $\Omega(\log n/\log\log n)$ is still large. \paragraph{Related work.} The theoretically best known grammar-based compressors with a polynomial (in fact, linear) running time achieve an approximation ratio of $O(\log n)$ \cite{CLLLPPSS05,Jez15tcs,Jez16,Ryt03}. 
In \cite{HuLoRe17}, the precise (up to constant factors) approximation ration for BISECTION (resp., LZ78) was shown to be $\Theta( (n/\log n)^{1/2})$ (resp., $\Theta( (n/\log n)^{2/3})$). In \cite{NavarroR08} the authors prove that {\sf RePair} combined with a simple binary encoding of the grammar compresses every word $w$ over an alphabet of size $\sigma$ to at most $2 H_k(w) + o(|w| \log \sigma)$ bits, for any $k = o(\log_\sigma |w|)$, where $H_k(w)$ is the $k$-th order entropy of $w$. There is also a bunch of papers with practical applications for {\sf RePair}: web graph compression \cite{ClaudeN10}, bit maps \cite{NavarroPV11}, compressed suffix trees \cite{Gonzalez07}. Some practical improvements of {\sf RePair} can be found in \cite{GaJe17}. \section{Preliminaries} Let $[1,k] = \{1,\ldots,k\}$. Let $w=a_1\cdots a_n$ ($a_1,\dots,a_n\in\Sigma$) be a \emph{word} or \emph{string} over a finite \emph{alphabet} $\Sigma$. The length $|w|$ of $w$ is $n$ and we denote by $\varepsilon$ the word of length $0$. We define $w[i]=a_i$ for $1\le i\le |w|$ and $w[i:j]=a_i\cdots a_j$ for $1\le i\le j\le |w|$. Let $\Sigma^+ = \Sigma^* \setminus \{\varepsilon\}$ be the set of nonempty words. For $w \in \Sigma^+$, we call $v\in\Sigma^+$ a \emph{factor} of $w$ if there exist $x,y\in\Sigma^*$ such that $w=xvy$. If $x=\varepsilon$, then we call $v$ a \emph{prefix} of $w$. For words $w_1,\dots, w_n\in\Sigma^*$, we further denote by $\prod_{i=j}^nw_i$ the word $w_jw_{j+1}\cdots w_n$ if $j\le n$ and $\varepsilon$ otherwise. A \emph{straight-line program}, briefly SLP, is a context-free grammar that produces a single word $w\in\Sigma^+$. 
Formally, it is a tuple $\mathbb A = (N,\Sigma, P, S)$, where $N$ is a finite set of nonterminals with $N\cap \Sigma = \emptyset$, $S \in N$ is the start nonterminal, and $P$ is a finite set of productions (or rules) of the form $A \to w$ for $A \in N$, $w \in (N \cup \Sigma)^+$ such that: \begin{itemize} \item For every $A \in N$, there exists exactly one production of the form $A \to w$, and \item the binary relation $\{ (A, B) \in N \times N \mid (A \to w) \in P,\;B \text{ occurs in } w \}$ is acyclic. \end{itemize} Every nonterminal $A \in N$ produces a unique string $\mathrm{val}_{\mathbb A}(A) \in \Sigma^+$. The string defined by $\mathbb A$ is $\mathrm{val}(\mathbb A) = \mathrm{val}_{\mathbb A}(S)$. We omit the subscript $\mathbb A$ when it is clear from the context. The \emph{size} of the SLP $\mathbb A$ is $|\mathbb A| = \sum_{(A \to w) \in P} |w|$. We denote by $g(w)$ the size of a smallest SLP producing the word $w\in\Sigma^+$. We will use the following lemma: \begin{lemma}[\mbox{\cite[Lemma~3]{CLLLPPSS05}}] \label{lemma:folklore} A string $w$ contains at most $g(w) \cdot k$ distinct factors of length $k$. \end{lemma} A grammar-based compressor $\mathcal C$ is an algorithm that computes for a nonempty word $w$ an SLP $\mathcal C(w)$ such that $\mathrm{val}(\mathcal C(w))=w$. The \emph{approximation ratio} $\alpha_{\mathcal C}(w)$ of $\mathcal C$ for an input $w$ is defined as $|\mathcal C(w)|/g(w)$. The worst-case approximation ratio $\alpha_{\mathcal C}(k,n)$ of $\mathcal C$ is the maximal approximation ratio over all words of length $n$ over an alphabet of size $k$: \[\alpha_{\mathcal C}(k,n)=\max \{ \alpha_{\mathcal C}(w) \mid w \in [1,k]^n \} = \max\{ |\mathcal C(w)|/g(w) \mid w \in [1,k]^n \} \] If the alphabet size is unbounded, i.e., if we allow alphabets of size $|w|$, then we write $\alpha_{\mathcal C}(n)$ instead of $\alpha_{\mathcal C}(n,n)$. 
\section{RePair} For a given SLP $\mathbb A = (N,\Sigma, P, S)$, a word $\gamma \in (N \cup \Sigma)^+$ is called a \emph{maximal string} of $\mathbb A$ if \begin{itemize} \item $|\gamma|\ge 2$, \item $\gamma$ appears at least twice without overlap in the right-hand sides of $\mathbb A$, \item and no strictly longer word appears at least as many times on the ride-hand sides of $\mathbb A$ without overlap. \end{itemize} A \emph{global grammar-based compressor} starts on input $w$ with the SLP $\mathbb A=(\{S\},\Sigma, \{S\to w\}, S)$. In each round, the algorithm selects a maximal string $\gamma$ of $\mathbb A$ and updates $\mathbb A$ by replacing a largest set of a pairwise non-overlapping occurrences of $\gamma$ in $\mathbb A$ by a fresh nonterminal $X$. Additionally, the algorithm introduces the rule $X\to \gamma$. The algorithm stops when no maximal string occurs. The global grammar-based compressor {\sf RePair}~\cite{LarssonM99} selects in each round a most frequent maximal string. Note that the replacement is not unique, e.g. the word $a^5$ with the maximal string $\gamma=aa$ yields SLPs with rules $S\to XXa, X\to aa$ or $S\to XaX, X\to aa$ or $S\to aXX, X\to aa$. We assume the first variant in this paper, i.e. maximal strings are replaced from left to right. The above description of RePair is taken from~\cite{CLLLPPSS05}. In most papers on {\sf RePair} the algorithm works slightly different: It replaces in each step a digram (a string of length two) with the maximal number of pairwise non-overlapping occurrences in the right-hand sides. For example, for the string $w = abcabc$ this produces the SLP $S \to BB$, $B \to Ac$, $A \to ab$, whereas the {\sf RePair}-variant from \cite{CLLLPPSS05} produces the smaller SLP $S \to AA$, $A \to abc$. 
The following lower and upper bounds on the approximation ratio of {\sf RePair} were shown in~\cite{CLLLPPSS05}: \begin{itemize} \item $\alpha_\mathsf{RePair}(n)\in\Omega\left(\sqrt{\log n}\right)$ \item $\alpha_\mathsf{RePair}(2,n)\in \mathcal{O}\left((n/\log n)^{2/3}\right)$ \end{itemize} The proof of the lower bound in~\cite{CLLLPPSS05} assumes an alphabet of unbounded size. To be more accurate, the authors construct for every $k$ a word $w_k$ of length $\Theta(\sqrt{k} 2^k)$ over and alphabet of size $\Theta(k)$ such that $g(w) \in O(k)$ and {\sf RePair} produces a grammar of size $\Omega(k^{3/2})$ for $w_k$. We will improve this lower bound using only a binary alphabet. To do so, we first need to know how {\sf RePair} compresses unary words. \begin{example}[unary inputs] \label{unary} {\sf RePair} produces on input $a^{27}$ the SLP with rules $X_1\to aa$, $X_2\to X_1X_1$, $X_3\to X_2X_2$ and $S\to X_3X_3X_3X_1a$, where $S$ is the start nonterminal. For the input $a^{22}$ only the start rule $S\to X_3X_3X_2X_1$ is different. \end{example} In general, {\sf RePair} creates on unary input $a^m$ ($m\ge 4$) the rules $X_1\to aa$, $X_i\to X_{i-1}X_{i-1}$ for $2\le i\le \lfloor \log m\rfloor-1$ and a start rule, which is strongly related to the binary representation of $m$ since each nonterminal $X_i$ produces the word $a^{2^i}$. To be more accurate, let $b_{\lfloor \log m\rfloor} b_{\lfloor \log m\rfloor-1}\cdots b_1b_0$ be the binary representation of $m$ and define the mappings $f_i$ ($i \geq 0$) by: \begin{itemize} \item $f_0:\{0,1\}\to\{a,\varepsilon\}$ with $f_0(1)=a$ and $f_0(0)=\varepsilon$,\label{f0} \item $f_i:\{0,1\}\to \{X_i,\varepsilon\}$ with $f_i(1)=X_i$ and $f_i(0)=\varepsilon$ for $i\ge 1$. \end{itemize} Then the start rule produced by {\sf RePair} on input $a^m$ is \begin{center} $S\to X_{\lfloor\log m\rfloor-1}X_{\lfloor\log m\rfloor-1}f_{\lfloor\log m\rfloor-1}(b_{\lfloor\log m\rfloor-1})\cdots f_1(b_1)f_0(b_0)$. 
\end{center} This means that the symbol $a$ only occurs in the start rule if $b_0=1$, and the nonterminal $X_i$ ($1\le i\le \lfloor\log m\rfloor-2$) occurs in the start rule if and only if $b_i=1$. Since {\sf RePair} only replaces words with at least two occurrences, the most significant bit $b_{\lfloor \log m\rfloor}=1$ is represented by $X_{\lfloor\log m\rfloor-1}X_{\lfloor\log m\rfloor-1}$. Note that for $1 \leq m \leq 3$, {\sf RePair} produces the trivial SLP $S \to a^m$. \section{Main result} The main result of this paper states: \begin{theorem} \label{thm} $\alpha_\mathsf{RePair}(2,n)\in\Omega\left(\log n/\log\log n\right)$ \end{theorem} \begin{proof} We start with a binary De-Bruijn sequence $B_{\lceil\log k\rceil}\in \{0,1\}^*$ of length $2^{\lceil\log k\rceil}$ such that each factor of length $\lceil\log k\rceil$ occurs at most once \cite{deBr46}. We have $k\le|B_{\lceil\log k\rceil}|< 2k$. Note that De-Bruijn sequences are not unique, so without loss of generality let us fix a De-Bruijn sequence which starts with $1$ for the remaining proof. We define a homomorphism $h:\{0,1\}^*\to\{0,1\}^*$ by $h(0)=01$ and $h(1)=10$. The words $w_k$ of length $2k$ are defined as $$w_k=h(B_{\lceil\log k\rceil}[1:k]).$$ For example for $k=4$ we can take $B_2=1100$, which yields $w_4=10100101$. We will analyze the approximation ratio of {\sf RePair} for the binary words $$s_k=\prod_{i=1}^{k-1}\left(a^{w_k[1:k+i]}b\right)a^{w_k}=a^{w_k[1:k+1]}ba^{w_k[1:k+2]}b\dots a^{w_k[1:2k-1]}ba^{w_k},$$ where the prefixes $w_k[1:k+i]$ for $1\le i\le k$ are interpreted as binary numbers. For example we have $s_4=a^{20}ba^{41}ba^{82}ba^{165}$. Since $B_{\lceil\log k\rceil}[1]=w_k[1]=1$, we have $2^{k+i-1}\le\left|a^{w_k[1:k+i]}\right|\le 2^{k+i}-1$ for $1\le i\le k$ and thus $|s_k|\in \Theta\left(4^k\right)$. \medskip \noindent {\em Claim 1.} A smallest SLP producing $s_k$ has size $\mathcal O(k)$. 
\medskip \noindent There is an SLP $\mathbb A$ of size $\mathcal O(k)$ for the first $a$-block $a^{w_k[1:k+1]}$ of length $\Theta(2^k)$. Let $A$ be the start nonterminal of $\mathbb A$. For the second $a$-block $a^{w_k[1:k+2]}$ we only need one additional rule: If $w_k[k+2]=0$, then we can produce $a^{w_k[1:k+2]}$ by the fresh nonterminal $B$ using the rule $B\to AA$. Otherwise, if $w_k[k+2]=1$, then we use $B\to AAa$. The iteration of that process yields for each $a$-block only one additional rule of size at most $3$. If we replace the $a$-blocks in $s_k$ by nonterminals as described, then the resulting word has size $2k+1$ and hence $g(s_k)\in \mathcal O(k)$. \medskip \noindent {\em Claim 2.} The SLP produced by {\sf RePair} on input $s_k$ has size $\Omega(k^2/\log k)$. \medskip \noindent On unary inputs of length $m$, the start rule produced by {\sf RePair} is strongly related to the binary encoding of $m$ as described above. On input $s_k$, the algorithm starts to produce a start rule which is similarly related to the binary words $w_k[1:k+i]$ for $1\le i\le k$. Consider the SLP $\mathbb{G}$ which is produced by {\sf RePair} after $(k-1)$ rounds on input $s_k$. We claim that up to this point {\sf RePair} is not affected by the $b$'s in $s_k$ and therefore has introduced the rules $X_1\to aa$ and $X_i\to X_{i-1}X_{i-1}$ for $2\le i\le k-1$. If this is true, then the start rule after $k-1$ rounds begins with \begin{center} $S\to X_{k-1}X_{k-1}f_{k-1}(w_k[2])f_{k-2}(w_k[3])\cdots f_0(w_k[k+1])b\cdots$ \end{center} where $f_0(1)=a$, $f_0(0)=\varepsilon$ and $f_i(1)=X_i$, $f_i(0)=\varepsilon$ for $i\ge 1$. All other $a$-blocks are longer than the first one, hence each factor of the start rule which corresponds to an $a$-block begins with $X_{k-1}X_{k-1}$. Therefore, the number of occurrences of $X_{k-1}X_{k-1}$ in the SLP is at least $k$. 
Since the symbol $b$ occurs only $k-1$ times in $s_k$, it follows that our assumption is correct and {\sf RePair} is not affected by the $b$'s in the first $(k-1)$ rounds on input $s_k$. Also, for each block $a^{w_k[1:k+i]}$, the $k-1$ least significant bits of $w_k[1:k+i]$ ($1\le i\le k$) are represented in the corresponding factor of the start rule of $\mathbb{G}$, i.e., the start rule contains non-overlapping factors $v_i$ with \begin{equation} v_i=f_{k-2}(w_k[i+2])f_{k-3}(w_k[i+3])\dots f_1(w_k[k+i-1])f_0(w_k[k+i])\label{blockencoding} \end{equation} for $1\le i\le k$. For example after $3$ rounds on input $s_4=a^{20}ba^{41}ba^{82}ba^{165}$, we have the start rule $$S\to \underbrace{X_3X_3X_2}_{a^{20}}b\underbrace{X_3^5a}_{a^{41}}b\underbrace{X_3^{10}X_1}_{a^{82}}b\underbrace{X_3^{20}X_2a}_{a^{165}},$$ where $v_1=X_2$, $v_2=a$, $v_3=X_1$ and $v_4=X_2a$. The length of the factor $v_i\in\{a,X_1,\dots,X_{k-2}\}^*$ from equation~\eqref{blockencoding} is exactly the number of $1$'s in the word $w_k[i+2:k+i]$. Since $w_k$ is constructed by the homomorphism $h$, it is easy to see that $|v_i|\ge (k-3)/2$. Note that no letter occurs more than once in $v_i$, hence $g(v_i)=|v_i|$. Further, each substring of length $2\lceil\log k\rceil+2$ occurs at most once in $v_1,\dots,v_k$, because otherwise there would be a factor of length $\lceil\log k\rceil$ occurring more than once in $B_{\lceil\log k\rceil}$. It follows that there are at least $$k\cdot ( \lceil(k-3)/2\rceil-2\lceil\log k\rceil-1)\in\Theta(k^2)$$ different factors of length $2\lceil\log k\rceil+2\in\Theta(\log k)$ in the right-hand side of the start rule of $\mathbb G$. By Lemma~\ref{lemma:folklore} it follows that a smallest SLP for the right-hand side of the start rule has size $\Omega(k^2/\log k)$ and therefore $|\mathsf{RePair}(s_k)|\in\Omega(k^2/\log k)$. 
\medskip \noindent In conclusion: We showed that a smallest SLP for $s_k$ has size $\mathcal O(k)$, while {\sf RePair} produces an SLP of size $\Omega(k^2/\log k)$. This implies $\alpha_{\mathsf{RePair}}(s_k) \in \Omega(k/\log k)$, which together with $n=|s_k|$ and $k\in\Theta(\log n)$ finishes the proof. \end{proof} Note that in the above prove, {\sf RePair} chooses in the first $k-1$ rounds a digram for the replaced maximal string. Therefore, Theorem~\ref{thm} also holds for the {\sf RePair}-variant, where in every round a digram (which is not necessarily a maximal string) is replaced.
1,108,101,565,823
arxiv
\section{Introduction}\label{sec:introduction} Causal inference from observational data---that is, identifying cause and effect in data that was not collected through carefully controlled randomised trials---is a fundamental problem in both business and science~\cite{spirtes:00:book,pearl:09:book}. A particularly interesting setting is to tell cause from effect between a pair of random variables $X$ and $Y$, given data over the joint distribution. That is, to identify which of $\ensuremath{X \rightarrow Y}\xspace$ or $\ensuremath{Y \rightarrow X}\xspace$ is the most likely causal direction. In recent years, a number of important ideas have been proposed that allow for accurate causal inference based on properties of the joint distribution. These ideas include that of the Additive Noise Model (ANM), where we assume the effect is a function of the cause with additive noise independent of the cause~\cite{shimizu:06:anm,peters:10:discreteanm,peters:14:continuousanm}, and that of the algorithmic Markov condition~\cite{janzing:10:algomarkov,budhathoki:16:origo} which is based on Kolmogorov Complexity. Loosely speaking, the key idea is that if $X$ causes $Y$, the shortest description of the joint distribution $P(X,Y)$ is given by the separate descriptions of $P(X)$ and $P(Y\mid X)$. That is, if \ensuremath{X \rightarrow Y}\xspace, these two distributions will be less dependent than $P(Y)$ and $P(X \mid Y)$. However, as Kolmogorov complexity is not computable, any method using this observation requires a computable approximation of this notion, which in general involves arbitrary choices~\cite{sgouristsa:15:cure,vreeken:15:ergo,liu:16:dc,janzing:12:igci}. In this paper, for the first time, we define a causal inference rule based on the algorithmic Markov condition using stochastic complexity. 
More specifically, we approximate Kolmogorov complexity
\begin{itemize}[noitemsep,topsep=2pt] \item[(a)] we propose the first computable framework for causal inference by the algorithmic Markov condition with provable mini-max optimality guarantees, \item[(b)] define a causal indicator for pairs of discrete variables based on stochastic complexity, \item[(c)] show how to efficiently compute it, \item[(d)] provide extensive experimental results on synthetic, benchmark, and real-world data, and \item[(e)] make our implementation and all used data available \end{itemize} The paper is structured as usual. We introduce notation and give preliminaries in Sec.~\ref{sec:prelim}, and give a brief primer to causal inference by Kolmogorov complexity in Sec.~\ref{sec:foundation}. We present \textsc{cisc}\xspace, our practical instantiation based on stochastic complexity score in Sec.~\ref{sec:practical}. Related work is discussed in Sec.~\ref{sec:rel}, and we evaluate \textsc{cisc}\xspace empirically in Sec.~\ref{sec:exps}. We round up with discussion in Sec.~\ref{sec:disc} and conclude in Sec.~\ref{sec:concl}. \section{Preliminaries}\label{sec:prelim} In this section, we introduce notations and background definitions we will use in subsequent sections. \subsection{Kolmogorov Complexity} The Kolmogorov complexity of a finite binary string $x$ is the length of the shortest binary program $p^*$ for a Universal Turing machine $\mathcal{U}$ that generates $x$, and then halts~\cite{kolmogorov:65:information, vitanyi:93:book}. Formally, we have \[ K(x) = \min \left \{ |p| : p \in \{0,1\}^*, \mathcal{U}(p) = x \right \} \; . \] Simply put, $p^*$ is the most succinct \emph{algorithmic} description of $x$, and the Kolmogorov complexity of $x$ is the length of its ultimate lossless compression. Conditional Kolmogorov complexity, $K(x \mid y) \leq K(x)$, is then the length of the shortest binary program $p^*$ that generates $x$, and halts, given $y$ as input. 
The amount of \emph{algorithmic information} contained in $y$ about $x$ is $I(y:x) = K(y) - K(y \mid x^*)$, where $x^*$ is the shortest binary program for $x$, defining $I(x:y)$ analogously. Intuitively, it is the number of bits that can be saved in the description of $y$ when the shortest description of $x$ is already known. Algorithmic information is symmetric, i.e.\ $I(y:x) \ensuremath{\stackrel{+}{=}}\xspace I(x:y)$, where \ensuremath{\stackrel{+}{=}}\xspace denotes equality up to an additive constant, and therefore also called \emph{algorithmic mutual information}~\cite{vitanyi:93:book}. Two strings $x$ and $y$ are \emph{algorithmically independent} if they have no algorithmic mutual information, i.e.\ $I(x:y) \ensuremath{\stackrel{+}{=}}\xspace 0$. For our purpose, we also need the Kolmogorov complexity of a distribution. The Kolmogorov complexity of a probability distribution $P$, $K(P)$, is the length of the shortest program that outputs $P(x)$ to precision $q$ on input $\langle x, q \rangle$~\cite{grunwald:08:ait}. More formally, we have \[ K(P) = \min \left \{ |p| : p \in \{0,1\}^*, |\mathcal{U}(\langle x, \langle q, p \rangle \rangle) - P(x) | \leq 1/q \right \} \; . \] We refer the interested reader to Li \& Vit{\'a}nyi~\cite{vitanyi:93:book} for more details on Kolmogorov complexity. \section{Causal Inference by Complexity}\label{sec:foundation} Given two correlated variables $X$ and $Y$, we are interested in inferring their causal relationship. In particular, we want to infer whether $X$ causes $Y$, whether $Y$ causes $X$, or they are only correlated. In doing so, we assume causal sufficiency. That is, there is no confounding variable, i.e.\ hidden common cause $Z$ of $X$ and $Y$. We use $\ensuremath{X \rightarrow Y}\xspace$ to indicate $X$ causes $Y$. 
We base our causal inference method on the following postulate: \begin{postulate}[independence of input and mechanism~\cite{sgouristsa:15:cure}]\label{post:ind_input_mechanism} If $X \rightarrow Y$, the marginal distribution of the cause $P(X)$, and the conditional distribution of the effect given the cause, $P(Y \mid X)$ are \emph{independent} --- $P(X)$ contains no information about $P(Y \mid X)$ --- and vice versa since they correspond to independent mechanisms of nature. \end{postulate} This postulate provides the foundation for many successful causal inference frameworks designed for a pair of variables~\cite{janzing:10:justifyanm,janzing:12:igci,sgouristsa:15:cure,schoelkopf:12:clearn}. We can think of conditional $P(Y|X)$ as the \emph{mechanism} that transforms $x$-values into $y$-values, i.e.\ generates effect $Y$ for cause $X$. The postulate is justified if we are dealing with a mechanism of nature that does not care what input we provide to it ($P(X)$ in this case). This independence will not hold in the other direction as $P(Y)$ and $P(X \mid Y)$ may contain information about each other as both inherit properties from $P(Y \mid X)$ and $P(X)$. This creates an asymmetry between cause and effect. It is insightful to consider the following example where amount of radiation per $\mathit{cm}^2$ solar cell (cause) causes power generation in the cell (effect). We can just affect $P(\ensuremath{\mathit{cause}}\xspace)$ only by actions such as moving the solar cell to a shady place, and varying the angle to the sun to affect $P(\ensuremath{\mathit{cause}}\xspace)$. Likewise we can change only $P(\ensuremath{\mathit{effect}}\xspace \mid \ensuremath{\mathit{cause}}\xspace)$ by actions such as using more efficient cells. However it is hard to find actions that change $P(\ensuremath{\mathit{effect}}\xspace)$ without affecting $P(\ensuremath{\mathit{cause}}\xspace \mid \ensuremath{\mathit{effect}}\xspace)$ or vice versa. 
The notion of \emph{independence}, however, is abstract. Accordingly, different formalisations have been proposed. Janzing et al.~\cite{janzing:12:igci} define independence in terms of information geometry. Liu \& Chan~\cite{liu:16:dc} formulate independence in terms of the distance correlation between marginal and conditional empirical distribution. Janzing \& Sch{\"o}lkopf~\cite{janzing:10:algomarkov} formalise independence using algorithmic information theory, and postulate \emph{algorithmic} independence of $P(X)$ and $P(Y \mid X)$. Since algorithmic formulation captures all types of dependencies, and has a sound theoretical foundation, it is, arguably, a better mathematical formalisation of Postulate~\ref{post:ind_input_mechanism}. Using algorithmic information theory, we arrive at the following postulate. \begin{postulate}[algorithmic independence of Markov kernels~\cite{janzing:10:algomarkov}]\label{post:ind_algo} If $\ensuremath{X \rightarrow Y}\xspace$, the marginal distribution of the cause $P(X)$ and the conditional distribution of the cause given the effect $P(Y \mid X)$ are algorithmically independent, i.e.\ $I(P(X):P(Y \mid X)) \ensuremath{\stackrel{+}{=}}\xspace 0$. \end{postulate} Postulate~\ref{post:ind_algo} is equivalent to saying that if $\ensuremath{X \rightarrow Y}\xspace$, factorizing the joint distribution over $X$ and $Y$ into $P(X)$ and $P(Y \mid X)$, will lead to simpler --- in terms of Kolmogorov complexity --- models than factorizing it into $P(Y)$ and $P(X \mid Y)$~\cite{janzing:10:algomarkov}. The following theorem is hence a consequence of the algorithmic independence of input and mechanism. \begin{theorem}[Th.~1 in~\citet{mooij:10:latent}]\label{thm:joint_desc} If $X$ is a cause of $Y$, \[ K(P(X)) + K(P(Y \mid X)) \leq K(P(Y)) + K(P(X \mid Y)) \; . \] holds up to an additive constant. 
\end{theorem} In other words, we can perform causal inference simply by identifying that direction between $X$ and $Y$ for which the factorization of the joint distribution has the lowest Kolmogorov complexity. Although this inference rule has sound theoretical foundations, the problem remains that Kolmogorov complexity is not computable because of the widely known \emph{halting problem}. In practice, we therefore need other, computable, notions of independence or information. We can, for instance, approximate Kolmogorov complexity from above through lossless compression~\cite{vitanyi:93:book}. More generally, the Minimum Description Length (MDL) principle~\cite{rissanen:78:mdl,grunwald:07:book} provides a statistically sound and computable means for approximating Kolmogorov complexity~\cite{vereshchagin:03:kolmo,grunwald:07:book}. \section{Causal Inference by Compression}\label{sec:practical} In this section, we discuss how stochastic complexity can be used for practical causal inference. We gradually move towards that goal starting with MDL, and covering the basics along the way. \subsection{Minimum Description Length Principle} The Minimum Description Length (MDL)~\cite{rissanen:78:mdl} principle is a practical version of Kolmogorov complexity. Instead of all possible programs, it considers only programs for which we know they generate $x$ and halt. That is, lossless compressors. In MDL theory, programs are often referred to as \emph{models}. The MDL principle has its root in the two-part decomposition of the Kolmogorov complexity~\cite{vitanyi:93:book}. It can be roughly described as follows~\cite{grunwald:07:book}. 
Given a set of models $\ensuremath{\mathcal{M}}\xspace$ and data $D$, the best model $M \in \ensuremath{\mathcal{M}}\xspace$ is the one that minimises $L(D, M) = L(M)+L(D \mid M)$, where $L(M)$ is the length, in bits, of the description of the model, and $L(D \mid M)$ is the length, in bits, of the description of the data when encoded with the model $M$. Intuitively $L(M)$ represents the compressible part of the data, and $L(D \mid M)$ represents the noise in the data. This is called two-part MDL, or \emph{crude} MDL. To use crude MDL in practice, we have to define our model class \ensuremath{\mathcal{M}}\xspace, and the description methods for $L(M)$ as well as $L(D \mid M)$. If the models \ensuremath{\mathcal{M}}\xspace under consideration define probability distributions, we can use the optimal prefix code given by Shannon entropy, $L(D \mid M) = -\log P(D \mid M)$, where $P(D \mid M)$ is the probability mass or density function of $D$ according to $M$. The definition of $L(M)$, however, is tricky --- $L(M)$ can vary from one encoding to the other, introducing \emph{arbitrariness} in the process. The \emph{refined} version of MDL overcomes this arbitrariness by encoding $M$ and $D$ together. Unlike crude MDL, refined MDL encodes $D$ with the (entire) model class \ensuremath{\mathcal{M}}\xspace, resulting in a single \emph{one-part} code $\bar{L}(D \mid \ensuremath{\mathcal{M}}\xspace)$~\cite{grunwald:07:book}. The one-part code length $\bar{L}(D \mid \ensuremath{\mathcal{M}}\xspace)$ is also called the \emph{stochastic complexity} of $D$ with respect to $\ensuremath{\mathcal{M}}\xspace$. The code is designed in such a way that if there exists a model $M^* \in \ensuremath{\mathcal{M}}\xspace$ for which $L(D \mid M^*)$ is minimal then $\bar{L}(D \mid \ensuremath{\mathcal{M}}\xspace)$ will also be minimal. Codes with such a property are also called \emph{universal codes}. There exist various types of universal codes. 
Although the coding schemes are different across those codes, the resulting code lengths $\bar{L}(D \mid \ensuremath{\mathcal{M}}\xspace)$ are almost the same~\cite{grunwald:07:book}. In this work, we consider the NML universal code in particular. Next we explain stochastic complexity in detail using the NML universal code. \subsection{Stochastic Complexity} Let $X^n = (x_1, x_2, \dots, x_n)$ be an i.i.d.\ sample of $n$ observed outcomes, where each outcome $x_i$ is an element of a space of observations \ensuremath{\mathcal{X}}\xspace. Let $\ensuremath{\Theta}\xspace \subseteq \mathbb{R}^d$, where $d \in \mathbb{Z}^+$, be the parameter space. A model class \ensuremath{\mathcal{M}}\xspace is a family of probability distributions consisting of all the different distributions $P(\cdot \mid \ensuremath{\bm{\theta}}\xspace)$ that can be produced by varying the parameters \ensuremath{\bm{\theta}}\xspace. Formally, a model class $\ensuremath{\mathcal{M}}\xspace$ is defined as \[ \ensuremath{\mathcal{M}}\xspace = \{ P(\cdot \mid \ensuremath{\bm{\theta}}\xspace) : \ensuremath{\bm{\theta}}\xspace \in \ensuremath{\Theta}\xspace \} \; . \] To encode the data $X^n$ optimally with respect to the model class \ensuremath{\mathcal{M}}\xspace, we can use the code corresponding to the distribution $P(\cdot \mid \hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}}\xspace))$ induced by the maximum likelihood estimate $\hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}}\xspace)$ of the data $X^n$ for a given model class \ensuremath{\mathcal{M}}\xspace, since this distribution assigns a shorter code length, i.e.\ higher likelihood, to the data than any of the other distributions in the model class. 
The Normalized Maximum Likelihood (NML) distribution is then defined as \[ \pnml{X^n \mid \ensuremath{\mathcal{M}}\xspace} = \frac{P(X^n \mid \hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}}\xspace))}{R(\ensuremath{\mathcal{M}}\xspace, n)} \; , \] where the normalizing term $R(\ensuremath{\mathcal{M}}\xspace, n)$ is the sum over maximum likelihoods of all possible datasets of size $n$ under the model class $\ensuremath{\mathcal{M}}\xspace$. For discrete data, $R(\ensuremath{\mathcal{M}}\xspace, n)$ is defined as \begin{equation} R(\ensuremath{\mathcal{M}}\xspace, n) = \sum\limits_{Y^n \in \ensuremath{\mathcal{X}}\xspace^n} P(Y^n \mid \hat{\ensuremath{\bm{\theta}}\xspace}(Y^n, \ensuremath{\mathcal{M}}\xspace)) \; , \label{eq:normalizer} \end{equation} where $\ensuremath{\mathcal{X}}\xspace^n$ is the $n$-fold Cartesian product $\ensuremath{\mathcal{X}}\xspace \times \cdots \times \ensuremath{\mathcal{X}}\xspace$ indicating the set of all possible datasets of size $n$ with domain \ensuremath{\mathcal{X}}\xspace. When the data $X^n$ is defined over a continuous sample space, the summation symbol in Equation~\ref{eq:normalizer} is replaced by an integral. The NML distribution has a number of important theoretical properties. First, it gives a unique solution to the minimax problem posed by Shtarkov~\cite{shtarkov:87:universal}, \[ \min_{\hat{P}} \max_{X^n} \log \frac{P(X^n \mid \hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}}\xspace))}{\hat{P}(X^n \mid \ensuremath{\mathcal{M}}\xspace)} \; . \] That is, for \emph{any} data $X^n$, $\pnml{X^n \mid \ensuremath{\mathcal{M}}\xspace}$ assigns a probability that differs from the highest achievable probability within the model class --- the maximum likelihood $P(X^n \mid \hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}}\xspace))$ --- by a constant factor $R(\ensuremath{\mathcal{M}}\xspace, n)$. 
In other words, the NML distribution is the \emph{mini-max optimal universal model} with respect to the model class~\cite{myung:06:nmltut}. The NML distribution represents the behaviour of all the distributions in the model class \ensuremath{\mathcal{M}}\xspace. Second, it also provides solution to another mini-max problem formulated by Rissanen~\cite{rissanen:01:optregret}, which is given by \[ \min_{\hat{P}} \max_{Q} E_{Q} \log \frac{P(X^n \mid \hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}}\xspace))}{\hat{P}(X^n \mid \ensuremath{\mathcal{M}}\xspace)} \; , \] where $Q$ is the worst-case data generating distribution, and $E_{Q}$ is the expectation over $X^n$. That is, even if the true data generating distribution does not reside in the model class \ensuremath{\mathcal{M}}\xspace under consideration, $\pnml{X^n \mid \ensuremath{\mathcal{M}}\xspace}$ still gives the optimal encoding for the data $X^n$ relative to \ensuremath{\mathcal{M}}\xspace. These properties are very important and relevant when modelling real-world problems. In most cases, we do not know the true data generating distribution. In such cases, ideally we would want to encode our data as best as possible --- close to the optimal under the true distribution. The NML distribution provides a theoretically sound means for that. The \emph{stochastic complexity} of data $X^n$ relative to a model class \ensuremath{\mathcal{M}}\xspace using the NML distribution is defined as \begin{align} \scomp{X^n \mid \ensuremath{\mathcal{M}}\xspace} &= -\log \pnml{X^n \mid \ensuremath{\mathcal{M}}\xspace} \nonumber \\ &= -\log P(X^n \mid \hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}}\xspace)) + \log R(\ensuremath{\mathcal{M}}\xspace, n) \; .\label{eq:sc} \end{align} The term $\log R(\ensuremath{\mathcal{M}}\xspace, n)$ is the \emph{parametric} complexity of the model class \ensuremath{\mathcal{M}}\xspace. It indicates how well \ensuremath{\mathcal{M}}\xspace can fit random data. 
The stochastic complexity of data under a model class \ensuremath{\mathcal{M}}\xspace gives the shortest description of the data relative to \ensuremath{\mathcal{M}}\xspace. Hence the richer the \ensuremath{\mathcal{M}}\xspace, the closer we are to Kolmogorov complexity. Intuitively, it is also the amount of information, in bits, in the data relative to the model class. Moreover, it is evident from the formulation that the stochastic complexity of data, relative to a model class, depends only on the data and the model class, but not on the particular way the models are specified. \subsection{Causal Inference by Stochastic Complexity} Unless stated otherwise, we write $X$ for $X^n$, and $Y$ for $Y^n$. The stochastic complexity of data $X$ relative to model class \ensuremath{\mathcal{M}}\xspace corresponds to the complexity of the NML distribution of the data relative to \ensuremath{\mathcal{M}}\xspace. This means we can use the stochastic complexity of $X$ as an approximation of the Kolmogorov complexity of $P(X)$. As such, it provides a general, yet computable, theoretically sound foundation for causal inference based on algorithmic information theory. For ease of notation, wherever clear from context we write $\scomp{X}$ for $\scomp{X \mid \ensuremath{\mathcal{M}}\xspace}$. To infer the causal direction, we look over total stochastic complexity in two directions --- $X$ to $Y$ and vice versa. The total stochastic complexity from $X$ to $Y$, approximating $K(P(X)) + K(P(Y \mid X))$ is given by \[ \ensuremath{\mathcal{S}_{\xtoy}}\xspace = \scomp{X} + \scomp{Y \mid X}\; , \] and that from $Y$ to $X$ is given by \[ \ensuremath{\mathcal{S}_{\ytox}}\xspace = \scomp{Y} + \scomp{X \mid Y}\; . \] Following Theorem~\ref{thm:joint_desc}, using the above indicators we arrive at the following causal inference rules. \begin{itemize} \item If $\ensuremath{\mathcal{S}_{\xtoy}}\xspace < \ensuremath{\mathcal{S}_{\ytox}}\xspace$, we infer \ensuremath{X \rightarrow Y}\xspace. 
\item If $\ensuremath{\mathcal{S}_{\xtoy}}\xspace > \ensuremath{\mathcal{S}_{\ytox}}\xspace$, we infer \ensuremath{Y \rightarrow X}\xspace. \item If $\ensuremath{\mathcal{S}_{\xtoy}}\xspace = \ensuremath{\mathcal{S}_{\ytox}}\xspace$, we are undecided. \end{itemize} That is, if describing $X$ and then describing $Y$ given $X$ is easier --- in terms of stochastic complexity --- than vice versa, we infer $X$ is likely the cause of $Y$. If it is the other way around, we infer $Y$ is likely the cause of $X$. If both ways of describing are the same, we remain undecided. We refer to this framework as \textsc{cisc}\xspace, which stands for causal inference by stochastic complexity. Causal inference using stochastic complexity has a number of powerful properties. First, unlike Kolmogorov complexity, stochastic complexity is computable. Second, the inference rule is generic in the sense that we are not restricted to one data type or distribution---we are only constrained by the model class $\ensuremath{\mathcal{M}}\xspace$ under consideration, yet by the mini-max property of NML we know that even if the data generating distribution is adversarial, we still identify the best encoding relative to $\ensuremath{\mathcal{M}}\xspace$. Next we discuss how we can instantiate \textsc{cisc}\xspace for discrete data. \subsection{Multinomial Stochastic Complexity} We consider a discrete random variable $X$ with $m$ values. Furthermore, we assume that our data $X^n = (x_1, \dots, x_n)$ is multinomially distributed. The space of observations \ensuremath{\mathcal{X}}\xspace is then $\{1, 2, \dots, m\}$. 
The multinomial model class $\ensuremath{\mathcal{M}_m}\xspace$ is defined as \[ \ensuremath{\mathcal{M}_m}\xspace = \{P(X \mid \ensuremath{\bm{\theta}}\xspace) : \ensuremath{\bm{\theta}}\xspace \in \ensuremath{\Theta}\xspace_m \} \; , \] where \ensuremath{\Theta}\xspace is the simplex-shaped parameter space given by \[ \ensuremath{\Theta}\xspace_m = \{\ensuremath{\bm{\theta}}\xspace = (\theta_1, \dots ,\theta_m) : \theta_j \geq 0, \theta_1 + \cdots + \theta_m = 1 \} \; , \] with $\theta_j = P(X=j \mid \ensuremath{\bm{\theta}}\xspace), j=1,\dots,m$. The maximum likelihood parameters for a multinomial distribution are given by $\hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}_m}\xspace) = (h_1/n, \dots, h_m/n)$, where $h_j$ is the number of times an outcome $j$ is seen in $X^n$. Then the distribution induced by the maximum likelihood parameters for $X^n$ under the model class \ensuremath{\mathcal{M}_m}\xspace is given by \begin{align*} P(X^n \mid \hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}_m}\xspace)) &= \prod\limits_{i=1}^{n} P(x_i \mid \hat{\ensuremath{\bm{\theta}}\xspace}(X^n, \ensuremath{\mathcal{M}_m}\xspace))\\ &= \prod_{j=1}^{m} \left ( \frac{h_j}{n} \right ) ^{h_j} \; . \end{align*} The normalizing term $R(\ensuremath{\mathcal{M}_m}\xspace, n)$ is given by \begin{align}\label{eq:norm_mnsc} R(\ensuremath{\mathcal{M}_m}\xspace, n) &= \sum\limits_{Y^n \in \ensuremath{\mathcal{X}}\xspace^n} P(Y^n \mid \hat{\ensuremath{\bm{\theta}}\xspace}(Y^n, \ensuremath{\mathcal{M}_m}\xspace)) \nonumber \\ &= \sum\limits_{h_1+\cdots+h_m=n} \frac{n!}{h_1!\cdots h_m!} \prod\limits_{j=1}^{m} \left ( \frac{h_j}{n} \right )^{h_j} \; . \end{align} Then the NML distribution for $X^n$ under the model class \ensuremath{\mathcal{M}_m}\xspace is given by \begin{align*} \pnml{X^n \mid \ensuremath{\mathcal{M}_m}\xspace} = \frac{\prod_{j=1}^{m} (h_j/n)^{h_j}}{R(\ensuremath{\mathcal{M}_m}\xspace, n)} \; . 
\end{align*} Then the stochastic complexity of $X^n$ for the model class \ensuremath{\mathcal{M}_m}\xspace is given by \begin{align}\label{eq:mnsc} \scomp{X^n \mid \ensuremath{\mathcal{M}_m}\xspace} &= -\log \prod_{j=1}^{m} (h_j/n)^{h_j} + \log R(\ensuremath{\mathcal{M}_m}\xspace, n) \nonumber \\ & = \sum_{j=1}^{m} h_j ( \log n - \log h_j) + \log R(\ensuremath{\mathcal{M}_m}\xspace, n) \nonumber \\ &= n \log n - \sum_{j=1}^{m} h_j \log h_j + \log R(\ensuremath{\mathcal{M}_m}\xspace, n) \; . \end{align} \textbf{Computational Complexity ---} We can compute the counts $h_j$ in $\bigo{n}$ by going through the data once. However, computing the normalizing sum (Equation~\ref{eq:norm_mnsc}), and hence the parametric complexity, is exponential in the number of values $m$. As a result, the computational complexity of the multinomial stochastic complexity (Equation~\ref{eq:mnsc}) is dominated by the computation time of the normalizing sum. However, we can approximate the normalizing sum up to a finite floating-point precision in \emph{sub-linear} time with respect to the data size $n$ given precomputed counts $h_j$~\cite{mononen:08:sublinearsc}. More precisely, the computational complexity of the sub-linear algorithm is $\bigo{\sqrt{dn}+m}$, where $d$ is the floating-point precision in digits. In the experiments we use $d=10$. Altogether we can compute the multinomial stochastic complexity in $\bigo{n}$. \subsection{Computing Conditional Complexity} So far we only discussed how to compute the stochastic complexity of data under a model class. For our purpose, we also need to compute the conditional stochastic complexity $\scomp{Y \mid X}$ and vice versa. Let $\scomp{Y \mid X=x}$ be the stochastic complexity of $Y$ conditioned on $X=x$. Then the conditional stochastic complexity $\scomp{Y \mid X}$ is the sum of $\scomp{Y \mid X=x}$ over all possible values of $X$. Let \ensuremath{\mathcal{X}}\xspace be the domain of $X$. 
Then the stochastic complexity of $Y$ given $X$ is defined as \begin{align*} \scomp{Y \mid X} = \sum_{x \in \ensuremath{\mathcal{X}}\xspace} \scomp{Y \mid X = x} \; . \end{align*} \textbf{Computational Complexity ---} We can compute $\scomp{Y \mid X=x}$ in $\bigo{n}$. To compute the conditional stochastic complexity $\scomp{Y \mid X}$, we have to compute $\scomp{Y \mid X=x}$ over all $x \in \ensuremath{\mathcal{X}}\xspace$. Hence the computational complexity of the conditional stochastic complexity is $\bigo{n|\ensuremath{\mathcal{X}}\xspace|}$. Likewise, for $\scomp{X \mid Y}$, we have $\bigo{n|\ensuremath{\mathcal{Y}}\xspace|}$. Altogether the computational complexity of \textsc{cisc}\xspace is $\bigo{n \max(|\ensuremath{\mathcal{X}}\xspace|, |\ensuremath{\mathcal{Y}}\xspace|)}$. \section{Related Work}\label{sec:rel} Inferring causal direction from observational data is a challenging task due to the lack of controlled randomised experiments. However, it has also attracted quite a lot of attention over the years~\cite{pearl:00:book,spirtes:00:book,shimizu:06:anm,janzing:10:algomarkov}. Yet, most of the causal inference frameworks are built for continuous real-valued data. Constraint-based approaches like conditional independence tests~\cite{spirtes:00:book,pearl:00:book} are among the widely used causal inference frameworks. However, they require at least three observed random variables. Therefore they cannot distinguish between \ensuremath{X \rightarrow Y}\xspace and \ensuremath{Y \rightarrow X}\xspace as the factorization of the joint distribution $P(X, Y)$ is the same in both directions, i.e.\ $P(X) P(Y\mid X) = P(Y) P(X \mid Y)$. In recent years, several methods have been proposed that exploit the sophisticated properties of the joint distribution. 
The linear trace method~\cite{janzing:10:ltr,zscheischler:11:letr} infers linear causal relations of the form $Y = AX$, where $A$ is the structure matrix that maps the cause to the effect, using the linear trace condition. The kernelized trace method~\cite{chen:13:ktr} can infer non-linear causal relations, but requires the causal relation to be deterministic, functional, and invertible. In contrast, we do not make any assumptions on the causal relation between the variables. One of the key frameworks for causal inference is the Additive Noise Model (ANM)~\cite{shimizu:06:anm}. ANMs assume that the effect is a function of the cause and the additive noise that is independent of the cause. Causal inference is then done by finding the direction that admits such a model. Over the years, many frameworks for causal inference from real-valued data have been proposed using ANMs~\cite{shimizu:06:anm,hoyer:09:nonlinear,zhang:09:ipcm,peters:14:continuousanm}. Algorithmic information theory provides a sound general theoretical foundation for causal inference~\cite{janzing:10:algomarkov}. The key idea is that if $X$ causes $Y$, the shortest description of the joint distribution $P(X, Y)$ is given by the separate descriptions of the distributions $P(X)$ and $P(Y \mid X)$~\cite{janzing:10:algomarkov}. It has also been used in justifying the additive noise model based causal discovery~\cite{janzing:10:justifyanm}. However, as Kolmogorov complexity is not computable, practical instantiations require computable notions of independence. For instance, the information-geometric approach~\cite{janzing:12:igci} defines independence via orthogonality in information space. \textsc{Cure}~\cite{sgouristsa:15:cure} defines independence in terms of the accuracy of the estimations of $P(Y \mid X)$ and $P(X \mid Y)$. 
Using algorithmic information theory, Vreeken~\cite{vreeken:15:ergo} proposes a causal framework based on relative conditional complexity and instantiates it with cumulative entropy to infer the causal direction in continuous real-valued data. Budhathoki \& Vreeken~\cite{budhathoki:16:origo} propose a decision tree based approach for causal inference on univariate and multivariate binary data. All of the above methods consider either continuous real-valued or binary data. Causal inference from discrete data has received much less attention. Peters et al.~\cite{peters:10:discreteanm} (\textsc{dr}\xspace) extend additive noise models to discrete data. However, regression is not ideal for modelling categorical variables, and it relies on a dependence measure, the choice of which affects the outcome. Liu \& Chan~\cite{liu:16:dc} (\textsc{dc}\xspace) define independence in terms of the distance correlation between empirical distributions $P(X)$ and $P(Y \mid X)$ to infer the causal direction from categorical data. As such, it does not consider the space of all possible observed samples and hence overfits. In contrast, we do consider the space of all possible observed samples. Moreover, we provide a general, yet computable, theory for causal inference that is applicable to any type of data. In particular, we directly approximate Kolmogorov complexity using a score that is mini-max optimal with regard to the model class under consideration. The computational complexity of our instantiation, \textsc{cisc}\xspace, is linear in sample size, regardless of the domain of the variables. In the experiments, we consider both \textsc{dc}\xspace and \textsc{dr}\xspace for comparison. 
\section{Experiments}\label{sec:exps} We implemented \textsc{cisc}\xspace in Python and provide the source code for research purposes, along with the used datasets, and synthetic dataset generator.\!\footnote{\url{http://eda.mmci.uni-saarland.de/cisc/}} All experiments were executed single-threaded on Intel Xeon E5-2643 v3 machine with $256$GB memory running Linux. We consider synthetic, benchmark, and real-world data. In particular, we note that \textsc{cisc}\xspace is parameter-free. We compare \textsc{cisc}\xspace against Discrete Regression (\textsc{dr}\xspace)~\cite{peters:10:discreteanm}, and \textsc{dc}\xspace~\cite{liu:16:dc}. In particular, we use significance level of $\alpha=0.05$ for the independence test in \textsc{dr}\xspace, and threshold of $\epsilon = 0.0$ for \textsc{dc}\xspace. \subsection{Synthetic Data}\label{subsec:synthetic_data} To evaluate \textsc{cisc}\xspace on the data with known ground truth, we consider synthetic data. Generating non-trivial synthetic data with identifiable causal direction is surprisingly difficult, though.\!\footnote{Ideally we would generate data with known $K(P(X))+K(P(Y\mid X)) < K(P(Y))+K(P(X\mid Y))$, and evaluate our inference methods accordingly, yet as Kolmogorov complexity is not computable it is not apparent how to do this in general.} We generate synthetic cause-effect pairs with ground truth \ensuremath{X \rightarrow Y}\xspace using the additive noise model (ANM). That is, first we generate the cause $X$, and then generate the effect $Y$ using the model given by \[ Y = f(X) + N, N \ensuremath{\perp \! \! \! \perp}\xspace X \; , \] where $f$ is a function, and $N$ is additive noise that is independent of $X$. Following \citet{peters:10:discreteanm}, we sample $X$ from the following distributions, using independently generated uniform noise. 
\begin{itemize} \item uniform from $\{1, \dots, L\}$, \item binomial with parameters $(n, p)$, \item geometric with parameter $p$, \item hypergeometric with parameters $(M, K, N)$, \item poisson with parameter $\lambda$, \item negative binomial with parameters $(n, p)$, and \item multinomial with parameters $\ensuremath{\bm{\theta}}\xspace$. \end{itemize} We note that even though we generate data following ANM from $X$ to $Y$, the joint distribution $P(X, Y)$ might admit an additive noise model in the reverse direction. Therefore in some cases where we say that \ensuremath{X \rightarrow Y}\xspace is the true direction, \ensuremath{Y \rightarrow X}\xspace might also be equally plausible, and hence full accuracy might not be achievable in some cases. However, this happens in only few trivial instances~\cite{peters:10:discreteanm}. We choose parameters of the distributions randomly for each model class. We choose $L$ uniformly between $1$ and $10$, $M, K$ uniformly between $1$ and $40$, $N$ uniformly between $1$ and $\min(41, M + K)$, $p$ uniformly between $0.1$ and $0.9$, $\lambda$ uniformly between $1$ and $10$, $\ensuremath{\bm{\theta}}\xspace$ randomly s.t. $\sum_{\theta \in \ensuremath{\bm{\theta}}\xspace} \theta = 1.0$, function $f(x)$ uniformly between $-7$ to $+7$, and noise $N$ uniformly between $-t$ to $+t$, where $t$ is uniformly randomly chosen between $1$ and $7$. \textbf{Accuracy ---} From each model class, we sample $1000$ different models, and hence $1000$ different cause-effect pairs. For each model, we sample $1000$ points, i.e.\ $n=1000$. In Figure~\ref{fig:acc_synth_data}, we compare the \emph{accuracy} (percentage of correct decisions) of \textsc{cisc}\xspace against \textsc{dc}\xspace and \textsc{dr}\xspace for various model classes. We see that \textsc{cisc}\xspace either outperforms or is as good as the other methods in all but one case. This certainly proves the generality of \textsc{cisc}\xspace. 
Although we compute the stochastic complexity under multinomial model class, we are still able to perform as good with other model classes. This is due to the optimality property of the NML distribution -- even though the true data generating distribution is not inside the model class \ensuremath{\mathcal{M}}\xspace under consideration, the NML distribution still gives the optimal encoding relative to \ensuremath{\mathcal{M}}\xspace. And as we see, it works well in most cases. \begin{figure}[t] \centering \ifgenplot \ifpdf \tikzsetnextfilename{accuracy_synth} \fi \begin{tikzpicture} \begin{axis}[eda ybar, width=0.75\columnwidth, height=4.5cm, ymin=0.0, ymax=1.0, enlarge x limits=0.08, bar width=0.59em, xlabel=data distribution, ylabel=accuracy, x label style={yshift=-20pt}, y label style={yshift=-25pt}, xtick=data, symbolic x coords={uniform,binomial,geometric,hypergeometric,poisson,multinomial,negativeBinomial}, x tick label style={rotate=45, anchor=east, align=right, yshift=-2pt}, legend style={anchor=south, at={(0.087,0.02)}}, legend image post style={scale=0.6}, ] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/accuracy_synthetic.dat}; } \legend{\textsc{dc}\xspace, \textsc{dr}\xspace,\textsc{cisc}\xspace}; \end{axis} \end{tikzpicture} \else \includegraphics[]{accuracy_synth.pdf} \fi \caption{Accuracy on synthetic cause-effect pairs sampled from different distributions.} \label{fig:acc_synth_data} \end{figure} \textbf{Decision Rate ---} Next we investigate the accuracy of \textsc{cisc}\xspace against the fraction of decisions \textsc{cisc}\xspace is forced to make. To this end, for each model class, we sample $1000$ different cause-effect pairs. For each cause-effect pair, we sample $1000$ points. We sort the pairs by their absolute score difference in two directions (\ensuremath{X \rightarrow Y}\xspace vs. 
\ensuremath{Y \rightarrow X}\xspace), i.e.\ $|\ensuremath{\mathcal{S}_{\xtoy}}\xspace - \ensuremath{\mathcal{S}_{\ytox}}\xspace|$ in descending order. Then we compute the accuracy over top-$k\%$ pairs. The decision rate is the fraction of \emph{top} cause-effect pairs that we consider. Alternatively, it is also the fraction of cause-effect pairs whose $|\ensuremath{\mathcal{S}_{\xtoy}}\xspace - \ensuremath{\mathcal{S}_{\ytox}}\xspace|$ is greater than some threshold $\delta$. For undecided pairs, we flip the coin. For other methods, we follow the similar procedure with their respective absolute score difference. In Figure~\ref{fig:dec_rate_synthetic}, we show the decision rate versus accuracy for different model classes. We see that both \textsc{cisc}\xspace and \textsc{dr}\xspace are highly accurate up to a very high decision rate in all cases. Both \textsc{cisc}\xspace and \textsc{dr}\xspace are highly accurate on the cause-effect pairs where the absolute score difference is very high --- where the methods are most decisive. \textsc{dc}\xspace, on the other hand, doesn't perform well in all cases. The only setting where $\textsc{dc}\xspace$ has a relatively good performance is in the family of Uniform distributions. The results indicate that we can increase the threshold $\delta$, and hence the decision rate, for higher accuracy. 
\begin{figure*}[tb] \begin{minipage}[t]{0.33\linewidth} \centering \ifgenplot \ifpdf \tikzsetnextfilename{dec_rate_synth_uniform} \fi \begin{tikzpicture} \begin{axis}[eda line, height=3cm, width=\textwidth, xlabel=decision rate,ylabel=accuracy, ymin=0.0, ymax=1.0, xmin=0.0, xmax=1.0, legend style={nodes={scale=0.8, transform shape}, at={(0.91,0.4)}, anchor=north}, ytick={0.0,0.25, ...,1.0}] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/dec_rate_synth_uniform.dat}; } \legend{\textsc{dc}\xspace, \textsc{dr}\xspace, \textsc{cisc}\xspace} \end{axis} \end{tikzpicture} \else \includegraphics[]{dec_rate_synth_uniform.pdf} \fi \vspace{-0.4cm} \subcaption{Uniform}\label{fig:dec_rate_uniform} \end{minipage} \begin{minipage}[t]{0.33\linewidth} \centering \ifgenplot \ifpdf \tikzsetnextfilename{dec_rate_synth_binom} \fi \begin{tikzpicture} \begin{axis}[eda line, xlabel=decision rate,ylabel=accuracy, ymin=0.0, ymax=1.0, height=3cm, width=\textwidth, xmin=0.0, xmax=1.0, ytick={0.0,0.25, ...,1.0}] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/dec_rate_synth_binomial.dat}; } \end{axis} \end{tikzpicture} \else \includegraphics{dec_rate_synth_binom} \fi \vspace{-0.4cm} \subcaption{Binomial}\label{fig:dec_rate_binomial} \end{minipage} \begin{minipage}[t]{0.33\linewidth} \centering \ifgenplot \ifpdf \tikzsetnextfilename{dec_rate_synth_geometric} \fi \begin{tikzpicture} \begin{axis}[eda line, xlabel=decision rate,ylabel=accuracy, ymin=0.0, ymax=1.0, height=3cm, width=\textwidth, xmin=0.0, xmax=1.0, legend style={nodes={scale=0.8, transform shape}, at={(0.93,0.3)}, anchor=north},ytick={0.0,0.25, ...,1.0}] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/dec_rate_synth_geometric.dat}; } \end{axis} \end{tikzpicture} \else \includegraphics{dec_rate_synth_geometric} \fi \vspace{-0.4cm} \subcaption{Geometric}\label{fig:dec_rate_geometric} 
\end{minipage} \par\medskip \begin{minipage}[t]{0.33\linewidth} \centering \ifgenplot \ifpdf \tikzsetnextfilename{dec_rate_synth_hypergeo} \fi \begin{tikzpicture} \begin{axis}[eda line, xlabel=decision rate,ylabel=accuracy, ymin=0.0, ymax=1.0, height=3cm, width=\textwidth, xmin=0.0, xmax=1.0, ytick={0.0,0.25, ...,1.0}] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/dec_rate_synth_hypergeometric.dat}; } \end{axis} \end{tikzpicture} \else \includegraphics{dec_rate_synth_hypergeo} \fi \vspace{-0.4cm} \subcaption{Hypergeometric}\label{fig:dec_rate_hypergeometric} \end{minipage} \begin{minipage}[t]{0.33\linewidth} \centering \ifgenplot \ifpdf \tikzsetnextfilename{dec_rate_synth_multinom} \fi \begin{tikzpicture} \begin{axis}[eda line, xlabel=decision rate,ylabel=accuracy, ymin=0.0, ymax=1.0, height=3cm, width=\textwidth, xmin=0.0, xmax=1.0, ytick={0.0,0.25, ...,1.0}] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/dec_rate_synth_multinomial.dat}; } \end{axis} \end{tikzpicture} \else \includegraphics{dec_rate_synth_multinom} \fi \vspace{-0.4cm} \subcaption{Multinomial}\label{fig:dec_rate_multinomial} \end{minipage} \begin{minipage}[t]{0.33\linewidth} \centering \ifgenplot \ifpdf \tikzsetnextfilename{dec_rate_synth_poisson} \fi \begin{tikzpicture} \begin{axis}[eda line, xlabel=decision rate,ylabel=accuracy, ymin=0.0, ymax=1.0, height=3cm, width=\textwidth, xmin=0.0, xmax=1.0, ytick={0.0,0.25, ...,1.0}] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/dec_rate_synth_poisson.dat}; } \end{axis} \end{tikzpicture} \else \includegraphics{dec_rate_synth_poisson} \fi \vspace{-0.4cm} \subcaption{Poisson}\label{fig:dec_rate_poisson} \end{minipage} \caption{Accuracy against decision rate on synthetic cause-effect pairs sampled from different distributions.} \label{fig:dec_rate_synthetic} \end{figure*} \textbf{Scalability ---} Next we 
empirically investigate the scalability of \textsc{cisc}\xspace. First, we examine runtime with regard to the sample size. To this end, we fix the domain size of the cause-effect pairs to $20$, i.e.\ $|\ensuremath{\mathcal{X}}\xspace| = |\ensuremath{\mathcal{Y}}\xspace| = 20$. Then for a given sample size, we sample $X$ uniformly randomly between $1$ and $|\ensuremath{\mathcal{X}}\xspace|$. Likewise for $Y$. In Figure~\ref{fig:size_vs_runtime}, we show the runtime of \textsc{cisc}\xspace, \textsc{dc}\xspace, and \textsc{dr}\xspace for various sample sizes. We observe that both \textsc{cisc}\xspace and \textsc{dc}\xspace (overlapping line) finish within seconds. \textsc{dr}\xspace, on the other hand, takes in the order of hours. Next we fix the sample size to $n = 100\,000$ and vary the domain size $|\ensuremath{\mathcal{X}}\xspace|=|\ensuremath{\mathcal{Y}}\xspace|$. We observe that both \textsc{cisc}\xspace and \textsc{dc}\xspace again finish within seconds over the whole range. As \textsc{dr}\xspace iteratively searches over the entire domain, it shows a non-linear runtime behaviour with respect to the domain size. Overall, these results indicate that \textsc{dr}\xspace is fairly accurate, but relatively slow. \textsc{dc}\xspace, on the other hand, is fast, yet inaccurate. \textsc{cisc}\xspace is both highly accurate, and fast. 
\begin{figure}[tb] \begin{minipage}[t]{0.45\columnwidth} \centering \ifgenplot \ifpdf \tikzsetnextfilename{size_vs_runtime} \fi \begin{tikzpicture} \begin{axis}[eda line, ylabel={runtime (s)}, xlabel={sample size ($n$)}, width=\textwidth, legend style={nodes={scale=0.8, transform shape}, at={(0.84,0.6)}, anchor=north}, y label style={yshift=0pt}, scaled x ticks=base 10:-6, x tick scale label style={xshift=15pt, yshift=10pt}, scaled y ticks=base 10:-3, y tick scale label style={xshift=-10pt}, ytick={0, 2000, ..., 8000}, ymax=8000 ] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/size_vs_runtime.dat}; } \legend{\textsc{dc}\xspace, \textsc{dr}\xspace, \textsc{cisc}\xspace} \end{axis} \end{tikzpicture} \else \includegraphics{size_vs_runtime} \fi \subcaption{Runtime against the sample size ($|\ensuremath{\mathcal{X}}\xspace|=|\ensuremath{\mathcal{Y}}\xspace|=20$).} \label{fig:size_vs_runtime} \end{minipage} \hspace{0.4cm} \begin{minipage}[t]{0.45\columnwidth} \centering \ifgenplot \ifpdf \tikzsetnextfilename{domain_vs_runtime} \fi \begin{tikzpicture} \begin{axis}[eda line, ylabel={runtime (s)}, xlabel={$|\ensuremath{\mathcal{X}}\xspace|=|\ensuremath{\mathcal{Y}}\xspace|$}, width=\textwidth, y label style={yshift=0pt}, ytick={0, 2000, ..., 6000}, ymax=6000, scaled y ticks=base 10:-3, y tick scale label style={xshift=0pt}] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/domain_vs_runtime.dat}; } \end{axis} \end{tikzpicture} \else \includegraphics{domain_vs_runtime} \fi \subcaption{Runtime against the domain size ($n = 100\,000$).} \label{fig:domain_vs_runtime.dat} \end{minipage} \caption{Runtime of the frameworks against (a) sample size, and (b) domain size.} \end{figure} \subsection{Benchmark Data} Next we evaluate \textsc{cisc}\xspace on benchmark cause-effect pairs with known ground truth~\cite{mooij:16:pairs}. In particular, we take $95$ univariate cause-effect pairs. 
So far there does not exist a discretization strategy that provably preserves the causal relationship between variables. Since each cause-effect pair is from a different domain, using one discretization strategy over all the pairs is also unfair. Moreover, we do not know the underlying domain of the data. As a result, we treat the data as discrete for all the pairs. In Figure~\ref{fig:dec_rate_benchmark}, we compare the accuracy of \textsc{cisc}\xspace against \textsc{dc}\xspace and \textsc{dr}\xspace at various decision rates, together with the $95\%$ confidence interval for a random coin flip. If we look over all the pairs, we find that \textsc{cisc}\xspace infers the correct direction in roughly $67\%$ of all the pairs. When we consider only those pairs where \textsc{cisc}\xspace is most decisive---with a very high value of $|\scomp{\ensuremath{X \rightarrow Y}\xspace} - \scomp{\ensuremath{Y \rightarrow X}\xspace}|$---it is $100\%$ accurate on the top $22\%$ of the pairs, and $80\%$ accurate on the top $45\%$ of the pairs, which is on par with the top-performing causal inference frameworks for continuous real-valued data~\cite{sgouristsa:15:cure,janzing:12:igci}. On the other hand, the results from both \textsc{dc}\xspace and \textsc{dr}\xspace are insignificant at almost every decision rate.
\begin{figure}[tb] \centering \ifgenplot \ifpdf \tikzsetnextfilename{dec_rate_benchmark} \fi \begin{tikzpicture} \begin{axis}[eda line, xlabel=decision rate,ylabel=accuracy, ymin=0.0, ymax=1.0, width=0.75\columnwidth, height=4.1cm, y label style={yshift=5pt}, xmin=0.0, xmax=1.0, legend style={nodes={scale=0.8, transform shape}, at={(0.94,0.22)}, anchor=north}] \pgfplotsinvokeforeach{1,...,3}{ \addplot table[x index=0, y index=#1, header=false] {../expres/dec_rate_benchmark.dat}; } \addplot[name path=f, color=verylightgray] table[x index = 0, y index = 7, header = false] {../expres/dec_rate_crack.dat}; \addplot[name path=g, color=verylightgray] table[x index = 0, y index = 8, header = false] {../expres/dec_rate_crack.dat}; \addplot [verylightgray ] fill between[of=f and g, soft clip={domain=0:1}]; \legend{\textsc{dc}\xspace, \textsc{dr}\xspace, \textsc{cisc}\xspace} \end{axis} \end{tikzpicture} \else \includegraphics{dec_rate_benchmark} \fi \caption{Accuracy against decision rate for univariate T{\"u}bingen cause-effect pairs. Gray area indicates the 95\% confidence interval for a random coin flip.} \label{fig:dec_rate_benchmark} \end{figure} \subsection{Qualitative Case Studies} Next we evaluate \textsc{cisc}\xspace on real-world data for exploratory purpose. \textbf{Abalone ---} First we consider the \emph{Abalone} dataset, which is available from the UCI machine learning repository.\!\footnote{\label{fnote:uci}\url{http://archive.ics.uci.edu/ml/}} The dataset contains the physical measurements of $4\,177$ abalones, which are large, edible sea snails. Out of the nine measurements, we consider the \emph{sex} ($X$), \emph{length} ($Y_1$), \emph{diameter} ($Y_2$), and \emph{height} ($Y_3$). The length, diameter, and height of the abalone are all measured in millimetres, and have $70$, $57$ and $28$ different values, respectively whereas the sex of the abalone is nominal ($\text{male}=1$, $\text{female}=2$, or $\text{infant}=3$). 
Following Peters et al.~\cite{peters:10:discreteanm}, we regard the data as discrete, and consider $X \rightarrow Y_1$, $X \rightarrow Y_2$, and $X \rightarrow Y_3$ as the ground truth as sex causes the size of the abalone and not the other way around. \textsc{cisc}\xspace infers the correct direction in all three cases. \textbf{Car Evaluation ---} The Car Evaluation dataset is available from the UCI machine learning repository. It has $1728$ rows, and is derived from a hierarchical decision model. It contains the evaluation of a car for buying purposes based on six characteristics of the car. We consider the estimated \emph{safety} ($X$) of the car against the \emph{evaluation} ($Y$) of the car. The safety feature of the car takes a nominal value ($\text{low}=1$, $\text{medium}=2$, or $\text{high}=3$), and the evaluation feature of the car also takes a nominal value ($\text{unacceptable}=1$, $\text{acceptable}=2$, $\text{good}=3$, or $\text{very good}=4$). We regard \ensuremath{X \rightarrow Y}\xspace as the ground truth as safety of the car causes the decision on buying the car, but not vice versa. \textsc{cisc}\xspace identifies the correct direction. \textbf{Adult ---} The \emph{Adult} dataset is taken from the UCI machine learning repository and consists of $48\,832$ records from the census database of the US in $1994$. Out of $14$ attributes, we consider only three---\emph{education} ($X_1$), \emph{occupation} ($X_2$), and \emph{income} ($Y$). The domain of the \emph{education} attribute consists of \emph{dropout, associates, bachelors, doctorate, hs-graduate, masters}, and \emph{prof-school}. For \emph{occupation}, we have \emph{admin, armed-force, blue-collar, white-collar, service, sales, professional}, and \emph{other-occupation} as possible values. Lastly, for the \emph{income} attribute, we have two values: \emph{\textgreater 50K} and \emph{\textless=50K}. As intuitively education causes income, and not vice versa, we regard $X_1 \rightarrow Y$ as the ground truth.
Similarly, as occupation causes income, we regard $X_2 \rightarrow Y$ as the ground truth. We run \textsc{cisc}\xspace on both pairs $(X_1, Y)$ and $(X_2, Y)$. We observe that for both pairs \textsc{cisc}\xspace infers the causal direction correctly. Overall, these results illustrate that \textsc{cisc}\xspace finds sensible causal directions from real-world data. \section{Discussion}\label{sec:disc} The experiments show that \textsc{cisc}\xspace works well in practice. \textsc{cisc}\xspace reliably identifies the true causal direction regardless of the data distribution. It is remarkably fast. On benchmark data, its performance is comparable to the state-of-the-art causal inference frameworks for continuous real-valued data. Moreover, the qualitative case studies show that the results are sensible. In this work, we give a general framework for causal inference based on the solid foundations of information theory. To apply the framework in practice, we just have to compute the stochastic complexity relative to a model class. The richer the model class, the better the solution. Although computing the stochastic complexity involves looking over all possible datasets, theoretically it is still computable, and there do exist efficient algorithms for certain model classes. The proposed framework lays a clear computable foundation for the algorithmic causal inference principle postulated by Janzing \& Sch{\"o}lkopf~\cite{janzing:10:algomarkov}. Although the results show the strength of the proposed framework, and of \textsc{cisc}\xspace in particular, we see many possibilities to further improve. We instantiated the framework using multinomial stochastic complexity on discrete data. We see that \textsc{cisc}\xspace performs relatively well even in cases where the data is not sampled from the multinomial model class.
This is due to the optimality property of the multinomial distribution --- even if the true data generating distribution is not inside the model class \ensuremath{\mathcal{M}}\xspace under consideration, the NML distribution still gives the optimal encoding for the data relative to \ensuremath{\mathcal{M}}\xspace. It would be an engaging future work to instantiate the framework for other types of data (e.g. continuous real-valued, mixed, etc.) and model classes (e.g. family of Gaussians, Dirichlets, etc.). The key aspect to study would be efficient algorithms for computing the stochastic complexity for such model classes. We define conditional stochastic complexity $\scomp{Y \mid X}$ as the sum of the stochastic complexities of $Y$ conditioned on $X=x$ over all $x$. This way we look over local stochastic complexities of parts of $Y$ relative to each value of $x$. Perhaps we can compute the conditional stochastic complexity globally relative to $X$. It would also be interesting to explore factorized normalized maximum likelihood models~\cite{roos:08:structure} to instantiate the framework for multivariate data~\cite{budhathoki:16:origo}. To infer the causal relationship between variables $X$ and $Y$, we assume that there is no confounding variable $Z$. It would be interesting to use the framework to additionally discover the confounding variables. The rough idea is that factorizing the joint complexity $P(X, Y)$ in presence of the confounding variable $Z$ leads to the smallest stochastic complexity compared to factorizing into $P(X)$ and $P(Y \mid X)$ or $P(Y)$ and $P(X \mid Y)$. Another avenue for future work would be to use the framework for causal discovery. The proposed framework infers causal relationship between given two variables $X$ and $Y$. It would be interesting to explore how the framework can be employed to discover (mine) the causal models \emph{directly} from the data. 
\section{Conclusion}\label{sec:concl} We considered causal inference from observational data. We proposed a general, yet \emph{computable} framework for information-theoretic causal inference with optimality guarantees. In particular, we proposed to perform causal inference by stochastic complexity. To illustrate the strength of this, we proposed \textsc{cisc}\xspace for pairs of univariate discrete variables, using stochastic complexity over the class of multinomial distributions. Extensive evaluation on synthetic, benchmark, and real-world data showed that \textsc{cisc}\xspace is highly accurate, outperforming the state of the art by a margin, and scales extremely well with regard to both sample and domain sizes. Future work includes considering richer model classes, as well as structure learning for the discovery of causal models from data. \begin{acks} Kailash Budhathoki is supported by the International Max Planck Research School for Computer Science. Both authors are supported by the Cluster of Excellence ``Multimodal Computing and Interaction'' within the Excellence Initiative of the German Federal Government. \end{acks} \bibliographystyle{ACM-Reference-Format}
1,108,101,565,824
arxiv
\section{#1}\setcounter{equation}{0}} \renewcommand{\theequation}{\thesection.\arabic{equation}} \newtheorem{Thm}{Theorem}[section] \newtheorem{Defi}[Thm]{Definition} \newtheorem{Cor}[Thm]{Corollary} \newtheorem{Lemma}[Thm]{Lemma} \newtheorem{Prop}[Thm]{Proposition} \newtheorem{Rem}[Thm]{Remark} \newtheorem{Conj}[Thm]{Conjecture} \newtheorem{Prelim}[Thm]{Preliminary} \newenvironment{thm}[0]{\begin{Thm}\noindent}% {\end{Thm}} \newenvironment{defi}[0]{\begin{Defi}\noindent\rm}% {\end{Defi}} \newenvironment{cor}[0]{\begin{Cor}\noindent}% {\end{Cor}} \newenvironment{lemma}[0]{\begin{Lemma}\noindent}% {\end{Lemma}} \newenvironment{prop}[0]{\begin{Prop}\noindent}% {\end{Prop}} \newenvironment{rem}[0]{\begin{Rem}\noindent\rm}% {\end{Rem}} \newenvironment{conj}[0]{\begin{Conj}\noindent}% {\end{Conj}} \newenvironment{prelim}[0]{\begin{Prelim}\noindent}% {\end{Prelim}} \def\par\noindent{\it Proof.}{\ }{\ }{\par\noindent{\it Proof.}{\ }{\ }} \def~\hfill$\square$\medbreak{~\hfill$\square$\medbreak} \def\medbreak\noindent{\medbreak\noindent} \def\text#1{\;\;\;\;{\rm \hbox{#1}}\;\;\;\;} \def\quad\quad{\quad\quad} \def\quad\quad\quad{\quad\quad\quad} \def\vspace{-1mm}\item[{\rm (a)}]{\vspace{-1mm}\item[{\rm (a)}]} \def\item[{\rm (b)}]{\item[{\rm (b)}]} \def\item[{\rm (c)}]{\item[{\rm (c)}]} \def\item[{\rm (d)}]{\item[{\rm (d)}]} \def\item[{\rm (e)}]{\item[{\rm (e)}]} \def\item[{\rm (f)}]{\item[{\rm (f)}]} \def\item[{\rm (g)}]{\item[{\rm (g)}]} \def\item[{\rm (h)}]{\item[{\rm (h)}]} \def\item[{\rm (i)}]{\item[{\rm (i)}]} \def\msy#1{{\mathbb #1}} \def{\msy C}{{\msy C}} \def{\msy N}{{\msy N}} \def{\msy Z}{{\msy Z}} \def{\msy R}{{\msy R}} \def{\msy D}{{\msy D}} \def{\msy T}{{\msy T}} \def\alpha{\alpha} \def\beta{\beta} \def\delta{\delta} \def\varepsilon{\varepsilon} \def\varphi{\varphi} \def\gamma{\gamma} \def\kappa{\kappa} \def\lambda{\lambda} \def\nu{\nu} \def\rho{\rho} \def\sigma{\sigma} \def\chi{\chi} \def\zeta{\zeta} \def\Delta{\Delta} \def\Phi{\Phi} \def\Lambda{\Lambda} 
\def\Sigma{\Sigma} \def\frak#1{\mathfrak #1} \def{\frak a}{{\frak a}} \def{\frak b}{{\frak b}} \def{\frak g}{{\frak g}} \def{\frak h}{{\frak h}} \def{\frak j}{{\frak j}} \def{\frak k}{{\frak k}} \def{\frak l}{{\frak l}} \def{\frak m}{{\frak m}} \def{\frak n}{{\frak n}} \def{\frak p}{{\frak p}} \def{\frak q}{{\frak q}} \def{\frak s}{{\frak s}} \def{\frak t}{{\frak t}} \def{\frak u}{{\frak u}} \def\Leftarrow{\Rightarrow} \def\rightarrow{\rightarrow} \def{\rm Re}\,{{\rm Re}\,} \def{\rm Im}\,{{\rm Im}\,} \def\inp#1#2{\langle#1\,,\,#2\rangle} \def\hinp#1#2{\langle#1\,|\,#2\rangle} \def{\rm Ad}{{\rm Ad}} \def{\rm End}{{\rm End}} \def{\rm Hom}{{\rm Hom}} \def{\rm I}{{\rm I}} \def{\rm ad}{{\rm ad}} \def\,{\scriptstyle\circ}\,{\,{\scriptstyle\circ}\,} \def\simeq{\simeq} \def{\rm pr}{{\rm pr}} \def\otimes{\otimes} \def\Leftarrow{\Leftarrow} \def\pijl#1{{\buildrel #1 \over \longrightarrow}} \def{\rm tr}\,{{\rm tr}\,} \def{\rm q}{{\rm q}} \def{\scriptscriptstyle \C}{{\scriptscriptstyle {\msy C}}} \def{\scriptscriptstyle \R}{{\scriptscriptstyle \R}} \def{\mathcal A}{{\mathcal A}} \def{\mathcal B}{{\mathcal B}} \def{\mathcal C}{{\mathcal C}} \def{\mathcal D}{{\mathcal D}} \def{\mathcal E}{{\mathcal E}} \def{\mathcal F}{{\mathcal F}} \def{\mathcal H}{{\mathcal H}} \def{\mathcal I}{{\mathcal I}} \def{\mathcal K}{{\mathcal K}} \def{\mathcal L}{{\mathcal L}} \def{\mathcal M}{{\mathcal M}} \def{\mathcal N}{{\mathcal N}} \def{\mathcal O}{{\mathcal O}} \def{\mathcal P}{{\mathcal P}} \def{\mathcal R}{{\mathcal R}} \def{\mathcal S}{{\mathcal S}} \def{\mathcal T}{{\mathcal T}} \def{\mathcal U}{{\mathcal U}} \def{\mathcal V}{{\mathcal V}} \def{\mathcal W}{{\mathcal W}} \def{\mathcal Y}{{\mathcal Y}} \def{\mathcal Z}{{\mathcal Z}} \def{\rm O}{{\rm O}} \def{\rm S}{{\rm S}} \def{\rm U}{{\rm U}} \def\rmS\rmU{{\rm S}{\rm U}} \def\rmS\rmO{{\rm S}{\rm O}} \def{\rm PW}{{\rm PW}} \def{\rm Exp}{{\rm Exp}} \begin{document} \setcounter{section}{0} \title{A local Paley--Wiener theorem for\\ compact 
symmetric spaces} \author{Gestur \'Olafsson \footnote{Research supported by NSF grant DMS-0402068} \ and Henrik Schlichtkrull} \date{26 April 2007} \maketitle \begin{abstract} The Fourier coefficients of a smooth $K$-invariant function on a compact symmetric space $M=U/K$ are given by integration of the function against the spherical functions. For functions with support in a neighborhood of the origin, we describe the size of the support by means of the exponential type of a holomorphic extension of the Fourier coefficients. \footnote{2000 Mathematics Subject Classification: 33C55, 43A85, 53C35} \end{abstract} \noindent \eqsection{Introduction} The classical Paley-Wiener theorem (also called the Paley-Wiener-Schwartz theorem) describes the image by the Fourier transform of the space of compactly supported smooth functions on ${\msy R}^n$. The theorem was generalized to Riemannian symmetric spaces of the noncompact type by Helgason and Gangolli (see \cite{GGA}, Thm.\ IV,7.1, \cite{Gang}), to semisimple Lie groups by Arthur (see \cite{Arthur}), and to pseudo-Riemannian reductive symmetric spaces by van den Ban and Schlichtkrull (see \cite{BanS}). More precisely, these theorems describe the Fourier image of the space of functions supported in a (generalized) ball of a given size. The image space consists of holomorphic (in the pseudo-Riemannian case, meromorphic) functions with exponential growth, and the size of the ball is reflected in the exponent of the exponential growth estimate. In this paper we present an analogue of these theorems for Riemannian symmetric spaces of the compact type. Obviously the compact support is trivial in this case, and the important issue is the determination of the {\it size} of the support of a smooth function from the growth property of its Fourier transform. Let us illustrate this by recalling the corresponding result for Fourier series.
Consider a smooth $2\pi$-periodic function $f\colon {\msy T}={\msy R}/2\pi{\msy Z}\rightarrow{\msy C}$, and suppose that $f$ has support in $[-r,r]+2\pi{\msy Z}$, where $0<r<\pi$. We denote the space of such functions by $C^\infty_r({\msy T})$. The Fourier transform of $f$ is the Fourier coefficient map $n\mapsto \hat f(n)$ on ${\msy Z}$, where $$\hat f(n)=\frac1{2\pi} \int_{-\pi}^\pi f(e^{it}) e^{-int}\,dt,$$ and it extends to a holomorphic function on ${\msy C}$, defined by the same formula with $n$ replaced by $\lambda\in{\msy C}$. By the classical Paley-Wiener theorem for ${\msy R}$ this holomorphic extension has at most exponential growth of type $r$, and every holomorphic function on ${\msy C}$ of this type arises in this fashion from a unique function $f\in C^\infty_r({\msy T})$. It is this 'local' Paley-Wiener theorem for ${\msy T}$ that we generalize to an arbitrary Riemannian symmetric space $M$ of the compact type. We consider spherical functions on $M$, and the relevant transform is the spherical Fourier transform. The theorem presented here is known in some particular cases. In particular, it is known in the case of a compact Lie group $U$, viewed as a symmetric space for the product group $U\times U$ with the left$\times$right action. In this case, the theorem was obtained by Gonzalez (see \cite{Gon}) by a simple reduction to the Euclidean case by means of the Weyl character formula. This result of Gonzalez plays a crucial role in our proof, and it is recalled in Section \ref{s: central} below. Other cases in which the theorem is known, are as follows. If the symmetric space has rank one, the spherical Fourier transform can be expressed in terms of a Jacobi transform, for which the Paley-Wiener theorem has been obtained by Koornwinder (see \cite{TomK} p.\ 158). As an example, we treat the special case $S^2=\rmS\rmU(2)/\rmS\rmO(2)$ in the final section of this paper. 
In this case, the theorem of Koornwinder is due to Beurling (unpublished, see \cite{TomK}). If the symmetric space is of even multiplicity type, the local Paley-Wiener theorem has been achieved by Branson, \'Olafsson and Pasquale (see \cite{BOP}) by application of a holomorphic version of Opdam's differential shift operators (developed in \cite{Opd}, \cite{OlPa}). The method is strongly dependent on the assumption that the multiplicities are even. The theorem of Gonzalez is a particular case. Finally, the theorem was obtained recently by Camporesi for the complex Grassmann manifolds by reduction to the rank one case, see \cite{Camp}. We shall now give a brief outline of the paper. In Sections \ref{s: notation} and \ref{s: Fourier} we introduce the basic notations. In Section \ref{s: main thm} we define the relevant Paley-Wiener space and state the main theorem, that the Fourier transform is bijective onto this space. The proof, that it maps into the space is given in Section \ref{s: Opdam}. Here we rely on work of Opdam \cite{Opd}. The theorem of Gonzalez, mentioned above, is recalled in Section \ref{s: central}, and the central argument of the present paper, establishing surjectivity, is given in the following Sections \ref{s: K-invariant}-\ref{s: surjective}. An important ingredient is a result of Rais from \cite{Rais}, which has previously been applied in similar situations by Clozel and Delorme, \cite{CD1} Lemma 7, and by Flensted-Jensen \cite{FJ} p.\ 30. Finally, in Section \ref{s: sphere} we treat $S^2$ as an example. The result of \cite{BOP} has been generalized to the Jacobi transform associated to a root system with a multiplicity function which is even, but not necessarily related to a symmetric space (see \cite{BOP2}). For the method of the present paper the geometry of the symmetric space is crucial, especially in Lemma \ref{l: support}, and we do not see how to generalize in this direction. 
\eqsection{Basic notation} \label{s: notation} Let $M$ be a Riemannian symmetric space of the compact type. We can write $M$ as a homogeneous space $M=U/K$, where $U$ is a connected compact semisimple Lie group which acts isometrically on $M$, and $K$ a closed subgroup with the property that $U^\theta_0\subset K\subset U^\theta$ for an involution $\theta$ of $U$. Here $U^\theta$ denotes the subgroup of $\theta$-fixed points, and $U^\theta_0$ its identity component. It should be emphasized that the pair $(U,K)$ is in general not uniquely determined by $M$ (see \cite{Sig}, Ch. VII). Let ${\frak u}$ denote the Lie algebra of $U$. We denote the involution of ${\frak u}$ corresponding to $\theta$ by the same symbol. Let ${\frak u}={\frak k}\oplus{\frak q}$ be the corresponding Cartan decomposition, then ${\frak k}$ is the Lie algebra of $K$, and ${\frak q}$ can be identified with the tangent space $T_oM$, where $o=eK\in M$ is the origin. Recalling that the Killing form $B(X,Y)$ on ${\frak u}$ is negative definite, let $\langle\,\cdot\,,\,\cdot\,\rangle$ be the inner product on ${\frak u}$ defined by $\langle X,Y\rangle=-B(X,Y)$. Then ${\frak k}$ and ${\frak q}$ are orthogonal subspaces. We assume that the Riemannian metric $g$ of $M$ is normalized such that it agrees with $\langle\,\cdot\,,\,\cdot\,\rangle$ on ${\frak q}=T_oM$. We denote by $\exp$ the exponential map ${\frak u}\rightarrow U$ (which is surjective), and by ${\rm Exp}$ the map ${\frak q}\rightarrow M$ given by ${\rm Exp}(X)=\exp(X) \cdot o$. By identification of ${\frak q}$ with the tangent space $T_oM$, we thus identify ${\rm Exp}$ with the exponential map associated to the Riemannian connection. The inner product on ${\frak u}$ determines an inner product on the dual space ${\frak u}^*$ in a canonical fashion. Furthermore, these inner products have complex bilinear extensions to the complexifications ${\frak u}_{\msy C}$ and ${\frak u}_{\msy C}^*$. 
All these bilinear forms are denoted by the same symbol $\langle\,\cdot\,,\,\cdot\,\rangle$. Let ${\frak a}\subset{\frak q}$ be a maximal abelian subspace, ${\frak a}^*$ its dual space, and ${\frak a}^*_{\msy C}$ the complexified dual space. Let $\Sigma$ denote the set of non-zero (restricted) roots of ${\frak u}$ with respect to ${\frak a}$, then $\Sigma\subset{\frak a}^*_{\msy C}$ and all the elements of $\Sigma$ are purely imaginary on ${\frak a}$. The multiplicity of a root $\alpha\in\Sigma$ is denoted $m_\alpha$. The corresponding Weyl group, generated by the reflections in the roots, is denoted $W$. Recall that it is naturally isomorphic with the factor group $N_K({\frak a})/Z_K({\frak a})$ of the normalizer and the centralizer of ${\frak a}$ in $K$ (see \cite{Sig}, Cor.\ VII.2.13). \eqsection{Fourier series} \label{s: Fourier} Let $(\pi,V)$ be an irreducible unitary representation of $U$, and let $$V^K=\{v\in V\mid \forall k\in K: \pi(k)v=v\},$$ then $V^K$ is either $0$ or 1-dimensional. In the latter case $\pi$ is said to be a $K$-{\it spherical} representation. Let ${\frak h}\subset{\frak u}$ be a Cartan subalgebra containing ${\frak a}$, then ${\frak h}={\frak h}_m\oplus{\frak a}$, where ${\frak h}_m={\frak h}\cap{\frak k}$. Let $\Delta$ denote the set of roots of ${\frak u}$ with respect to ${\frak h}$, then $\Sigma$ is exactly the set of non-zero restrictions to ${\frak a}$ of elements of $\Delta$. We fix a set $\Sigma^+\subset\Sigma$ of positive restricted roots, and a compatible set $\Delta^+\subset\Delta$ of positive roots. The set of dominant integral linear functionals on ${\frak h}$ is $$\Lambda^+({\frak h})=\{\lambda\in{\frak h}_{\msy C}^*\mid \forall\alpha\in\Delta^+: \frac{2\langle\lambda,\alpha\rangle}{\langle\alpha,\alpha\rangle} \in{\msy Z}^+\},$$ where ${\msy Z}^+=\{0,1,2,\dots\}$. Notice that since ${\frak u}$ is compact, all elements of $\Delta$ and $\Lambda^+({\frak h})$ take purely imaginary values on ${\frak h}$.
Let $\Lambda^+(U)\subset{\frak h}^*$ denote the set of highest weights of irreducible representations of $U$, then $\Lambda^+(U)\subset\Lambda^+({\frak h})$ with equality if and only if $U$ is simply connected. Let $\Lambda^+_K(U)$ denote the subset of $\Lambda^+(U)$ which corresponds to $K$-spherical representations. We recall the following identification of $\Lambda^+_K(U)$, due to Helgason (see \cite{GGA}, p.\ 535). \begin{thm} \label{t: Helgason classification} Let $\lambda\in\Lambda^+(U)$. Then $\lambda\in\Lambda^+_K(U)$ if and only if $\lambda|_{{\frak h}_m}=0$ and the restriction $\mu=\lambda|_{{\frak a}}$ satisfies \begin{equation} \label{e: Helgason condition} \frac{\langle\mu,\alpha\rangle}{\langle\alpha,\alpha\rangle} \in{\msy Z}^+, \end{equation} for all $\alpha\in\Sigma^+$. Furthermore, if $\mu\in{\frak a}^*$ satisfies {\rm (\ref{e: Helgason condition})} for all $\alpha\in\Sigma^+$, then the element $\lambda\in{\frak h}^*_{\msy C}$ defined by $\lambda|_{{\frak h}_m}=0$ and $\lambda|_{{\frak a}}=\mu$ belongs to $\Lambda^+({\frak h})$. If this element $\lambda$ belongs to $\Lambda^+(U)$, then it belongs to $\Lambda^+_K(U)$. \end{thm} Let $\Lambda^+(U/K)$ denote the set of restrictions $\mu=\lambda|_{{\frak a}}$ where $\lambda\in\Lambda^+_K(U)$; according to the preceding theorem this set is in bijective correspondence with $\Lambda^+_K(U)$. For each $\mu\in\Lambda^+(U/K)$ we fix an irreducible unitary representation $(\pi_\mu,V_\mu)$ of $U$ with highest weight $\lambda$, and we fix a unit vector $e_\mu\in V_\mu^K$. The {\it spherical function} on $U/K$ associated with $\mu$ is the matrix coefficient $$\psi_\mu(x)=(\pi_\mu(x)e_\mu,e_\mu), \quad x\in U,$$ viewed as a function on $U/K$. It is $K$-invariant on both sides, and it is independent of the choice of the unit vector $e_\mu$.
The {\it spherical Fourier transform} of a continuous $K$-invariant function $f$ on $M=U/K$ is the function $\tilde f$ on $\Lambda^+(U/K)$ defined by $$\tilde f(\mu)=\int_M f(x)\overline{\psi_\mu(x)}\,dx,$$ where $dx$ is the Riemannian measure on $M$, normalized with total measure 1. Notice that $\overline{\psi_\mu(gK)} =\psi_\mu(g^{-1}K)$ for $g\in U$, since $\pi_\mu$ is unitary. The {\it spherical Fourier series} for $f$ is the series given by \begin{equation} \label{e: U/K Fourier series} \sum_{\mu\in\Lambda^+(U/K)} d(\mu)\tilde f(\mu)\psi_\mu \end{equation} where $d(\mu)=\dim V_\mu$. The Fourier series converges to $f$ in $L^2$ and, if $f$ is smooth, absolutely and uniformly (see \cite{GGA}, p.\ 538). Furthermore, $f$ is smooth if and only if the Fourier transform $\tilde f$ is {\it rapidly decreasing}, that is, for each $k\in{\msy N}$ there exists a constant $C_k$ such that $$|\tilde f(\mu)|\leq C_k(1+\|\mu\|)^{-k}$$ for all $\mu\in\Lambda^+(U/K)$ (see \cite{Sugiura}). \eqsection{Main theorem} \label{s: main thm} For each $r>0$ we denote by $B_r(0)$ the open ball in ${\frak q}$ centered at $0$ and with radius $r$. The exponential image ${\rm Exp} B_r(0)$ is the ball in $M$, centered at the origin and of radius $r$. Let $\bar B_r(0)$ and ${\rm Exp} \bar B_r(0)$ denote the corresponding closed balls. We denote by $C^\infty_r(U/K)^K$ the space of $K$-invariant smooth functions on $M=U/K$ supported in ${\rm Exp} \bar B_r(0)$. Let $\rho=\frac12\sum_{\alpha\in\Sigma^+} m_\alpha\alpha\in{\frak a}^*_{\msy C}$. \begin{defi} \label{d: PW space} (Paley-Wiener space) For $r>0$ let ${\rm PW}_r({\frak a})$ denote the space of holomorphic functions $\varphi$ on ${\frak a}_{\msy C}^*$ satisfying the following.
\item{(a)} For each $k\in{\msy N}$ there exists a constant $C_k>0$ such that $$|\varphi(\lambda)|\leq C_k(1+\|\lambda\|)^{-k} e^{r\|{\rm Re}\,\lambda\|}$$ for all $\lambda\in{\frak a}_{\msy C}^*.$ \item{(b)} $\varphi(w(\lambda+\rho)-\rho)=\varphi(\lambda)$ for all $w\in W$, $\lambda\in{\frak a}_{\msy C}^*$. \end{defi} We can now state the main theorem. \begin{thm} {\rm (The local Paley--Wiener theorem)} \label{t: PW} There exists $R>0$ such that the following holds for each $0<r<R$. \smallskip {\rm (i)} Let $f\in C^\infty_r(U/K)^K$. Then the Fourier transform $\tilde f\colon \Lambda^+(U/K)\rightarrow{\msy C}$ of~$f$ extends to a function in ${\rm PW}_r({\frak a})$. {\rm (ii)} Let $\varphi\in{\rm PW}_r({\frak a})$. There exists a unique function $f\in C^\infty_r(U/K)^K$ such that $\tilde f(\mu)=\varphi(\mu)$ for all $\mu\in \Lambda^+(U/K)$. {\rm (iii)} The functions in the Paley-Wiener space ${\rm PW}_r({\frak a})$ are uniquely determined by their values on $\Lambda^+(U/K)$. \smallskip Thus the Fourier transform followed by the extension gives a bijection $$C^\infty_r(U/K)^K\rightarrow{\rm PW}_r({\frak a}).$$ \end{thm} \begin{rem}\label{r: best R} It would be reasonable to expect the theorem above to hold with $R$ equal to the injectivity radius of $M$, that is, the supremum of the values $r$ for which the restriction of ${\rm Exp}$ to $B_r(0)$ is a diffeomorphism onto its image. We have not been able to establish that. It should be noted that parts (i)-(iii) of the above theorem may be valid with different values of $R$. In fact, it can be seen from the proofs below, that if $U$ is simply connected, then part (i) of the theorem is valid with $R$ equal to half the injectivity radius of $M$. Furthermore, part (ii) will be established with $R$ equal to the injectivity radius of $U$. For part (iii) we need a possibly smaller value of $R$.
\end{rem} \eqsection{The invariant differential operators} \label{s: D(U/K)} Let ${\msy D}(U/K)$ denote the algebra of $U$-invariant differential operators on $U/K$. It is commutative (see \cite{GGA}, Cor.\ II.5.4). Recall that the {\it Harish-Chandra homomorphism} maps $\gamma\colon {\msy D}(U/K)\rightarrow S({\frak a}^*)^W$. It can be defined as follows. Let ${\mathcal U}({\frak u})$ denote in the universal enveloping algebra of ${\frak u}$. The algebra ${\msy D}(U/K)$ is naturally isomorphic with the quotient ${\mathcal U}({\frak u})^K/{\mathcal U}({\frak u})^K\cap{\mathcal U}({\frak u}){\frak k}$, see \cite{GGA}, Thm.\ II.4.6. It follows from \cite{GGA} Thm.\ II.5.17 (by application to a symmetric pair of the non-compact type with Lie algebras ${\frak g}={\frak k}+i{\frak q}$ and ${\frak k}$), that there exists an isomorphism of the quotient ${\mathcal U}({\frak u})^{\frak k}/{\mathcal U}({\frak u})^{\frak k}\cap{\mathcal U}({\frak u}){\frak k}$ onto $S({\frak a}^*)^W$. The Harish-Chandra map results from composition of the two, using that ${\mathcal U}({\frak u})^K\subset{\mathcal U}({\frak u})^{\frak k}$. We shall need the following fact. \begin{lemma}\label{l: D(U/K)} The Harish-Chandra map $\gamma$ is an isomorphism onto $S({\frak a}^*)^W$. \end{lemma} \begin{proof} Let $K_0$ denote the identity component of $K$. It follows from the description of $\gamma$ above, that it suffices to prove equality between the quotients ${\mathcal U}({\frak u})^K/{\mathcal U}({\frak u})^K\cap{\mathcal U}({\frak u}){\frak k}$ and ${\mathcal U}({\frak u})^{\frak k}/{\mathcal U}({\frak u})^{\frak k}\cap{\mathcal U}({\frak u}){\frak k}= {\mathcal U}({\frak u})^{K_0}/{\mathcal U}({\frak u})^{K_0}\cap{\mathcal U}({\frak u}){\frak k}$. We shall employ \cite{GGA}, Cor.\ II.4.8, according to which the two quotients are in bijective linear correspondence with $S({\frak q})^K$ and $S({\frak q})^{K_0}$, respectively. It therefore suffices to prove identity between these two spaces. 
Let $p\in S({\frak q})^{K_0}$ and let $k\in K$. By means of the Killing form we regard $p$ as a polynomial function on ${\frak q}$. The claimed identity amounts to $p\circ{\rm Ad}k=p$. Notice that $p\circ{\rm Ad}k \in S({\frak q})^{K_0}$, since $k$ normalizes $K_0$. According to \cite{GGA}, Cor.\ II.5.12, the elements of $S({\frak q})^{K_0}$ are uniquely determined by restriction to ${\frak a}$. According to the lemma below, $k$ is a product of elements from $K_0$ and $Z_K({\frak a})$, and hence it follows that $p\circ{\rm Ad}k=p$ on ${\frak a}$. ~\hfill$\square$\medbreak\end{proof} \begin{lemma} Each component of $K$ contains an element from the centralizer $Z_K({\frak a})$. \end{lemma} \begin{proof} Let $k\in K$ be arbitrary. Then ${\rm Ad} k$ maps ${\frak a}$ to a maximal abelian subspace in ${\frak q}$, hence to ${\rm Ad} k_0({\frak a})$ for some $k_0\in K_0$. It follows that $k_0^{-1}k$ normalizes ${\frak a}$. The description of the Weyl group cited in the end of Section \ref{s: notation} implies that $N_K({\frak a})/Z_K({\frak a})=N_{K_0}({\frak a})/Z_{K_0}({\frak a})$, hence $k_0^{-1}k\in N_{K_0}({\frak a})Z_K({\frak a})$ and $k\in K_0Z_K({\frak a})$. ~\hfill$\square$\medbreak\end{proof} The spherical function $\psi_\mu$ satisfies the joint eigenequation \begin{equation} \label{e: eigenequation D} D\psi_\mu=\gamma(D,\mu+\rho)\psi_\mu,\quad\quad D\in{\msy D}(U/K) \end{equation} (see \cite{BOP} Lemma 2.5). It follows that $$ (Df)^\sim(\mu)= \overline{\gamma(D^*,\mu+\rho)}\tilde f(\mu) $$ where $D^*\in{\msy D}(U/K)$ is the adjoint of $D$. In particular, the Laplace-Beltrami operator $L$ on $M$ belongs to ${\msy D}(U/K)$, and we have $$ \gamma(L,\lambda)=\langle\lambda,\lambda\rangle- \langle\rho,\rho\rangle. $$ Since $L$ is self-adjoint it follows that \begin{equation} \label{e: eigenequation L} (Lf)^\sim(\mu)=(\langle\mu+\rho,\mu+\rho\rangle- \langle\rho,\rho\rangle)\tilde f(\mu) \end{equation} for all $f\in C^\infty(U/K)^K$. 
\eqsection{The estimate of Opdam} \label{s: Opdam} In this section we prove part (i) of Theorem \ref{t: PW}. The proof is based on the following result. Let $\bar\Omega$ be the closure of $$\Omega=\{ X\in{\frak a} \mid \forall\alpha\in\Sigma: |\alpha(X)|<\frac \pi2\}.$$ \begin{thm}% \label{t: estimate {\rm [Opdam]} For each $X\in \bar\Omega$ the map $$\mu\mapsto \psi_\mu({\rm Exp} X), \quad \mu\in\Lambda^+(U/K), $$ has an analytic continuation to ${\frak a}_{\msy C}^*$, denoted $\lambda\mapsto \psi_\lambda({\rm Exp} X)$, with the following properties. There exists a constant $C>0$ such that \begin{equation} \label{e: Opdam} |\psi_\lambda({\rm Exp} X)|\leq C \,e^{\max_{w\in W}{\rm Re}\, w\lambda(X)} \end{equation} for all $\lambda\in{\frak a}_{\msy C}^*$, $X\in\bar \Omega$. Furthermore, the map $X\mapsto \psi_\lambda({\rm Exp} X)$ is analytic, and \begin{equation} \label{e: W-inv} \psi_{w(\lambda+\rho)-\rho}({\rm Exp} X)=\psi_{\lambda}({\rm Exp} X) \end{equation} for all $w\in W$. \end{thm} \begin{proof} The existence of the analytic continuation follows from \cite{Opd} Theorem 3.15, by identification of $\psi_{\mu}({\rm Exp} X)$ with $G(\mu+\rho,k;X)$, where $G$ is the function appearing there. Recall that the root system $R$ in \cite{Opd} is $2\Sigma$. For the shift by $\rho$ and (\ref{e: W-inv}), see \cite{BOP}, Lemma 2.5. It follows from \cite{Opd} Theorem 6.1 (2) that the analytic extension satisfies (\ref{e: Opdam}). ~\hfill$\square$\medbreak\end{proof} \begin{rem} An analytic extension of $\psi_\mu({\rm Exp} X)$ exists for $X$ in the larger domain $2\Omega$. This was proved by Faraut (see \cite{BOP}, p.\ 418) and by Kr\"otz and Stanton (see \cite{KrSt}). However, the estimate (\ref{e: Opdam}) has not been obtained in this generality.\end{rem} We can now derive Theorem \ref{t: PW} (i). 
The following integration formula holds on $M=U/K$ (see \cite{GGA}, p.\ 190), up to normalization of measures: $$\int_{M} f(x)\,dx = \int_{K}\int_{A_*} f(ka\cdot o) \delta(a)\,da\,dk$$ where $A_*$ is the torus $\exp{\frak a}$ in $U$ equipped with Haar measure, and where $\delta$ is defined by $$\delta(\exp H)= \Pi_{\alpha\in\Sigma^+} |\sin i\alpha(H)|^{m_\alpha}$$ for $H\in{\frak a}$. It follows that $$ \tilde f(\mu)= \int_{A_*} f(a\cdot o) \psi_\mu(a^{-1}\cdot o) \delta(a)\,da.$$ Let $R>0$ be sufficiently small, such that the restriction of $\exp$ to $B_R(0)$ is injective, then if $r<R$ and $f$ is $K$-invariant with support inside ${\rm Exp} \bar B_r(0)$, it follows that \begin{equation} \label{e: tilde f(mu)} \tilde f(\mu)= \int_{B_r(0)\cap{\frak a}} f({\rm Exp} H) \psi_\mu({\rm Exp}(-H)) \delta(\exp H)\,dH. \end{equation} Assume in addition that $R\leq\pi/(2\|\alpha\|)$ for all $\alpha\in\Sigma$. Then $B_r(0)\cap{\frak a}\subset\Omega$ for $r<R$, and it follows from Theorem \ref{t: estimate} that $\mu\mapsto\tilde f(\mu)$ allows an analytic continuation to ${\frak a}^*_{\msy C}$, given by the same formula (\ref{e: tilde f(mu)}), and denoted $\tilde f(\lambda)$, such that \begin{equation} \label{e: est} |\tilde f(\lambda)| \leq C \max_{a\in A_*}\{|f(a\cdot o)\delta(a)|\}\, e^{r\|{\rm Re}\,\lambda\|} \end{equation} where $C$ is a constant depending on $r$, but not on $f$. The derivation of the polynomial decay of $\tilde f(\lambda)$ in (a) of Definition \ref{d: PW space} is then easily obtained from the estimate (\ref{e: est}), when applied to the function $L^mf$ with a sufficiently high power of $L$, by means of (\ref{e: eigenequation L}). The Weyl group transformation property in part (b) of Definition \ref{d: PW space} follows immediately from (\ref{e: W-inv}). Hence we can conclude that $\tilde f(\lambda)$ belongs to ${\rm PW}_r({\frak a})$. \eqsection{Uniqueness} \label{s: carlson} In this section part (iii) of Theorem \ref{t: PW} is proved. 
The proof is based on the following simple generalization of Carlson's theorem (see \cite{Boas} p.\ 153). \begin{lemma} \label{l: Carlson} Let $f\colon{\msy C}^n\rightarrow{\msy C}$ be holomorphic. Assume: \smallskip {\rm (i)} There exist a constant $c<\pi$, and for each $z\in{\msy C}^n$ a constant $C$ such that $$|f(z+\zeta e_i)|\leq Ce^{c|\zeta|}$$ for all $\zeta\in{\msy C}$, $i=1,\dots,n$. {\rm (ii)} $f(k)=0$ for all $k\in({\msy Z}^{+})^n$. \smallskip\noindent Then $f=0$. \end{lemma} \begin{proof} For $n=1$ this is Carlson's theorem. In general it follows by induction that $z\mapsto f(z,\kappa)$ is identically $0$ on ${\msy C}^{n-1}$ for each $\kappa\in{\msy Z}^+$. By a second application of Carlson's theorem it then follows that $f(z,\zeta)=0$ for all $(z,\zeta)\in{\msy C}^n$. ~\hfill$\square$\medbreak\end{proof} It follows that if $X$ is sufficiently close to 0, then the analytic continuation $\lambda\mapsto \psi_\lambda({\rm Exp} X)$ in Theorem \ref{t: estimate} is unique, when (\ref{e: Opdam}) is required. More precisely, let $\mu_1,\dots,\mu_n\in{\frak a}^*_{\msy C}$ be such that $\Lambda^+(U/K)={\msy Z}^+\mu_1+\dots+{\msy Z}^+\mu_n$. If $U$ is simply connected the elements $\mu_1,\dots,\mu_n\in{\frak a}^*_{\msy C}$ are the fundamental weights determined by $$\frac{\langle \mu_i,\alpha_j\rangle}{\langle\alpha_j,\alpha_j\rangle} =\delta_{ij}$$ for the simple roots $\alpha_1,\dots,\alpha_n$ of $\Sigma^+$. If $U$ is not simply connected, the $\mu_i$ are suitable integral multiples of the fundamental weights, in order that they correspond to representations of $U$. If $\|X\|<\pi/\|\mu_i\|$ for all $i$ the uniqueness of the analytic continuation now follows by application of Lemma \ref{l: Carlson} to the function $z\mapsto f(z_1\mu_1+\dots+z_n\mu_n)$. 
In the same fashion, if $R\leq\pi/\|\mu_i\|$ for all $i$, it follows from Lemma \ref{l: Carlson} that for $r<R$ the elements $\varphi\in{\rm PW}_r({\frak a})$ are uniquely determined on $\Lambda^+(U/K)$, as claimed in Theorem \ref{t: PW} (iii). Notice that the minimal value of $\pi/\|\mu_i\|$ can be strictly smaller than the injectivity radius of $U/K$. See Remark \ref{r: best R}. \eqsection{The theorem of Gonzalez} \label{s: central} In this section we treat the special case, where the symmetric space is the compact semisimple Lie group $U$ itself, viewed as a symmetric space for the product group $U\times U$ with the action given by $(g,h)\cdot x=gxh^{-1}$. The stabilizer at $e$ is the diagonal subgroup $\Delta=\{(x,x)\mid x\in U\}$ in $U\times U$, and the corresponding involution of $U\times U$ is $(x,y)\mapsto (y,x)$. The $\Delta$-invariant functions on $U$ are the class functions (also called central functions), that is, those for which $f(uxu^{-1})=f(x)$ for all $u,x\in U$. In this case the local Paley-Wiener theorem was obtained by Gonzalez \cite{Gon}. Let us recall his result. As before, we denote by ${\frak h}$ a Cartan subalgebra of ${\frak u}$, and by $\Lambda^+({\frak h})\subset i{\frak h}^*$ the set of dominant integral linear functionals. For $\mu\in\Lambda^+(U)$ we denote by $\chi_\mu$ the character of $\pi_\mu$, that is, $\chi_\mu(x)$ is the trace of $\pi_\mu(x)$ for $x\in U$. The function $d(\mu)^{-1}\chi_\mu$ is normalized so that its value at $e$ is 1, and when $U$ is viewed as a symmetric space, this class function is exactly the spherical function associated with $\pi_\mu$. It is however more convenient to use the unnormalized function $\chi_\mu$ in the definition of the Fourier transform, since it is a unit vector in $L^2$ (with the normalized Haar measure on $U$). 
Following custom, we thus define the Fourier transform by $$\hat F(\mu)= \langle F,\chi_\mu \rangle =\int_U F(u)\overline{\chi_\mu(u)}\,du, \quad \mu\in\Lambda^+(U),$$ for class functions $F\in L^2(U)^U$. The corresponding Fourier series is given by \begin{equation} \label{e: U Fourier series} \sum_{\mu\in\Lambda^+(U)} \hat F(\mu)\chi_\mu(x). \end{equation} It converges to $F$ in $L^2$. If $F$ is smooth it also converges absolutely and uniformly (see \cite{GGA} p.\ 534). The theorem of Gonzalez \cite{Gon} now reads as follows. Let $R>0$ be the injectivity radius of $U$. If $U$ is simply connected, this means that $R=2\pi/\|\alpha\|$ where $\alpha$ is the longest root in $\Delta$ (see \cite{Sig} p.\ 318). \begin{thm}\label{t: Gonzalez}{\rm [Gonzalez]} Let a class function $F\in C^\infty(U)^U$ be given, and let $0<r<R$. Then $F$ belongs to $C^\infty_r(U)^U$ if and only if the Fourier transform $\mu\mapsto \hat F(\mu)$ extends to a holomorphic function $\Phi$ on ${\frak h}^*_{\msy C}$ with the following properties \smallskip \item{\rm (a)} For each $k\in{\msy N}$ there exists a constant $C_k>0$ such that $$|\Phi(\lambda)|\leq C_k(1+\|\lambda\|)^{-k} e^{r\|{\rm Re}\,\lambda\|}$$ for all $\lambda\in{\frak h}_{\msy C}^*.$ \item{\rm (b)} $\Phi(w(\lambda+\rho)-\rho)=\det(w)\Phi(\lambda)$ for all $w\in W$, $\lambda\in{\frak h}_{\msy C}^*$. \end{thm} Notice that as before the extension $\Phi$ is unique if $r$ is sufficiently small. In that case, the Fourier transform, followed by holomorphic extension, is then a bijection onto the space of holomorphic functions satisfying (a) and (b). \eqsection{Construction of $K$-invariant functions} \label{s: K-invariant} The following result is important for the proof of Theorem \ref{t: PW}. 
\begin{lemma} Let $F\in C^\infty(U)^U$ and define $f\colon U\rightarrow{\msy C}$ by $$f(u)=\int_K F(ku) \,dk=\int_K F(uk)\, dk.$$ Then $f\in C^\infty(U/K)^K$ and \begin{equation} \label{e: Fourier coefficients} d(\mu)\tilde f(\mu)=\hat F(\lambda) \end{equation} for all $\mu\in\Lambda^+(U/K)$, where $\lambda\in{\frak h}^*_{\msy C}$ is the extension of $\mu$ determined by $$\lambda|_{\frak a}=\mu,\quad\lambda|_{{\frak h}_m}=0.$$ \end{lemma} \begin{proof} The fact that $f\in C^\infty(U/K)^K$ is clear. From the uniform convergence of the Fourier series (\ref{e: U Fourier series}) it follows that $$f(u)= \sum_{\lambda\in\Lambda^+(U)} \hat F(\lambda) \int_K\chi_\lambda(uk)\,dk. $$ By the lemma below we then obtain $$f(u)= \sum_{\mu\in\Lambda^+(U/K)} \hat F(\lambda) \psi_\mu(u) $$ where $\lambda$ is the extension of $\mu$ as above. The statement (\ref{e: Fourier coefficients}) now follows by comparison with (\ref{e: U/K Fourier series}).~\hfill$\square$\medbreak \end{proof} \begin{lemma} Let $\lambda\in\Lambda^+(U)$ and $\mu=\lambda|_{\frak a}$. If $\lambda\in\Lambda^+_K(U)$ then $$\int_K\chi_\lambda(uk)\,dk=\psi_\mu(u)$$ for all $u\in U$, and otherwise $ \int_K\chi_\lambda(uk)\,dk=0$. \end{lemma} \begin{proof} (See also \cite{GGA}, p.\ 417). The function $u\mapsto\int_K\chi_\lambda(uk)\,dk$ is a $K$-fixed vector in the right representation generated by $\chi_\lambda$, which is equivalent with $\pi_\lambda$, hence it vanishes if $\lambda\notin \Lambda^+_K(U)$. Assume $\lambda\in \Lambda^+_K(U)$, and choose an orthonormal basis $v_1,\dots, v_d$ for the representation space $V$, such that $v_1$ is $K$-fixed. Then $$\int_K \chi_\lambda(uk)\,dk= \int_K \sum_{i=1}^{d}\langle\pi_\lambda(u)\pi_\lambda(k)v_i,v_i\rangle \,dk . $$ Since the operator $\int_K\pi_\lambda(k)\,dk$ is the orthogonal projection onto $V^K$, it follows that $$ \int_K \chi_\lambda(uk)\,dk= \langle\pi_\lambda(u)v_1,v_1\rangle= \psi_\mu(u). 
$$ as claimed.~\hfill$\square$\medbreak \end{proof} \begin{lemma} \label{l: support} Let $F\in C^\infty(U)^U$ and $f\colon U/K\rightarrow{\msy C}$ be as above. If $F\in C_r^\infty(U)^U$ for some $r>0$ then $f\in C_r^\infty(U/K)^K$. \end{lemma} \begin{proof} Let $x\in M$ with $f(x)\neq 0$ and choose $X\in{\frak q}$ such that the curve on $M$ given by $t\mapsto\gamma(t)={\rm Exp}(tX)$ where $t\in[0,1]$, is a minimal geodesic from $o$ to $x$. The length of $\gamma$ is $\|X\|$. Let $x=u\cdot o$ where $u\in U$, then there exists $k\in K$ such that $F(ku)\neq 0$. Hence $ku=\exp Y$ where $Y\in{\frak u}$ with $\|Y\|<r$. Let $Z={\rm Ad}(k^{-1})Y$, then $\|Z\|=\|Y\|<r$. The smooth curve $\xi(t)=\exp(tZ)\cdot o,$ where $t\in[0,1]$, also joins $o$ to $x$. Hence it has length $\ell(\xi)\geq\|X\|$. Let $L_u$ denote left translation by $u$, then $\xi'(t)=dL_{\exp(tY)}(\xi'(0))$, and hence $\|\xi'(t)\|=\|\xi'(0)\|$ for all $t$. Let $Z_{\frak q}$ denote the ${\frak q}$ component of $Z$ in the orthogonal decomposition ${\frak u}={\frak k}+{\frak q}$. Then $\xi'(0)=Z_{\frak q}$, and we conclude that $$\|X\|\leq \ell(\xi)= \int_0^1 \|\xi'(t)\|\,dt=\|Z_{\frak q}\|\leq \|Z\|=\|Y\|<r.$$ Thus $f$ has support in ${\rm Exp}\bar B_r(0)$~\hfill$\square$\medbreak \end{proof} \eqsection{The result of Rais} \label{s: Rais} The following result is due to M. Rais. Let $r>0$ and recall that a holomorphic function $\varphi$ on ${\frak h}_{\msy C}^*$ is said to be of exponential type $r$ if it satisfies (a) of Theorem \ref{t: Gonzalez}. Let $\tilde W$ denote the Weyl group of the root system $\Delta$ on ${\frak h}$. Let $l=|\tilde W|$, and let $P_1,\dots,P_l$ be a basis for $S({\frak h}^*)$ over $I({\frak h}^*)=S({\frak h}^*)^{\tilde W}$ (see \cite{GGA}, p.\ 360). \begin{thm} \label{t: Rais} For each holomorphic function $\psi$ of exponential type $r$ there exist unique $\tilde W$-invariant holomorphic functions $\phi_1,\dots,\phi_l$ of exponential type $r$ such that $\psi=P_1\phi_1+\dots+P_l\phi_l$. 
\end{thm} \begin{proof} See \cite{CD2}, Appendix B.~\hfill$\square$\medbreak \end{proof} In the following statement, we regard ${\frak a}^*$ as a subset of ${\frak h}^*$, by trivial extension on ${\frak h}_m$. Likewise ${\frak h}_m^*$ is regarded as a subspace by trivial extension on ${\frak a}$. Then ${\frak h}^*={\frak a}^*\oplus{\frak h}_m^*$ holds as an orthogonal sum decomposition. \begin{Cor} \label{c: Rais} There exist a collection of polynomials $p_1\dots,p_l\in S({\frak a}^*)^W$ with the following property. For each $W$-invariant holomorphic function $\varphi$ on ${\frak a}^*_{\msy C}$ of exponential type $r$, there exist $\tilde W$-invariant holomorphic functions $\phi_1,\dots,\phi_l$ on ${\frak h}^*_{\msy C}$ of exponential type $r$, such that \begin{equation} \label{e: Rais} \varphi=p_1(\phi_1|_{{\frak a}^*_{\msy C}})+\dots+p_l(\phi_l|_{{\frak a}^*_{\msy C}}). \end{equation} \end{Cor} \begin{proof}(See also \cite{FJ} p.\ 30). Notice that when $\phi_j$ is $\tilde W$-invariant, then $\phi_j|_{{\frak a}^*}$ is $W$-invariant, since the normalizer in $\tilde W$ of ${\frak a}$ maps surjectively onto $W$ (see \cite{GGA} p. 366). Fix a holomorphic function $\varphi_m$ on ${\frak h}_{m{\msy C}}^*$ of exponential type $r$, with the value $\varphi_m(0)=1$. Put $\psi(\lambda)=\varphi(\lambda_1)\varphi_m(\lambda_2)$, where $\lambda_1$ and $\lambda_2$ are the components of $\lambda$. Then $\psi$ is of exponential type $r$, and we can apply Theorem \ref{t: Rais}. The restriction of $\psi$ to ${\frak a}^*$ is exactly $\varphi$. Taking restrictions to ${\frak a}^*$ we thus obtain (\ref{e: Rais}) with $p_j=P_j|_{{\frak a}^*}$. The desired expression is obtained by averaging over~$W$. ~\hfill$\square$\medbreak\end{proof} \eqsection{Proof of the main theorem} \label{s: surjective} It remains to be seen that every function $\varphi\in {\rm PW}_r({\frak a})$ is the extension of $\tilde f$ for some $f\in C^\infty(U/K)^K$. Thus let $\varphi\in {\rm PW}_r({\frak a})$ be given. 
Let $p_1,\dots,p_l$ and $\phi_1,\dots,\phi_l$ be as in Corollary \ref{c: Rais}, applied to the $W$-invariant function $\lambda\mapsto \varphi(\lambda-\rho)$ on ${\frak a}^*$. By Lemma \ref{l: D(U/K)} there exist $D_j\in {\msy D}(U/K)$ such that $\overline{\gamma(D_j^*,\lambda)}=p_j(\lambda)$ for $\lambda\in i{\frak a}^*$. It follows from the Weyl dimension formula (\cite{GGA} p.\ 502) that $\mu\mapsto d(\mu)$ extends to a polynomial on ${\frak h}^*$ which satisfies the transformation property (b) of Theorem \ref{t: Gonzalez}. Hence the function on ${\frak h}^*_{\msy C}$ defined by $\Phi_j(\lambda)=d(\lambda)\phi_j(\lambda+\rho)$ satisfies both (a) and (b) in that theorem, and thus we can find $F_1,\dots,F_n\in C^\infty_r(U)^U$ such that $$\hat F_j(\mu)=\Phi_j(\mu)$$ for all $\mu$. Let $f_j(uK)=\int_K F_j(uk)\, dk$ and define $f=\sum D_j f_j$. Then by Lemma \ref{l: support} we have $f\in C^\infty_r(U/K)^K$, and it follows from (\ref{e: Fourier coefficients}) that \begin{eqnarray*} \tilde f(\mu)&=&\sum_j \gamma(D_j,\mu+\rho)\tilde f_j(\mu)\\ &=&\sum_j \gamma(D_j,\mu+\rho)d(\mu)^{-1}\hat F_j(\mu)\\ &=&\sum_j p_j(\mu+\rho)\phi_j(\mu+\rho)=\varphi(\mu).\quad\quad\square \end{eqnarray*} \eqsection{The sphere $S^2$} \label{s: sphere} Let $M=S^2=\{(x,y,z)\in{\msy R}^3\mid x^2+y^2+z^2=1\}$, then $M$ can be realized as a homogeneous space for $U=\rmS\rmU(2)$ with the following action. Identify ${\msy R}^3$ with the space of Hermitian $2\times 2$ matrices $H$ with trace 0, $$H=\pmatrix{z&x+iy\cr x-iy&-z\cr},$$ then $u.H=uHu^{-1}$ for $u\in U$. The stabilizer of the point $o=(0,0,1)\in S^2$ is the set of diagonal elements in $U$, and the diagonal element $$k_\theta=\pmatrix{e^{i\theta/2}&0\cr 0&e^{-i\theta/2}\cr}$$ acts by rotation around the $z$-axis of angle $\theta$. A $K$-invariant function on $M$ is determined by its values along the elements $(x,y,z)=(0,\sin t,\cos t)$, and it thus becomes identified as an even function of $t\in [-\pi,\pi]$. 
With the notation of above, the function is identified through the map $t\mapsto f(a_t\cdot o)$ where $$a_t= \pmatrix{\cos(t/2) & i\sin(t/2)\cr i\sin(t/2)&\cos(t/2)\cr}.$$ The irreducible representations of $U$ are parametrized by half integers $l=0,\frac12,1,\frac32,\dots$, where $\pi_l$ has dimension $2l+1$, and the spherical representations are those for which $l$ is an integer. The corresponding spherical functions are given by $\psi_l(a_t)=P_l(\cos t)$, where $P_l$ is the $l$'th Legendre polynomial. The Fourier series of a $K$-invariant function on $S^2$, identified as an even function on $[-\pi,\pi]$, is then the Fourier-Legendre series $$\sum_{l=0}^\infty (2l+1) \tilde f(l) P_l(\cos t)$$ where $$\tilde f(l)= \frac12 \int_0^\pi f(t) P_l(\cos t) \sin t\,dt.$$ Our local Paley-Wiener theorem asserts the following for $r<\pi$: \smallskip {\it An even function $f\in C^\infty(-\pi,\pi)$ is supported in $[-r,r]$ if and only if the Legendre transform $l\mapsto \tilde f(l)$ of $f$ extends to an entire function $g$ on ${\msy C}$ of exponential type $$|g(\lambda)|\leq C_k(1+|\lambda|)^{-k}e^{r|{\rm Im}\,\lambda|}$$ such that $g(\lambda-\frac12)$ is an even function of $\lambda$. The extension $g$ with these properties is unique. Moreover, every such function $g$ on ${\msy C}$ is obtained in this fashion from a unique function $f\in C_r^\infty(-\pi,\pi)$.} \smallskip Essentially this is the result stated by Koornwinder (and attributed to Beurling) in \cite{TomK} p.\ 158.
1,108,101,565,825
arxiv
\section{Introduction} In this paper we characterize ${\rm PSL}_n\mathbb{R}$-Fuchsian representations and verify how Teichm\"uller spaces are embedded into Hitchin components. Let $S$ be a compact hyperbolic oriented surface. The ${\rm PSL}_n\mathbb{R}$-Hitchin components $H_n(S)$ of $S$ is a prefered component of the ${\rm PSL}_n\mathbb{R}$-character variety ${\rm Hom}(\pi_1(S), {\rm PSL}_n\mathbb{R})/{\rm PSL}_n\mathbb{R}$. Thee elements of $H_n(S)$ are called Hitchin representations. These components are a higher dimensional analog of the Teichm\"uller space of $S$, which is the deformation space of hyperbolic structures of $S$. The Hitchin component contains a subset $F_n(S)$ which corresponds to the Teichm\"uller space, called the Fuchsian locus. The goal of this paper is to study the behavior of certain invariants of Hitchin representations on the Fuchsian locus, and to describe Fuchsian loci concretely. For our purpose, we use the Bonahon-Dreyer parameterization of Hitchin components. Let $\mathcal{L}$ be a maximal geodesic lamination of $S$ with finitely many leaves. Such a lamination induces an ideal triangulation of $S$. In particular we consider a maximal geodesic lamination associated to a pants decomposition of $S$, {\it i.e.} a maximal geodesic lamination whose closed leaves induces a pants decomposition of $S$. For the lamination and the ideal triangulation, we can define three kinds of invariants of Hitchin representations: (i) the triangle invariants for ideal triangles, (ii) the shearing invariants for biinfinite leaves, and (iii) the gluing invariants for closed leaves. The Bonahon-Dreyer parameterization is defined by using these invariants. This is a parameterization of $H_n(S)$ by the interior of a convex polytope in $\mathbb{R}^N$, where $N$ is a number depending on $\mathcal{L}$. 
We denote the Bonahon-Dreyer parameterization associated to $\mathcal{L}$ by $\Phi_{\mathcal{L}} : H_n(S) \rightarrow \mathcal{P}_{\mathcal{L}} \subset \mathbb{R}^N$. The main result of this paper is as follows. \begin{theorem} There is a slice $\mathcal{S}_{\mathcal{L}}$ of the range $\mathcal{P}_{\mathcal{L}}$ of the Bonahon-Dreyer parameterization associated to $\mathcal{L}$ such that the image $\Phi_{\mathcal{L}}(F_n(S))$ coincides with $\mathcal{S}_{\mathcal{L}}$. \end{theorem} Moreover, we obtain the property of triangle, shearing, and gluing invariants on Fuchsian loci as corollary. \begin{corollary} A Hitchin representation is ${\rm PSL}_n\mathbb{R}$-Fuchsian if and only if \begin{itemize} \item[(i)] the triangle invariants are all zero, and \item[(ii)] the shearing and gluing invariants are independent of their index. \end{itemize} \end{corollary} \begin{remark} Theorem 1.1 and Corollary 1.2 hold for any maximal geodesic laminations which consist of finitely many leaves. We can use the argument of the proof of the main results to show this. \end{remark} \subsection*{Structure of this paper} In Section 2, we recall the hyperbolic geometry of surfaces. The tools, the shearing parameterization of a pair of pants, the Fenchel-Nielsen coordinate, the twist deformation, which are used in the proof of the main result, are defined in this section. In Section 3, we define Hitchin components and recall properties of Hitchin representations, called the hyperconvex property and the Anosov property. The Bonahon-Dreyer coordinate is defined in Section 4. After the precise definition of the triangle, shearing, and gluing invariant, we recall the Bonahon-Dreyer parameterization theorem. In Section 5, we show the only-if part of Corollary 1.2. The proof is due to direct computations of the invariants. In Section 6, we show the main result by using the technique of hyperbolic geometry of surfaces. Theorem 5.6 and Theorem 6.1 imply Theorem 1.1 and Corollary 1.2. 
In Section 7, we refer to the case of surfaces with boundary. \begin{remark} The results of this paper are a generalization of \cite{I}. We use a technique which is used in \cite{I} to show Proposition 5.1 and Proposition 5.5. \end{remark} \subsection*{Acknowledgements} The author would like to thank Shinpei Baba, Hideki Miyachi, and Ken'ichi Ohshika for their warm encouragement and valuable discussion. \section{Hyperbolic geometry of surface} \subsection{Hyperbolic structures of surfaces} Let $S$ be a compact oriented surface. We denote the hyperbolic plane of upper-half plane model by $\mathbb{H}^2$. In this paper, we endow $\mathbb{H}^2$ with the orientation induced by the framing $<e_1, e_2>$, where $e_1 = (1,0)^t, e_2 = (0,1)^t$. The group of orientation-preserving isometries ${\rm Isom}^+(\mathbb{H}^2)$ is isomorphic to the group ${\rm PSL}_2\mathbb{R}$, and the group ${\rm PSL}_2\mathbb{R}$ acts on $\mathbb{H}^2$ as linear fractional transformations. A {\it hyperbolic metric} of $S$ is a complete Riemannian metric of constant curvature $-1$, which makes the boundary totally geodesic if $S$ has a nonempty boundary. An isometric class of a hyperbolic metric on $S$ is often called a {\it hyperbolic structure} of $S$. The hyperbolic structure of $S$ is related to a good representation of the fundamental group $\pi_1(S)$. A representation $\rho : \pi_1(S) \rightarrow {\rm PSL}_2 \mathbb{R}$ is said to be {\it Fuchsian} if (i) $\rho$ is faithful and discrete, and (ii) $\rho$ sends the boundary components to hyperbolic elements if $S$ has a nonempty boundary. If $\rho : \pi_1(S) \rightarrow {\rm PSL}_2\mathbb{R}$ is Fuchsian, then there exists a subset $\Omega_{\rho}$, which is called a domain of discontinuity of $\rho$, such that $\rho(\pi_1(S))$ acts on $\Omega_{\rho}$ properly and $S_{\rho}= \rho(\pi_1(S)) \setminus \Omega_{\rho}$. The surface $S_{\rho}$ is a surface with a hyperbolic metric. 
For a Fuchsian representation $\rho$, we can construct a $(\pi_1(S), \rho)$-equivariant local homeomorphism $f_{\rho} : \tilde{S} \rightarrow \mathbb{H}^2$ from the universal covering of $S$ to the hyperbolic plane. The image coincides with $\Omega_{\rho}$. This map $f_{\rho}$ is called the {\it developing map} associated to $\rho$. In this paper we assume that Fuchsian representations are orientation-preserving, i.e. the associated developing map is orientation-preserving. In addition to, we suppose that the reference surface $S$ is given a hyperbolic metric. \subsection{Geodesic laminations} A {\it geodesic lamination} is a closed subset of $S$ which can be decomposed to a disjoint union of simple complete geodesics called {\it leaves}. Geodesic laminations consist of closed and biinfinite geodesics, and we call them {\it closed leaves} and {\it biinfinite leaves} respectively. The concept of geodesics depends on a hyperbolic metric of $S$. We remark that there exists a natural bijection between the set of $g_1$-geodesic laminations and the set of $g_2$-geodesic laminations for different hyperbolic metrics $g_1$ and $g_2$ of $S$. In particular, for any hyperbolic metric $g$ and any simple curve $c$ on $S$, there is a $g$-geodesic $c_g$ which is isotopic to $c$. A geodesic lamination is said to be {\it maximal} if it is properly contained in no other geodesic lamination. In this paper, we consider only laminations consisting of finitely many leaves. For a geodesic lamination $\mathcal{L}$ of $S$, the preimage $\tilde{\mathcal{L}}$ of $\mathcal{L}$ in $\tilde{S}$ gives a geodesic lamination of $\mathbb{H}^2$. A connected component of the closure of $\mathbb{H}^2 \setminus \tilde{\mathcal{L}}$ is called a {\it plaque}. A geodesic lamination is {\it oriented} if each leaf is oriented. We may choose the orientation of each leaf independently. Given maximal geodesic lamination $\mathcal{L}$, we define a short arc system for closed leaves as an additional data. 
A {\it short arc system} $K = \{ K_C \}_C$ is a family of an arc $K_C$ defined for each closed leaf $C$ of $\mathcal{L}$ which satisfies two condition (i),(ii) below: (i) The arc $K_C$ is transverse to $\mathcal{L}$ and the intersection $K_C \cap C$ is just one point $x$. (ii) Let $K_1$ and $K_2$ be component of $K_C \setminus C$. Then there exists an immersion $f_i : K_i \times [0, +\infty) \rightarrow S$ such that $f_i(x,0) = x$, the subset $\{x \} \times [0, \infty)$ parametrizes a geodesic with unit speed spiraling along $C$, and the image $f_i(x,[0, \infty))$ is contained in a leaf of $\mathcal{L}$ if $x \in \mathcal{L} \cap K_i$. We denote, by $\mathcal{L}_K$, the geodesic lamination $\mathcal{L}$ with a short arc system $K$. Note that the closed leaf which intersect to $K_C$ is only $C$ by definition. \subsection{Teichm\"uller space} The {\it Teichm\"uller space} $\mathscr{T}(S)$ of $S$ is defined by \[ \mathscr{T}(S) = \{ \rho : \pi_1(S) \rightarrow {\rm PSL}_2\mathbb{R} ~|~ \mbox{Fuchsian} \}/{\rm PSL}_2\mathbb{R} \] where the quotient is defined by the conjugate action of ${\rm PSL}_2\mathbb{R}$ on the set of Fuchsian representations. The topology of $\mathscr{T}(S)$ is given by the compact open topology. The Teichm\"uller space is the deformation space of hyperbolic structures of $S$. Let ${\rm Hyp}(S)$ be the set of hyperbolic metrics of $S$, and ${\rm Diff}_0(S)$ be the identity component of the group of diffeomorpshisms of $S$. The group ${\rm Diff}_0(S)$ acts on ${\rm Hyp}(S)$ by pull-back. The Teichm\"uller space is also defined by $\mathscr{T}(S) ={\rm Hyp}(S) / {\rm Diff}_0(S)$. This definition is equivalent to the definition by Fuchsian representations. If we have a Fuchsian representation of $\pi_1(S)$, then the associated hyperbolic metric is defined by the covering $\Omega_{\rho} \rightarrow S$. 
Conversely, for any hyperbolic metric $g$ of $S$, there is an orientation-preserving isometric embedding $f_g : \tilde{S}_g \rightarrow \mathbb{H}^2$ where $\tilde{S}_g$ is the universal covering of $S_g$ with pullback of $g$. Then we can take a representation $\rho : \pi_1(S) \rightarrow {\rm PSL}_2\mathbb{R}$ such that $f_g$ is $(\pi_1(S), \rho)$-equivariant. This representation is Fuchsian. There are some equivalent definitions of $\mathscr{T}(S)$, see \cite{IT}. \subsection{Parameterizations of hyperbolic structures of a pair of pants} \subsection*{Length parameterization} We consider some parameterizations of the Teichm\"uller space of a pair of pants. Note that a pair of pants is oriented. It is well known that a hyperbolic structure of a pair of pants $P$ is uniquely determined by the lengths of the boundary components. \begin{theorem}(\cite{IT}, Theorem 3.5.) Let $C_1, C_2, C_3$ be boundary components of $P$. Then the map \[ \mathscr{T}(P) \rightarrow \mathbb{R}_{>0}^3 : \rho \mapsto (l_{\rho}(C_1), l_{\rho}(C_2), l_{\rho}(C_3)) \] is a diffeomorphism, where $l_{\rho}(\cdot)$ is the length function associated to a hyperbolic structure $\rho$. \end{theorem} \subsection*{Shearing parameterization} We give another parameterization of $\mathscr{T}(P)$ by the shearing parameter along ideal triangles. An ideal triangle is a geodesic triangle in $\mathbb{H}^2$ which has vertices at the ideal boundary. This is unique up to isometry. Let us consider two ideal triangles $\triangle(x, y, z_0), \triangle(x,y,z_1)$ which are adjacent along the side $[x,y]$. For each triangle, we draw the geodesics $p_0, p_1$ joining $z_0$, $z_1$ to $[x,y]$ which are orthogonal to $[x,y]$. Let $b_i = p_i \cap [x,y]$. The {\it shearing parameter} $\sigma(\triangle(x, y, z_0), \triangle(x,y,z_1) )$ of $\triangle(x,y, z_0)$ and $\triangle(x,y,z_1)$ along $[x,y]$ is the signed hyperbolic distance $d(b_0, b_1)$.
If $b_1$ is on the left side of $b_0$ with respect to the direction of $[z_0, b_0]$ from $z_0$ to $b_0$, then we define the sign of $\sigma(\triangle(x, y, z_0), \triangle(x,y,z_1) )$ to be positive. See Figure 1. \begin{figure}[htbp] \begin{center} \includegraphics[width = 7cm, height=4cm]{shearing.pdf} \caption{The shearing parameter is positive.} \end{center} \end{figure} We can describe shearing parameters in terms of cross ratio. \begin{definition} Let $a,b,c,d \in \partial \mathbb{H}^2$ be a quadruple of distinct points of the ideal boundary $\partial \mathbb{H}^2$. The cross ratio $z(a,b,c,d)$ is the ratio \[ z(a,b,c,d) = \dfrac{(d-a)(b-c)}{(d-c)(b-a)}. \] \end{definition} \begin{remark} The cross ratio of $a,b,c,d \in \partial \mathbb{H}^2$ is usually defined by \[ z'(a,b,c,d) = \dfrac{(a-c)(b-d)}{(a-d)(b-c)}. \] The two definitions are related by $z(a,b,c,d) = z'(d,b,a,c)$. The advantage of our definition is that it satisfies $ z(0,1,\infty,d) = d$. \end{remark} Let $B$ be a biinfinite leaf with the end points $x,y$. We consider two ideal triangles $T^l = \triangle (x,z^l,y)$ and $T^r = \triangle(x, y, z^r)$ where the points $x,z^l,y,z^r$ are in counterclockwise order. The following relation is given by a direct computation. \begin{proposition}The following relation holds. \[ \sigma(T^l, T^r) = \log \left( -z(y, z^r, x , z^l)^{-1} \right). \] \end{proposition} Using shearing parameters, we can parameterize hyperbolic structures of a pair of pants $P$. Consider a maximal geodesic lamination of $P$. Maximal geodesic laminations of $P$ which consist of finitely many leaves are classified into two types (I) and (II) as in Figure 2 and Figure 3.
\begin{figure}[htbp] \begin{minipage}{0.45\hsize} \begin{center} \includegraphics[width = 40mm, height=2cm]{type1.pdf} \caption{A lamination of type 1.} \end{center} \end{minipage} \begin{minipage}{0.45\hsize} \begin{center} \includegraphics[width=40mm, height=2cm]{type2.pdf} \caption{A lamination of type 2.} \end{center} \end{minipage} \end{figure} The lamination of type (I) is represented by $\{ C_1, C_2, C_3, B_{12}, B_{23}, B_{31} \}$, where $C_i$ is a boundary component and $B_{ij}$ is a biinfinite leaf which spirals to $C_i$ and $C_j$. The lamination of type (II) is represented by $\{ C_1, C_2, C_3, B_{ii}, B_{ij}, B_{ik} \}$. They contain a biinfinite leaf spiraling to the same closed leaf in its ends. Moreover we characterize these laminations by the direction of the spiraling. When the spiraling occurs in the direction opposite to the orientation of pants, we call the spiraling {\it positive spiraling}. See Figure 4. Similarly, we call the spiraling in Figure 5 {\it negative spiraling}. Maximal geodesic laminations on $P$ are classified by types and signatures of the spiraling. \begin{figure}[htbp] \begin{minipage}{0.45\hsize} \begin{center} \includegraphics[width = 40mm, height=3cm]{positivesp.pdf} \caption{Positive spiraling.} \end{center} \end{minipage} \begin{minipage}{0.45\hsize} \begin{center} \includegraphics[width=40mm, height=3cm]{negativesp.pdf} \caption{Negative spiraling.} \end{center} \end{minipage} \end{figure} We fix a maximal geodesic lamination $\mathcal{L} = \{ C_1, C_2, C_3, B_1, B_2, B_3\}$ of $P$. Note that both types (I) and (II) consist of three closed leaves and three biinfinite leaves. This lamination induces an ideal triangulation of $P$. Let $\rho \in \mathscr{T}(P)$ be a hyperbolic structure and $f_{\rho} : \tilde{P} \rightarrow \mathbb{H}^2$ be the associated developing map. The shearing parameter of $\rho$ along $B_i$ is defined as follows. 
Lift $B_i$ to $\tilde{B}_i$, which is a biinfinite geodesic in the universal covering $\tilde{P}$. We denote the end points of $\tilde{B}_i$ by $x$ and $y$. Under the ideal triangulation, $\tilde{B}_i$ is adjacent to two ideal triangles $T^l = \triangle (x,y,z^l)$ and $T^r = \triangle (x,y, z^r)$. Here the vertices $z^r$ and $z^l$ are determined so that $x,z^l, y, z^r$ are in counterclockwise order. We define the shearing parameter $\sigma^{\rho}(B_i)$ by the shearing parameter $\sigma(f_{\rho}(T^l), f_{\rho}(T^r))$. \begin{proposition}(See \cite{Ma}, Proposition 7.4.9.) There is an analytic embedding \[ \sigma_{\mathcal{L}} ~:~\mathscr{T}(P) \rightarrow \mathbb{R}^3 : \rho \mapsto (\sigma^{\rho}(B_1), \sigma^{\rho}(B_2), \sigma^{\rho}(B_3)). \] \end{proposition} To describe the range of this parameterization, we consider the relation between the shearing parameter and the boundary length, both of which determine hyperbolic structures of $P$. For a closed leaf $C_i$, we suppose that biinfinite leaves $B_1, \cdots, B_k$ spiral to $C_i$. \begin{proposition}[\cite{Ma}, Proposition 7.4.8] If the spiraling of $B_i$ is positive, then \[ l_{\rho}(C_i) = \sum_{j=1}^k \sigma^{\rho}(B_j), \] and if the spiraling of $B_i$ is negative, then \[ l_{\rho}(C_i) = - \sum_{j=1}^k \sigma^{\rho}(B_j). \] \end{proposition} Consider a maximal geodesic lamination of type (I). When we represent the lamination by $\mathcal{L} = \{ C_1, C_2, C_3, B_{12}, B_{23}, B_{31} \}$, the shearing parameterization associated to $\mathcal{L}$ is defined by $\sigma_{\mathcal{L}}(\rho) = (\sigma^{\rho}(B_{12}), \sigma^{\rho}(B_{23}), \sigma^{\rho}(B_{31}))$. The range of this parameterization is described as follows; \[ \{ (x_{12}, x_{23}, x_{31}) \in \mathbb{R}^3 ~|~\forall i,j,k ~\mbox{with}~ \{i,j,k\} = \{1,2,3\}, {\rm sgn}(C_i) ( x_{ij} + x_{ik} ) > 0 \}, \] where sgn($C_i$) is the signature of spiraling along $C_i$. 
In the case of laminations of type (II), we consider $\mathcal{L} = \{ C_1, C_2, C_3, B_{ii}, B_{ij}, B_{ik} \}$ and the associated shearing parameterization $\sigma_{\mathcal{L}}(\rho) = (\sigma^{\rho}(B_{ii}), \sigma^{\rho}(B_{ij}), \sigma^{\rho}(B_{ik}))$. The range of this parameterization is equal to the following space; \[ \{ (x_{ii}, x_{ij}, x_{ik}) \in \mathbb{R}^3 ~|~x_{ij} > 0, x_{ik} > 0, {\rm sgn}(C_i)(x_{ii}+x_{ij} + x_{ik}) > 0 \}. \] \subsection{Fenchel-Nielsen coordinate and twist deformations} In this subsection, we recall the Fenchel-Nielsen coordinate, which is a global coordinate of $\mathscr{T}(S)$. See Section 3.2 of \cite{IT} for details. To define this coordinate, we recall a pants decomposition of surfaces. It is known that any compact orientable surface $S$ of negative Euler characteristic $\chi(S)$ with $b$ boundary components is decomposed into $|\chi(S)|$ pairs of pants by a family of $\frac{3|\chi(S)| - b}{2}$ disjoint simple closed curves. If $S$ is decomposed into pairs of pants $\mathcal{P} = \{ P_1, \cdots, P_{|\chi(S)|} \}$ along simple closed curves $\mathcal{C} = \{ C_1, \cdots, C_{\frac{3|\chi(S)| - b}{2}}\}$, we call $\mathcal{P}$ a pants decomposition of $S$ and $C_i \in \mathcal{C}$ the decomposing curves of $\mathcal{P}$. We suppose that $C_i$ is geodesic. The Fenchel-Nielsen coordinate is a coordinate of $\mathscr{T}(S)$ by the hyperbolic length of decomposing curves and the twist parameter along decomposing curves. We define the twist parameter. Suppose two pairs of pants $P_1$ and $P_2$ are glued along the closed geodesic $C$ which is a boundary component of $P_1$ and $P_2$. We fix a hyperbolic structure $\rho$ of $P_1 \cup_C P_2$. For each pair of pants, we fix an orthogonal arc $H_i$ which joins $C$ and another boundary component of $P_i$. Such an arc exists since there is an isometric involution of a pair of pants, and its fixed set consists of three geodesics which are orthogonal to two boundary components.
One can choose this geodesic as an orthogonal arc. To define the twist parameter, we lift $P_i$ to $\tilde{P}_i$ which is a subset of the universal covering of $P_1 \cup_C P_2$ so that $\tilde{P}_i$ are adjacent. Take lifts $\tilde{C}$ and $\tilde{H}_i$ of the arcs $C$ and $H_i$ so that they are on $\tilde{P}_i$. Then the {\it twist parameter} $\theta^{\rho}(C)$ along $C$ is defined by \[ \theta^{\rho}(C) = 2 \pi \dfrac{{\rm Length}_{\rho}(H_1, H_2)}{{\rm Length}_{\rho}(C)} \] where ${\rm Length}_{\rho}(C)$ is the $\rho$-length of the closed curve $C$, and ${\rm Length}_{\rho}(H_1, H_2)$ is the signed $\rho$-length between the end points $\tilde{H}_1 \cap \tilde{C}$ and $\tilde{H}_2 \cap \tilde{C}$. The signature of ${\rm Length}_{\rho}(H_1, H_2)$ is positive if $\tilde{H}_1$ and $\tilde{H}_2$ are as in Figure 6. \begin{figure}[htbp] \begin{center} \includegraphics[width = 7cm, height=4cm]{FN.pdf} \caption{The twist parameter is positive.} \end{center} \end{figure} Fix a hyperbolic structure $\rho$ of $S$ and a pants decomposition of $S$. We denote boundary components of $S$ by $\partial_1, \cdots, \partial_b$. For a pants decomposition of $S$ by $\{ C_i \}$, the {\it Fenchel-Nielsen coordinate} is defined by \[ FN : \mathscr{T}(S) \rightarrow \mathbb{R}^{3|\chi(S)|} : \rho \mapsto (l_{\rho}(C_i), \cdots, l_{\rho}(\partial_i), \cdots, \theta^{\rho}(C_i), \cdots ). \] We recall the twist deformation of hyperbolic structures which corresponds to the change of twist parameters. Let $C$ be a decomposing curve of a pants decomposition of $S$ and $\rho$ be a hyperbolic structure of $S$. We take the preimage $\mathscr{C}$ of $C$ by the covering $f_{\rho} (\tilde{S}) \rightarrow S_{\rho}$, which is a geodesic lamination of $\mathbb{H}^2$. Choose a leaf $\tilde{C} \in \mathscr{C}$. The geodesic $\tilde{C}$ is a side of two plaque $Q^l$ and $Q^r$ of $\mathscr{C}$. We orient $\tilde{C}$ so that the plaque $Q^l$ is on the left of $\tilde{C}$ with respect to the orientation. 
Let ${\rm tw}_t$ be a hyperbolic isometry with the axis $\tilde{C}$, which is conjugate to \[ \begin{bmatrix} \exp(t) & 0 \\ 0 & \exp(-t) \end{bmatrix} \] by the normalization which sends the attracting (resp. repelling) point of $\tilde{C}$ to $\infty$ (resp. 0). Glue ${\rm tw}_t(Q^l)$ and $Q^r$ along $\tilde{C}$. Iterating this operation for all leaves of $\mathscr{C}$, we obtain a new developing map ${\rm Tw}_t \circ f_{\rho} : \tilde{S} \rightarrow \mathbb{H}^2$ where ${\rm Tw}_t$ is a map $\mathbb{H}^2 \rightarrow \mathbb{H}^2$ induced by the iteration. The developing map induces an element $\eta$ of $\mathscr{T}(S)$. We call $\eta$ a {\it twist deformation} of $\rho$. \section{Hitchin representations and their properties} \subsection{Hitchin components} Let $\Gamma$ be a finitely generated group. The ${\rm PSL}_n\mathbb{R}$-{\it representation variety} $\mathcal{R}_n(\Gamma)$ of $\Gamma$ is the set of group homomorphisms $\mathcal{R}_n(\Gamma)={\rm Hom}(\Gamma, {\rm PSL}_n(\mathbb{R}))$ with the compact open topology. ${\rm PSL}_n\mathbb{R}$ acts on the representation variety by conjugation. The quotient space $\mathcal{X}_n(\Gamma)=\mathcal{R}_n(\Gamma)/ {\rm PSL}_n(\mathbb{R})$ is called the ${\rm PSL}_n(\mathbb{R})$-{\it character variety}. When the finitely generated group $\Gamma$ is the fundamental group of a manifold $M$, we denote the representation (resp. character) variety $\mathcal{R}_n(\pi_1(M))$ (resp. $\mathcal{X}_n(\pi_1(M))$) simply by $\mathcal{R}_n(M)$ (resp. $\mathcal{X}_n(M)$). When $\Gamma = \pi_1(S)$, the Teichm\"uller space $\mathscr{T}(S)$ is naturally embedded in the character variety $\mathcal{X}_2(S)$ by definition. It is known that $\mathscr{T}(S)$ is a connected component of $\mathcal{X}_2(S)$. (See \cite{Go88}.) The Hitchin component is a preferred component of $\mathcal{X}_n(S)$ which contains $\mathscr{T}(S)$.
Let us consider an irreducible representation ${\rm SL}_2\mathbb{R} \rightarrow {\rm SL}_n\mathbb{R}$ which is unique up to equivalence. This representation is obtained by the symmetric power. We denote its projectivization ${\rm PSL}_2\mathbb{R} \rightarrow {\rm PSL}_n\mathbb{R}$ by $\iota_n$. The representation $\iota_n$ induces a map between character varieties $(\iota_n)_* : \mathcal{X}_2(S) \rightarrow \mathcal{X}_n(S)$ by the correspondence $\rho \mapsto \iota_n \circ \rho$. Since $\iota_n$ is a group homomorphism, this induced map is well-defined. When $\partial S = \emptyset$, the Hitchin component is defined as below. \begin{definition} The $({\rm PSL}_n\mathbb{R}-)$ Hitchin component $H_n(S)$ is the connected component of $\mathcal{X}_n(S)$ which contains the image $F_n(S) = (\iota_n)_*(\mathscr{T}(S))$. \end{definition} When $\partial S \neq \emptyset$, a representation $\rho : \pi_1(S) \rightarrow {\rm PSL}_n\mathbb{R}$ is said to be {\it purely loxodromic} if the image of boundary components via $\rho$ is conjugate to an element in the interior of a Weyl chamber, so an element with distinct, only real eigenvalues. We denote the space of purely-loxodromic representations by $\mathcal{R}_n^{loxo}(S)$, and $\mathcal{X}_n^{loxo}(S) = \mathcal{R}_n^{loxo}(S) / {\rm PSL}_n\mathbb{R}$. Note that $(\iota_n)_*(\mathscr{T}(S))$ consists of only purely loxodromic elements. The (${\rm PSL}_n\mathbb{R}$-) Hitchin components $H_n(S)$ is the connected component of $\mathcal{X}^{loxo}_n(S)$ which contains the image $F_n(S)=(\iota_n)_*(\mathscr{T}(S))$. We call the image $F_n(S)$ of $\mathscr{T}(S)$ the {\it Fuchsian locus} of $H_n(S)$. {\it Hitchin representations} are representations $\rho : \pi_1(S) \rightarrow {\rm PSL}_n\mathbb{R}$ whose conjugacy class belongs to $H_n(S)$. 
A Hitchin representation $\rho$ is ${\rm PSL}_n\mathbb{R}$-{\it Fuchsian} if $\rho$ is contained in $F_n(S)$, {\it i.e.} there is a Fuchsian representation $\rho_0 : \pi_1(S) \rightarrow {\rm PSL}_2\mathbb{R}$ such that $\rho = \iota_n \circ \rho_0$. We remark on the homeomorphism type of Hitchin components of closed surfaces. \begin{theorem}[Hitchin \cite{Hi92} ] If the surface $S$ is closed, the Hitchin component $H_n(S)$ is homeomorphic to $\mathbb{R}^{(2g-2)(n^2-1)}$. \end{theorem} \begin{remark} If it is clear from context, we simply call ${\rm PSL}_n\mathbb{R}$-Fuchsian representations Fuchsian representations. In addition, when there is a danger of confusion between Fuchsian representations, which are elements of the Teichm\"uller space, and ${\rm PSL}_n\mathbb{R}$-Fuchsian representations, we call the former hyperbolic holonomies. \end{remark} \begin{caution} In the following, we consider only closed surfaces. The non-closed case is discussed in Section 7. \end{caution} \subsection{Hyperconvex property} The projective special linear group ${\rm PSL}_n\mathbb{R}$ acts on the projective space $\mathbb{RP}^{n-1} = P(\mathbb{R}^n)$ by the projectivization of the linear action of ${\rm SL}_n\mathbb{R}$ on $\mathbb{R}^n$. We define the hyperconvexity of projective linear representations of $\pi_1(S)$. Let $\partial \pi_1(S)$ be the ideal boundary of $\pi_1(S)$ which is the visual boundary of a Cayley graph of $\pi_1(S)$. Note that $\partial \pi_1(S)$ is homeomorphic to $\partial \tilde{S}$ through a hyperbolic structure of $S$. Therefore, in this paper, we identify $\partial \pi_1(S)$ with $\partial \tilde{S}$ by using the reference hyperbolic structure of $S$.
\begin{definition} A representation $\rho : \pi_1(S) \rightarrow {\rm PSL}_n\mathbb{R}$ is said to be hyperconvex if there exists a $(\pi_1(S), \rho)$-equivariant continuous map $\xi_{\rho}:\partial \pi_1(S) \rightarrow \mathbb{RP}^{n-1}$ such that the sum $\xi_{\rho}(x_1) + \cdots + \xi_{\rho}(x_n)$ is direct for any pairwise distinct points $x_1, \cdots, x_n \in \partial \pi_1(S)$. \end{definition} The associated curve $\xi_{\rho}$ is called the {\it hyperconvex curve} of $\rho$. All Hitchin representations have the hyperconvex property. Labourie showed that Hitchin representations are hyperconvex by the Anosov property which is explained in the next subsection. Moreover the converse result was shown by Guichard. Hence the following result holds. \begin{theorem}[Guichard \cite{Gu08}, Labourie \cite{La06}] A representation $\rho : \pi_1(S) \rightarrow {\rm PSL}_n\mathbb{R}$ is Hitchin if and only if $\rho$ is hyperconvex. \end{theorem} Moreover Labourie showed the following. \begin{theorem}[\cite{La06}] Let $\rho :\pi_1(S) \rightarrow {\rm PSL}_n\mathbb{R}$ be a hyperconvex representation with the hyperconvex curve $\xi_{\rho} : \partial \pi_1(S) \rightarrow \mathbb{RP}^{n-1}$. Then there exists a unique family of curves $\xi^p : \partial \pi_1(S) \rightarrow {\rm Gr}^p(\mathbb{R}^n)$, $1 \leq p \leq n$, with the properties from (i) to (iv) below. \begin{itemize} \item[(i)] $\xi^p(x) \subset \xi^{p+1}(x)$ for any $x \in \partial \pi_1(S) $. \item[(ii)] $\xi^1(x) = \xi_{\rho}(x) $ for any $x \in \partial \pi_1(S) $. \item[(iii)] If $n_1, \cdots , n_l$ are positive integers such that $ \sum n_i \leq n$, then $\xi^{n_1}(x_1) + \cdots + \xi^{n_l}(x_l)$ is direct for any pairwise distinct points $x_1, \cdots, x_l \in \partial \pi_1(S)$.
\item[(iv)] If $n_1, \cdots , n_l$ are positive integers such that $ p =\sum n_i \leq n$, then \[ \lim_{(y_1, \cdots, y_l) \rightarrow x;~ y_i \mbox{ distinct}} \left( \xi^{n_1}(y_1) + \cdots + \xi^{n_l}(y_l) \right) = \xi^p(x). \] \end{itemize} \end{theorem} Theorem 3.7 implies that any hyperconvex curve extends to a curve into the flag manifold. (See Section 4.1 for the precise definition of flags.) The map $(\xi^1, \cdots, \xi^{n-1}) : \partial \pi_1(S) \rightarrow {\rm Flag}(\mathbb{R}^n)$ is called the {\it (osculating) flag curve} of the hyperconvex curve $\xi_{\rho}$. We can explicitly describe the hyperconvex curve of Fuchsian representations. Let $\rho_n = \iota_n \circ \rho$ be a Fuchsian representation. Recall that the irreducible representation $\iota_n$ is defined by the symmetric power of the representation $({\rm SL}_2\mathbb{R}, \mathbb{R}^2)$. We identify $\mathbb{R}^n$ with $Sym^{n-1}(\mathbb{R}^2)$. Consider the Veronese embedding $\nu : \mathbb{RP}^1 \rightarrow \mathbb{RP}^{n-1}$ defined by sending $[a: b]$ to $[a^{n-1} : a^{n-2}b : \cdots : b^{n-1}]$. Then the composition $\nu \circ f_{\rho}$ of the Veronese embedding with the developing map gives the hyperconvex curve of $\rho_n$. Using homogeneous polynomials, the flag is also described explicitly. The symmetric power $Sym^{n-1}(\mathbb{R}^2)$, which is identified with $\mathbb{R}^n$, is also identified with the vector space \[ {\rm Poly}_n(X,Y) = \{ a_1 X^{n-1} + a_2 X^{n-2}Y + \cdots + a_n Y^{n-1} ~|~a_i \in \mathbb{R} \} \] of homogeneous polynomials of degree $n-1$. If we denote a canonical basis of $Sym^{n-1}(\mathbb{R}^2)$ by $e_1^{n-1}, e_1^{n-2} \cdot e_2, \cdots , e_2^{n-1}$, where $e_1, e_2$ are the canonical basis of $\mathbb{R}^2$, the identification is defined by mapping the vector $e_1^i \cdot e_2^{n-1-i}$ to $\binom{n-1}{i}X^iY^{n-1-i}$. Then the one dimensional subspace $\nu ([a:b])$ is equal to $\mathbb{R} \langle (aX+bY)^{n-1} \rangle$ in the vector space ${\rm Poly}_n(X,Y)$.
In addition, the flag curve associated to $\nu$, which is again denoted by $\nu$, is defined by $ \{ P(X,Y) \in {\rm Poly}_n(X,Y) ~|~ \exists Q(X,Y) ~s.t.~ P(X,Y) = (aX + bY)^{n-d} Q(X,Y) \}$, which is a $d$-dimensional subspace. We call this flag curve $\nu \circ f_{\rho}$ the {\it Veronese flag curve}. This Veronese flag curve is the flag curve of Fuchsian representations. \subsection{Anosov property} We recall the Anosov property of representations which is strongly related to the hyperconvexity of representations. See \cite{GGKW17} for the detail. Let $G$ be a semisimple Lie group and $K$ be a maximal compact subgroup. The Lie algebra of $G$, denoted by $\mathfrak{g}$, is decomposed into $\mathfrak{k} \oplus \mathfrak{p}$ by the Killing form, where $\mathfrak{k}$ is the Lie algebra of $K$. We take a maximal abelian subalgebra $\mathfrak{a} \subset \mathfrak{p}$. Let $\mathfrak{g} = \mathfrak{g}_0 \oplus \bigoplus_{\alpha \in \Sigma}\mathfrak{g}_{\alpha}$ be a root decomposition where $\Sigma$ is the system of restricted roots of $\mathfrak{g}$. We denote the set of positive roots by $\Sigma^+ = \{ \alpha \in \Sigma ~|~ \alpha > 0 \}$ and the set of negative roots by $\Sigma^- = \{ \alpha \in \Sigma ~|~ \alpha < 0 \}$. The set $\Delta \subset \Sigma^+$ is the set of simple roots. Let $\mathfrak{n}^{\pm} = \bigoplus_{\alpha \in \Sigma^{\pm}} \mathfrak{g}_{\alpha}$ and $N^{\pm} = \exp(\mathfrak{n}^{\pm})$. For a subset $\theta \subset \Delta$, we set $\mathfrak{a}_{\theta} = \bigcap_{\alpha \in \theta}{\rm Ker}\alpha$, and $M_{\theta} = Z_K(\mathfrak{a}_{\theta})$, the centralizer of $\mathfrak{a}_{\theta}$ in $K$. The subgroup $P_{\theta} = M_{\theta}\exp(\mathfrak{a})N^+$ is called a parabolic subgroup of $G$. Two parabolic subgroups are said to be opposite if their intersection is reductive.
It is known that any pair of opposite parabolic subgroups is conjugate to a pair $(P_{\theta}, P^-_{\theta})$ for a subset $\theta \subset \Delta$ where $P^-_{\theta} = M_{\theta}\exp(\mathfrak{a})N^-$. Let $\mathfrak{a}^+ = \{ a \in \mathfrak{a} ~|~ \alpha(a) > 0 ~\forall \alpha \in \Sigma^+ \}$ which is called a Weyl chamber. There is a decomposition of $G$ into $K \exp(\bar{\mathfrak{a}}^+) K$, called the Cartan decomposition. In particular, for any element $g \in G$, there exist $k, k' \in K$ and a unique $\mu(g) \in \bar{\mathfrak{a}}^+$ such that $g = k \exp(\mu(g))k'$. The correspondence $\mu : G \rightarrow \bar{\mathfrak{a}}^+$ is called the Cartan projection. For a parabolic subgroup $P_{\theta}$, the homogeneous space $G/P_{\theta}$ is called a flag manifold. Flag manifolds $G/P_{\theta}$ are identified with the set of conjugates of $P_{\theta}$ in $G$ which are also parabolic subgroups. Consider two maps $\xi^+ : \partial \Gamma \rightarrow G/P_{\theta}$ and $\xi^- : \partial \Gamma \rightarrow G/P^-_{\theta}$ from the ideal boundary of a hyperbolic group $\Gamma$ into flag manifolds. The maps $\xi^+, \xi^-$ are said to be transverse if $\xi^+(x)$ and $\xi^-(y)$, which are identified with parabolic subgroups, are opposite for any distinct points $x,y \in \partial \Gamma$. Moreover they are said to be dynamics-preserving for a representation $\rho : \Gamma \rightarrow G$ if for any $\gamma \in \Gamma$ of infinite order $\xi^+(\gamma^+)$ and $\xi^-(\gamma^+)$ are the attracting fixed points of $\rho(\gamma)$ where $\gamma^+ \in \partial \Gamma$ is the attracting fixed point of $\gamma$. \begin{definition} Let $\Gamma$ be a word hyperbolic group, G a semisimple Lie group, and $\theta \subset \Delta$ a nonempty subset of the simple restricted roots of $G$.
A representation $\rho : \Gamma \rightarrow G$ is said to be $P_{\theta}$-Anosov if there exist continuous, $\rho$-equivariant and transverse maps $\xi^+ : \partial \Gamma \rightarrow G/P_{\theta}$ and $\xi^- : \partial \Gamma \rightarrow G/P_{\theta}^-$ such that the maps $\xi^{\pm}$ are dynamics-preserving for $\rho$ and \[ \exists c, C > 0, \forall \alpha \in \theta, \forall \gamma \in \Gamma, \alpha(\mu(\rho(\gamma))) \geq c|\gamma| - C. \] \end{definition} In \cite{La06}, Labourie showed that Hitchin representations are $B$-Anosov for a Borel subgroup $B$ of ${\rm PSL}_n\mathbb{R}$, and that they are faithful and discrete with irreducible image. The maps $\xi^{\pm}$ are called the {\it boundary maps} of the Anosov representation $\rho$. Since $G/B \cong {\rm Flag}(\mathbb{R}^n)$ when $B$ is Borel, the boundary maps are maps from $\partial \pi_1(S)$ to the flag manifold ${\rm Flag}(\mathbb{R}^n)$ and coincide with the flag curve induced by the hyperconvexity of Hitchin representations. \begin{remark} In the definition of Anosov representations, we follow Gu\'eritaud-Guichard-Kassel-Wienhard \cite{GGKW17}. The original definition is given by Labourie \cite{La06} and Guichard-Wienhard \cite{GW12}. Kapovich-Leeb-Porti \cite{KLP17} give another definition from the viewpoint of the geometry of symmetric spaces. \end{remark} Here we recall the property of eigenvalues of Hitchin representations, which follows from the Anosov property. \begin{proposition}[Labourie \cite{La06}, Bonahon-Dreyer \cite{BD14}] Let $\rho : \pi_1(S) \rightarrow {\rm PSL}_n\mathbb{R}$ be a Hitchin representation and $\gamma \in \pi_1(S)$ a nontrivial element of $\pi_1(S)$. Then $\rho(\gamma)$ has a lift $ \widetilde{\rho(\gamma)} \in {\rm SL}_n(\mathbb{R})$ whose eigenvalues are distinct and positive. \end{proposition} In the setting of this proposition, we denote the eigenvalues of a lift $\widetilde{\rho(\gamma)}$ by $\lambda^{\rho}_1(\gamma) > \lambda^{\rho}_2(\gamma) > \cdots > \lambda^{\rho}_n(\gamma) >0$.
We define the {\it $k$-th length function} of a Hitchin representation $\rho$ by \[ l_k^{\rho}(\gamma) = \log \dfrac{\lambda_k^{\rho}(\gamma)}{\lambda_{k+1}^{\rho}(\gamma)} \] where $k=1, \cdots, n-1$. This is well-defined on the Hitchin component $H_n(S)$ since the conjugation preserves eigenvalues. The length function of Hitchin representations is an extension of a hyperbolic length function of simple closed curves of surfaces. This is used in the closed leaf condition in the next section. \section{The Bonahon-Dreyer parameterization} \subsection{Projective invariants} We define projective invariants of tuples of flags. A (complete) {\it flag} in $\mathbb{R}^n$ is a sequence of nested vector subspaces of $\mathbb{R}^n$ \[ F = ( \{0\} = F^0 \subset F^1 \subset F^2 \subset \cdots \subset F^n = \mathbb{R}^n )\] where ${\rm dim}F^d = d$. The {\it flag manifold} of $\mathbb{R}^n$ is the set of flags in $\mathbb{R}^n$. We denote the flag manifold by ${\rm Flag}(\mathbb{R}^n)$. Note that ${\rm Flag}(\mathbb{R}^n)$ is homeomorphic to a homogeneous space ${\rm PSL}_n\mathbb{R} / B$, where $B$ is a Borel subgroup of ${\rm PSL}_n\mathbb{R}$, and ${\rm PSL}_n\mathbb{R}$ naturally acts on the flag manifold. A {\it generic} tuple of flags is a tuple $(F_1, F_2, \cdots, F_k)$ of a finite number of flags $F_1, F_2, \cdots, F_k \in {\rm Flag}(\mathbb{R}^n)$ such that if $n_1, \cdots, n_k$ are nonnegative integers satisfying $n_1 + \cdots + n_k = n$, then $F_1^{n_1} \cap \cdots \cap F_k^{n_k} = \{ 0 \}$. Let $(E, F, G)$ be a generic triple of flags, and $p,q,r \geq 1$ integers with $p+q+r = n$. Choose a basis $e^d, f^d, g^d$ of the wedge product spaces $\bigwedge^dE^d, \bigwedge^dF^d, \bigwedge^dG^d$, which are one dimensional subspaces, for each $d=1, \cdots , n$ respectively. We fix an identification between $\bigwedge^n \mathbb{R}^n$ and $\mathbb{R}$. Then we can regard $e^{d_1} \wedge f^{d_2} \wedge g^{d_3} $ as an element of $\mathbb{R}$ since $d_1 + d_2 + d_3 = n$.
In particular $e^{d_1} \wedge f^{d_2} \wedge g^{d_3} $ is not equal to $0$ since $(E,F,G)$ is generic. \begin{definition} The $(p,q, r)$-th triple ratio $T_{pqr}(E,F,G)$ for a generic triple of flags $(E, F, G)$ is defined by \[ T_{pqr}(E,F,G) = \dfrac{ e^{p+1} \wedge f^{q} \wedge g^{r-1} \cdot e^{p} \wedge f^{q-1} \wedge g^{r+1} \cdot e^{p-1} \wedge f^{q+1} \wedge g^{r} }{ e^{p-1} \wedge f^{q} \wedge g^{r+1} \cdot e^{p} \wedge f^{q+1} \wedge g^{r-1} \cdot e^{p+1} \wedge f^{q-1} \wedge g^{r} }. \] \end{definition} The value of $T_{pqr}(E,F,G)$ is independent of the fixed identification $\bigwedge^n \mathbb{R}^n \cong \mathbb{R}$ and the choice of elements $e^d, f^d, g^d$. If one of the exponents of $e^d, f^d, g^d$ is equal to $0$, then we ignore the corresponding terms. For example, $e^0 \wedge f^q \wedge g^{n-q} = f^{q} \wedge g^{n-q}$. The action of ${\rm PSL}_n\mathbb{R}$ leaves the triple ratio invariant by definition. Under permutations of $(E,F,G)$, the triple ratio behaves as below. \begin{proposition} For a generic triple of flags $(E,F,G)$, \[ T_{pqr}(E,F,G) = T_{qrp}(F,G,E) = T_{qpr}(F,E,G)^{-1}. \] \end{proposition} Let $(E,F,G,G')$ be a generic quadruple of flags, and $p$ an integer with $1 \leq p \leq n-1$. We choose nonzero elements $e^{d}, f^{d}, g^{d}$ and $g'^{d}$ respectively in $\bigwedge^{d}E^{d}, \bigwedge^{d}F^{d}, \bigwedge^{d}G^{d}$ and $\bigwedge^{d}G'^{d}$. \begin{definition} The $p$-th double ratio $D_p(E,F,G,G')$ is defined by \[ D_p(E,F,G,G') = - \dfrac{e^p \wedge f^{n-p-1} \wedge g^1 \cdot e^{p-1} \wedge f^{n-p} \wedge g'^1}{e^p \wedge f^{n-p-1} \wedge g'^1 \cdot e^{p-1} \wedge f^{n-p} \wedge g^1}. \] \end{definition} This is also real-valued, well-defined, and invariant under the action of ${\rm PSL}_n\mathbb{R}$.
\subsection{Construction of invariants} We define three kinds of invariants of Hitchin representations, {\it triangle invariant}, {\it shearing invariant}, and {\it gluing invariant} for an oriented maximal geodesic lamination with a short arc system associated to a pants decomposition. The triangle invariant is defined for ideal triangles, induced by the ideal triangulation, using the triple ratio. The shearing invariant is defined for biinfinite leaves using the double ratio. The gluing invariant is defined for closed leaves using the short arc system and the double ratio. We take a pants decomposition $\mathcal{P}$ of the reference hyperbolic surface $S$. Let $ \{ C_1, \cdots, C_{\frac{3|\chi(S)| - b}{2}} \}$ be the family of decomposing curves. If $P \in \mathcal{P}$ is bounded by $C_i, C_j, C_k$, we write the pants $P$ by $P_{ijk}$. Consider the oriented maximal geodesic lamination $\mathcal{L} = \{ C_i, B_{ij} \}$ where $B_{ij}$ is a spiraling biinfinite geodesic connecting decomposing curves $C_i,C_j$. In the notation above, we do not care about the ordering of the indices. For example, $B_{ij} = B_{ji}$. The signature of each spiraling is arbitrary. In addition, we fix a short arc system $K$ of $\mathcal{L}$. Note that $\mathcal{L}$ induces an ideal triangulation of $S$. We denote the set of ideal triangles of this triangulation by $\mathcal{T} = \{T_{ijk}^0, T_{ijk}^1\}$, where $T_{ijk}^0,T_{ijk}^1$ are ideal triangles contained in a pair of pants $P_{ijk}$. Let $\rho : \pi_1(S) \rightarrow {\rm PSL}_n\mathbb{R}$ be a Hitchin representation and $\xi_{\rho} : \partial \pi_1(S) \rightarrow {\rm Flag}(\mathbb{R}^n)$ the associated flag curve. Fix a lift $\tilde{T}$ of $T \in \mathcal{T}$ and choose an ideal vertex $v_0$ of $\tilde{T}$ arbitrarily. We label the other ideal vertices $v_1, v_2$ so that $v_0, v_1, v_2$ are in clockwise order. Let $p,q,r$ be integers such that $p, q, r \geq 1$ and $p+q+r=n$.
\begin{definition} The $(p,q,r)$-th triangle invariant $\tau_{pqr}((T,v_0), \rho)$ of a Hitchin representation $\rho$ and an ideal triangle $T$ and a chosen vertex $v_0$ is defined by \[ \tau_{pqr}((T, v_0), \rho) = \log T_{pqr}(\xi_{\rho}(v_0), \xi_{\rho}(v_1), \xi_{\rho}(v_2)). \] \end{definition} The triangle invariant is independent of the choice of the lift $\tilde{T}$ since flag curves are $\rho$-equivariant and the triple ratio is invariant for the ${\rm PSL}_n\mathbb{R}$-action. A biinfinite leaf $B \in \mathcal{L}_K$ is a side of two ideal triangles. Let $T^l$ (resp. $T^r$) be the ideal triangle which is on the left (resp. right) side with respect to the orientation of $B$. We lift $B$ to a geodesic $\tilde{B}$ in $\tilde{S}$, and we also lift $T^l$ and $T^r$ to two ideal triangles $\tilde{T^l}$ and $\tilde{T^r}$ so that they are adjacent along $\tilde{B}$. We denote the repelling point and attracting point of $\tilde{B}$ by $y$ and $x$, and denote the other vertex of $\tilde{T}^l$ and $\tilde{T}^r$ by $z^l$ and $z^r$ respectively. Let $p$ be an integer with $1 \leq p \leq n-1$. \begin{definition} The $p$-th shearing invariant of a Hitchin representation $\rho$ along $B$ is defined by \[ \sigma_p(B, \rho) = \log D_p(\xi_{\rho}(x), \xi_{\rho}(y), \xi_{\rho}(z^l), \xi_{\rho}(z^r)). \] \end{definition} This invariant is also independent of the choice of lifts for the same reason as in the case of triangle invariants. Consider a closed leaf $C \in \mathcal{L}_K$ with the short arc $K_C$. Let $T^l$ (resp. $T^r$) $\in \mathcal{T}$ be the ideal triangles which spiral along $C$ from the left (resp. right) of $C$ and contain the endpoints of $K_C$. Lift $C$ and $K_C$ to a geodesic $\tilde{C}$ and an arc $\tilde{K}_C$ so that $\tilde{K}_C$ intersects $\tilde{C}$. In addition, we take lifts $\tilde{T}^l$ and $\tilde{T}^r$ of $T^l$ and $T^r$ respectively such that they contain the endpoints of $\tilde{K}_C$.
We denote, by $x$ and $y$, the repelling and attracting point of the geodesic $\tilde{C}$ respectively. Let us define the vertex $z^l, z^r$ of ideal triangles $\tilde{T}^l, \tilde{T}^r$ as follows. In the sides of $\tilde{T}^l$, two sides are asymptotic to $\tilde{C}$. One of these sides cuts the universal cover $\tilde{S}$ such that an ideal triangle $\tilde{T}^l$ and the geodesic $\tilde{C}$ is contained in the same connected component. The ideal vertex $z^l$ is the end of such a geodesic side of $\tilde{T}^l$ other from the ends of $x,y$. We define $v^r$ for $\tilde{T}^r$ similarly. Let $p$ be an integer with $1 \leq p \leq n-1$. \begin{definition} The $p$-th gluing invariant of a Hitchin representation $\rho$ along $C$ is defined by \[ \theta_p(C, \rho)= \log D_p(\xi_{\rho}(x), \xi_{\rho}(y), \xi_{\rho}(z^l), \xi_{\rho}(z^r)). \] \end{definition} The invariants above are well-defined on Hitchin components {\it i.e.} these invariants are independent of representatives of conjugacy class of Hitchin representations. \subsection{Closed leaf condition} There is a nice relation between length functions, triangle invariants and shearing invariants. Let $C$ be a closed leaf of the lamination $\mathcal{L}_K$. Let us focus on the right side of $C$ with respect to the orientation of $C$. Let $B_{1}, \cdots, B_{k}$ be the biinfinite leaves spiraling along $C$ from the right, and $T_{1}, \cdots, T_{k}$ the ideal triangles which spiral along $C$ from the right. Suppose that these leaves and triangles spiral to $C$ in the direction (resp. the opposite direction) of the orientation of $C$. Define $\overline{\sigma}_{p}(B_i, \rho)$ by $\sigma_p(B_i, \rho)$ if $B_i$ is oriented toward $C$, and by $\sigma_{n-p}(B_i, \rho)$ otherwise. Then we define \begin{align*} R_p^{\rho}(C) &=\sum_{i=1}^{k} \overline{\sigma}_{p}(B_i, \rho) + \sum_{i=1}^k \sum_{q+r=n-p}\tau_{pqr}((T_i, v_i), \rho), \\ (\mbox{resp. 
} R_p^{\rho}(C) &= -\sum_{i=1}^{k} \overline{\sigma}_{n-p}(B_i, \rho) - \sum_{i=1}^k \sum_{q+r=p}\tau_{(n-p)qr}((T_{i}, v_{i}), \rho) ~,) \end{align*} where $v_{i}$ is the ideal vertex of a lift $\tilde{T}_{i}$ of $T_i$ which is an attracting (resp. repelling) point of a lift of $C$. When we focus on the left side of $C$, we can define $L_p^{\rho}(C)$ similarly as follows. \begin{align*} L_p^{\rho}(C) &=- \sum_{i=1}^{k} \overline{\sigma}_{p}(B_i, \rho) - \sum_{i=1}^k \sum_{q+r=n-p}\tau_{pqr}((T_i, v_i), \rho). \\ (\mbox{resp. } L_p^{\rho}(C) &= \sum_{i=1}^{k} \overline{\sigma}_{n-p}^{\rho}(B_i, \rho) + \sum_{i=1}^k \sum_{q+r=p}\tau_{(n-p)qr}((T_{i}, v_{i}), \rho) ~.) \end{align*} \begin{proposition}[Bonahon-Dreyer \cite{BD14}, Proposition 13] For any $\rho \in {H}_n(S)$ and any closed leaf $C$, it holds that \[ l_p^{\rho}(C) = R_p^{\rho}(C) = L_p^{\rho}(C). \] \end{proposition} \subsection{Bonahon-Dreyer parameterization} We apply the Bonahon-Dreyer parameterization theorem in our case. For the geodesic lamination $\mathcal{L}_K$, we have $\frac{3|\chi(S)|}{2}$ closed leaves $C_i$, $3|\chi(S)|$ biinfinite leaves $B_{ij}$, and $2|\chi(S)|$ ideal triangles $T_{ijk}^l$. Set $N = \frac{3|\chi(S)|}{2}(n-1) + 3|\chi(S)|(n-1) + 2|\chi(S)|\binom{n-1}{2}$. By proposition 4.2, we have a relation between triangle invariants: \begin{proposition} \[ \tau_{pqr}((T, v_0), \rho) = \tau_{qrp}((T, v_1), \rho) = \tau_{rpq}((T, v_2), \rho). \] \end{proposition} Thus it is enough to consider only the triangle invariant defined for one ideal vertex and we denote the triangle invariant as $\tau_{pqr}(T, \rho)$ simply. Bonahon-Dreyer showed that Hitchin representations are parameterized by the all triangle invariants, shearing invariants, and gluing invariants we can consider. 
\begin{theorem}[Bonahon-Dreyer \cite{BD14}, \cite{BD17}] The map \begin{align*} &\Phi_{\mathcal{L}_K} : H_n(S) \rightarrow \mathbb{R}^N \\ &\Phi_{\mathcal{L}_K}(\rho) = (\tau_{pqr}(T_{ijk}^l, \rho), \cdots, \sigma(B_{ij},\rho), \cdots, \theta(C_i, \rho), \cdots). \end{align*} is a homeomorphism onto the image. Moreover the image of this map is the interior $\mathcal{P}_{\mathcal{L}_K}$ of a convex polytope. \end{theorem} The parameter space is coincides with the interior of the convex polytope which is defined by the closed leaf condition. We denote the coordinate of the target space $\mathbb{R}^N$ by $(\tau_{pqr}(T_{ijk}^l), \cdots, \sigma(B_{ij}), \cdots, \theta(C_i), \cdots)$. \section{Invariants of Fuchsian representations} Let $\rho = \iota_n \circ \rho$ be a Fuchsian representation defined by a hyperbolic holonomy $\rho : \pi_1(S) \rightarrow {\rm PSL}_2\mathbb{R}$. We denote, by $\partial \pi_1(S)^{(3)}$ (resp. $\partial \mathbb{H}^{(3)}$), the set of triples of pairwise distinct points of $\partial \pi_1(S)$ (resp. $\partial \mathbb{H}^2$. \begin{proposition} For any triples $(x,y,z) \in \partial \pi_1(S) ^{(3)}$ in clockwise order, the $(p,q,r)$-triple ratio $T_{pqr}(\xi_{\rho_n}(x),\xi_{\rho_n}(y),\xi_{\rho_n}(z)) = 1$. \end{proposition} \begin{proof} Since ${\rm PSL}_2\mathbb{R}$ transitively acts on the set of triples $\partial \mathbb{H}^{(3)}$, we can take a transformation $A \in {\rm PSL}_2\mathbb{R}$ such that $A(f_{\rho}(x))=\infty, A(f_{\rho}(y)) = 1, A(f_{\rho}(z)) = 0$. Using this normalization, we have \begin{align*} T_{pqr}(\xi_{\rho_n}(x),\xi_{\rho_n}(y),\xi_{\rho_n}(z)) &= T_{pqr}(\nu(f_{\rho}(x)), \nu(f_{\rho}(y)), \nu(f_{\rho}(z))) \\ &= T_{pqr}(\nu(A^{-1}(\infty)), \nu(A^{-1}(1)), \nu(A^{-1}(0)) \\ &= T_{pqr}(\iota_n(A)^{-1}\nu(\infty), \iota_n(A)^{-1}\nu(1), \iota_n(A)^{-1}\nu(0)) \\ &= T_{pqr}(\nu(\infty), \nu(1), \nu(0)). \end{align*} Thus it is enough to consider the value $T_{pqr}(\nu(\infty), \nu(1), \nu(0))$. 
Recall that the flag $\nu([a:b]) = \{ V_d \}_d$ for $[a:b] \in \mathbb{RP}^1$ consists of the nested vector space $V_d$ of dimension $d = 0,1, \cdots, n$ defined by \[ V_d = \{ P(X,Y) \in {\rm Poly}_n(X,Y) ~|~ \exists Q(X,Y) ~s.t.~ P(X,Y) = (aX + bY)^{n-d} Q(X,Y) \}. \] For example, the $r$-dimensional vector space $\nu(0)^r$ is \begin{align*} \nu(0)^d &= \{ P(X,Y) ~|~ \exists Q(X,Y) ~s.t.~ P(X,Y) = Y^{n-d} Q(X,Y) \} \\ &= \{ (k_1 X^{d-1} + k_2 X^{d-2} Y + \cdots + k_{d} Y^{d-1})Y^{n-d} ~|~ k_1, \cdots k_d \in \mathbb{R} \} \\ &= {\rm Span}<X^{d-1}Y^{n-d}, X^{d-2}Y^{n-d+1}, \cdots, Y^{n-1} > . \end{align*} Similarly, \begin{align*} \nu(\infty)^d &= {\rm Span}<X^{n-1}, X^{n-2}Y, \cdots, X^{n-d}Y^{d-1}>, \\ \nu(1)^d &= {\rm Span}<(X+Y)^{n-d}X^{d-1}, (X+Y)^{n-d}X^{d-2}Y, \cdots, (X+Y)^{n-d}Y^{d-1} >. \end{align*} To compute the triple ratio, first we choose a basis of $\bigwedge^d \nu(0)^d, \bigwedge^d \nu(1)^d, \bigwedge^d \nu(\infty)^d$ as follows: \begin{align*} t_0^d &= X^{d-1}Y^{n-d} \wedge X^{d-2}Y^{n-d+1} \wedge \cdots \wedge Y^{n-1} \in \bigwedge^d \nu(0)^d, \\ t_{\infty}^d &= X^{n-1} \wedge X^{n-2}Y \wedge \cdots \wedge X^{n-d}Y^{d-1} \in \bigwedge^d \nu(\infty)^d, \\ t_1^d &= (X+Y)^{n-d}X^{d-1} \wedge (X+Y)^{n-d}X^{d-2}Y \wedge \cdots \wedge (X+Y)^{n-d}Y^{d-1} \in \bigwedge^d \nu(1)^d. \end{align*} Then $T_{pqr}(\nu(\infty), \nu(1), \nu(0))$ is precisely equal to \[ \dfrac { t_{\infty}^{p+1} \wedge t_1^{q} \wedge t_0^{r-1} \cdot t_{\infty}^{p} \wedge t_1^{q-1} \wedge t_0^{r+1} \cdot t_{\infty}^{p-1} \wedge t_1^{q+1} \wedge t_0^{r} } { t_{\infty}^{p-1} \wedge t_1^{q} \wedge t_0^{r+1} \cdot t_{\infty}^{p} \wedge t_1^{q+1} \wedge t_0^{r-1} \cdot t_{\infty}^{p+1} \wedge t_1^{q-1} \wedge t_0^{r} }, \] so we should verify values of wedge products $t_{\infty}^{p} \wedge t_1^{q} \wedge t_0^{r}$ for integers $p,q,r$ with $0 \leq p,q,r \leq n$ and $p+q+r = n$. (We abuse notion $p,q,r$ which appeared in the statement of proposition 5.1.) 
The following formula is shown by easy linear algebra. \begin{lemma} Let $V$ be an $n$-dimensional vector space with a basis $\{b_1, \cdots, b_n \}$ and $\{v_1, \cdots, v_n\}$ be arbitrary vectors in $V$. If $v_i = \sum_{i=1}^n v_{ij}b_j$, then \[v_1 \wedge \cdots \wedge v_n = {\rm Det}((v_{ij}))b_1 \wedge \cdots \wedge b_n. \] \end{lemma} We fix a basis of ${\rm Poly}_n(X,Y)$ by $b_1 = X^{n-1}, b_2 = X^{n-2}Y, \cdots, b_n = Y^{n-1}$, and we may choose an identification $\bigwedge^n {\rm Poly}_n(X,Y) \rightarrow \mathbb{R}$ such that $b_1 \wedge b_2 \wedge \cdots \wedge b_n$ is identified with 1. Then, using this basis, \begin{align*} t_{\infty}^{p} \wedge t_1^{q} \wedge t_0^{r} &= X^{n-1} \wedge X^{n-2}Y \wedge \cdots \wedge X^{n-p}Y^{p-1} \wedge \\ &\qquad (X+Y)^{n-q}X^{q-1} \wedge (X+Y)^{n-q}X^{q-2}Y \wedge \cdots \wedge (X+Y)^{n-q}Y^{q-1} \wedge \\ &\qquad \quad X^{r-1}Y^{n-r} \wedge X^{r-2}Y^{n-r+1} \wedge \cdots \wedge Y^{n-1} \\ &= b_1 \wedge b_2 \wedge \cdots b_p \wedge \\ & \qquad \sum_{i=1}^{n-q}\binom{n-q}{i} b_{i+1} \wedge \sum_{i=1}^{n-q}\binom{n-q}{i} b_{i+2} \wedge \cdots \wedge \sum_{i=1}^{n-q}\binom{n-q}{i} b_{i+q} \wedge \\ &\qquad \quad b_{n-r+1} \wedge b_{n-r+2} \wedge \cdots \wedge b_n. \end{align*} By Lemma 5.2 and an easy computation of determinant of matrix, we get \[ t_{\infty}^{p} \wedge t_1^{q} \wedge t_0^{r} = \begin{vmatrix} \binom{p+r}{p} & \cdots & \binom{p+r}{p-q+1} \\ \vdots & \vdots & \vdots \\ \binom{p+r}{p+q-1} & \cdots & \binom{p+r}{p} \\ \end{vmatrix} \] if $q \neq 0$ and $t_{\infty}^{p} \wedge t_1^{0} \wedge t_0^{r} = 1$. We suppose $q \neq 0$. Note that we now consider an extended binomial coefficient defined by \[ \binom{n}{p} = \begin{cases} \dfrac{n!}{p!(n-p)!} & (0 \leq p \leq n) \\ 0 & (otherwise). \end{cases} \] Hence many zero entries may appear in this determinant. 
\begin{lemma} The determinant \[ \begin{vmatrix} \binom{p+r}{p} & \cdots & \binom{p+r}{p-q+1} \\ \vdots & \vdots & \vdots \\ \binom{p+r}{p+q-1} & \cdots & \binom{p+r}{p} \\ \end{vmatrix} \] is equal to \[ (-1)^\frac{(q-1)q}{2}\dfrac{(n-q)!(n-q+1)! \cdots (n-1)! 1!2! \cdots (q-1)!}{(n-r-q)!(n-r-q+1)! \cdots (n-r-1)! r! (r+1)! \cdots (r+q-1)!}. \] \end{lemma} \begin{proof}[Proof of Lemma 5.3.] The following formulae still hold for the definition of the extended binomial coefficient. \begin{align} \binom{n}{p} &= \binom{n}{n-p}, \\ \binom{n}{p} + \binom{n}{p+1} &= \binom{n+1}{p+1}. \end{align} By the elemental transformations of matrices, adding the second row to the first row, the third row to the second row, ..., and the $q$-th row to the $(q-1)$-th row and using the formula (2), we get \[ \begin{vmatrix} \binom{p+r}{p} & \cdots & \binom{p+r}{p-q+1} \\ \vdots & \vdots & \vdots \\ \binom{p+r}{p+q-1} & \cdots & \binom{p+r}{p} \\ \end{vmatrix} = \begin{vmatrix} \binom{p+r+1}{p+1} & \cdots & \binom{p+r+1}{p-q+2} \\ \binom{p+r+1}{p+2} & \cdots & \binom{p+r+1}{p-q+3} \\ \binom{p+r+1}{p+3} & \cdots & \binom{p+r+1}{p-q+4} \\ \vdots & \vdots & \vdots \\ \binom{p+r+1}{p+q-2} & \cdots & \binom{p+r+1}{p-1} \\ \binom{p+r+1}{p+q-1} & \cdots & \binom{p+r+1}{p} \\ \binom{p+r}{p+q-1} & \cdots & \binom{p+r}{p} \\ \end{vmatrix} .\] Next, by adding the second row to the first row, the third row to the second row, ..., and the $(q-1)$-th row to the $(q-2)$-th row and using the formula (2), \[ \begin{vmatrix} \binom{p+r+1}{p+1} & \cdots & \binom{p+r+1}{p-q+2} \\ \binom{p+r+1}{p+2} & \cdots & \binom{p+r+1}{p-q+3} \\ \binom{p+r+1}{p+3} & \cdots & \binom{p+r+1}{p-q+4} \\ \vdots & \vdots & \vdots \\ \binom{p+r+1}{p+q-2} & \cdots & \binom{p+r+1}{p-1} \\ \binom{p+r+1}{p+q-1} & \cdots & \binom{p+r+1}{p} \\ \binom{p+r}{p+q-1} & \cdots & \binom{p+r}{p} \\ \end{vmatrix} = \begin{vmatrix} \binom{p+r+2}{p+2} & \cdots & \binom{p+r+2}{p-q+3} \\ \binom{p+r+2}{p+3} & \cdots & \binom{p+r+2}{p-q+4} \\ 
\binom{p+r+2}{p+4} & \cdots & \binom{p+r+2}{p-q+5} \\ \vdots & \vdots & \vdots \\ \binom{p+r+2}{p+q-1} & \cdots & \binom{p+r+2}{p-1} \\ \binom{p+r+1}{p+q-1} & \cdots & \binom{p+r+1}{p} \\ \binom{p+r}{p+q-1} & \cdots & \binom{p+r}{p} \\ \end{vmatrix} .\] Iterating this deformation, we get \[ \begin{vmatrix} \binom{p+r}{p} & \cdots & \binom{p+r}{p-q+1} \\ \vdots & \vdots & \vdots \\ \binom{p+r}{p+q-1} & \cdots & \binom{p+r}{p} \\ \end{vmatrix} = \begin{vmatrix} \binom{p+r+q-1}{p+q-1} & \cdots & \binom{p+r+q-1}{p} \\ \binom{p+r+q-2}{p+q-1} & \cdots & \binom{p+r+q-2}{p} \\ \binom{p+r+q-3}{p+q-1} & \cdots & \binom{p+r+q-3}{p} \\ \vdots & \vdots & \vdots \\ \binom{p+r+2}{p+q-1} & \cdots & \binom{p+r+2}{p} \\ \binom{p+r+1}{p+q-1} & \cdots & \binom{p+r+1}{p} \\ \binom{p+r}{p+q-1} & \cdots & \binom{p+r}{p} \\ \end{vmatrix} = \begin{vmatrix} \binom{n-1}{p+q-1} & \cdots & \binom{n-1}{p} \\ \binom{n-2}{p+q-1} & \cdots & \binom{n-2}{p} \\ \binom{n-3}{p+q-1} & \cdots & \binom{n-3}{p} \\ \vdots & \vdots & \vdots \\ \binom{n-q+2}{p+q-1} & \cdots & \binom{n-q+2}{p} \\ \binom{n-q+1}{p+q-1} & \cdots & \binom{n-q+1}{p} \\ \binom{n-q}{p+q-1} & \cdots & \binom{n-q}{p} \\ \end{vmatrix} . \] Note that $p+q+r =n$ for the last equality. We consider a similar deformation for columns. By adding the second column to the first column, the third column to the second column, ..., and the $q$-th column to the $(q-1)$-th column, and using the formula (2), the determinant above is deformed to \[ \begin{vmatrix} \binom{n}{p+q-1}&\binom{n}{p+q-2}&\binom{n}{p+q-3} & \cdots &\binom{n}{p+2}&\binom{n}{p+1} & \binom{n-1}{p} \\ \vdots & \vdots & \vdots &~& \vdots & \vdots & \vdots \\ \binom{n-q+1}{p+q-1}&\binom{n-q+1}{p+q-2}&\binom{n-q+1}{p+q-3} & \cdots &\binom{n-q+1}{p+2}&\binom{n-q+1}{p+1} & \binom{n-q}{p} \\ \end{vmatrix} . 
\] By adding the second column to the first column, the third column to the second column, ..., and the $(q-1)$-th column to the $(q-2)$-th column, and using the formula (2), the determinant is again deformed to the following form \[ \begin{vmatrix} \binom{n+1}{p+q-1}&\binom{n+1}{p+q-2}&\binom{n+1}{p+q-3} & \cdots &\binom{n+1}{p+2}&\binom{n}{p+1} & \binom{n-1}{p} \\ \vdots & \vdots & \vdots &~& \vdots & \vdots & \vdots \\ \binom{n-q+2}{p+q-1}&\binom{n-q+2}{p+q-2}&\binom{n-q+2}{p+q-3} & \cdots &\binom{n-q+2}{p+2}&\binom{n-q+1}{p+1} & \binom{n-q}{p} \\ \end{vmatrix} . \] By iterating this deformation, the original determinant can be deformed to the following one: \[ \begin{vmatrix} \binom{n+q-2}{p+q-1}&\binom{n+q-3}{p+q-2}&\binom{n+q-4}{p+q-3} & \cdots &\binom{n+1}{p+2}&\binom{n}{p+1} & \binom{n-1}{p} \\ \vdots & \vdots & \vdots &~& \vdots & \vdots & \vdots \\ \binom{n-1}{p+q-1}&\binom{n-2}{p+q-2}&\binom{n-3}{p+q-3} & \cdots &\binom{n-q+2}{p+2}&\binom{n-q+1}{p+1} & \binom{n-q}{p} \\ \end{vmatrix} . \] Using $p+q+r=n$, and replacing columns and rows, the determinant above can be deformed as follows. 
\begin{align*} & \hspace{5pt} \begin{vmatrix} \binom{n+q-2}{p+q-1}&\binom{n+q-3}{p+q-2}& \cdots &\binom{n}{p+1} & \binom{n-1}{p} \\ \binom{n+q-3}{p+q-1}&\binom{n+q-4}{p+q-2}& \cdots &\binom{n-1}{p+1} & \binom{n-2}{p} \\ \vdots & \vdots & \vdots &\vdots & \vdots \\ \binom{n}{p+q-1}&\binom{n-1}{p+q-2}& \cdots &\binom{n-q+2}{p+1} & \binom{n-q+1}{p} \\ \binom{n-1}{p+q-1}&\binom{n-2}{p+q-2}& \cdots &\binom{n-q+1}{p+1} & \binom{n-q}{p} \\ \end{vmatrix} \\ &=(-1)^{\frac{q(q-1)}{2}} \begin{vmatrix} \binom{n-1}{p+q-1}&\binom{n-2}{p+q-2}& \cdots &\binom{n-q+1}{p+1} & \binom{n-q}{p} \\ \binom{n}{p+q-1}&\binom{n-1}{p+q-2}& \cdots &\binom{n-q+2}{p+1} & \binom{n-q+1}{p} \\ \vdots & \vdots & \vdots &\vdots & \vdots \\ \binom{n+q-3}{p+q-1}&\binom{n+q-4}{p+q-2}& \cdots &\binom{n-1}{p+1} & \binom{n-2}{p} \\ \binom{n+q-2}{p+q-1}&\binom{n+q-3}{p+q-2}& \cdots &\binom{n}{p+1} & \binom{n-1}{p} \\ \end{vmatrix} \\ &=(-1)^{\frac{q(q-1)}{2}}\cdot(-1)^{\frac{q(q-1)}{2}} \begin{vmatrix} \binom{n-q}{p} & \binom{n-q+1}{p+1} & \cdots & \binom{n-2}{p+q-2} & \binom{n-1}{p+q-1} \\ \binom{n-q+1}{p} & \binom{n-q+2}{p+1} & \cdots & \binom{n-1}{p+q-2} & \binom{n}{p+q-1} \\ \vdots & \vdots & \vdots &\vdots & \vdots \\ \binom{n-2}{p} & \binom{n-1}{p+1} & \cdots & \binom{n+q-4}{p+q-2} & \binom{n+q-3}{p+q-1} \\ \binom{n-1}{p} & \binom{n}{p+1} & \cdots & \binom{n+q-3}{p+q-2} & \binom{n+q-2}{p+q-1} \\ \end{vmatrix} \\ &= \begin{vmatrix} \binom{n-q}{n-r-q} & \binom{n-q+1}{n-r-q+1} & \cdots & \binom{n-2}{n-r-2} & \binom{n-1}{n-r-1} \\ \binom{n-q+1}{n-r-q} & \binom{n-q+2}{n-r-q+1} & \cdots & \binom{n-1}{n-r-2} & \binom{n}{n-r-1} \\ \vdots & \vdots & \vdots &\vdots & \vdots \\ \binom{n-2}{n-r-q} & \binom{n-1}{n-r-q+1} & \cdots & \binom{n+q-4}{n-r-2} & \binom{n+q-3}{n-r-1} \\ \binom{n-1}{n-r-q} & \binom{n}{n-r-q+1} & \cdots & \binom{n+q-3}{n-r-2} & \binom{n+q-2}{n-r-1} \\ \end{vmatrix} \hspace{2pt} \cdots (\dag). \end{align*} Lemma 5.3 is obtained by applying the following lemma. 
The determinant $\Diamond(n,k,l)$ below corresponds to a rhombus in Pascal's triangle. The entries of $\Diamond(n,k,l)$ are usual binomial coefficients, so positive integers. We can apply the formula in Lemma 5.4 to compute $(\dag)$ by replacing $n,k,l$ to $n-q, n-r-q, q-1$, and we get Lemma 5.3. \end{proof} \begin{lemma} Let $n,l \in \mathbb{N}$ and $0 \leq k \leq n$. The determinant \[ \Diamond(n,k,l)= \begin{vmatrix} \binom{n}{k} & \binom{n+1}{k+1} &\cdots& \binom{n+l}{k+l} \\ \binom{n+1}{k} & \binom{n+2}{k+1} &\cdots & \binom{n+l+1}{k+l} \\ \vdots & \vdots & \vdots & \vdots \\ \binom{n+l}{k} & \binom{n+l+1}{k+1} & \cdots & \binom{n+2l}{k+l} \\ \end{vmatrix} \] is equal to \[ \frac{n!(n+1)! \cdots (n+l)!}{k! (k+1)! \cdots (k+l)! (n-k)! \cdots (n-k+l)!} \cdot (-1)^{\frac{l(l+1)}{2}} 1! \cdots l!. \] \end{lemma} \begin{proof}[Proof of Lemma 5.4.] First, we deform $\Diamond(n,k,l)$ as follows. \begin{align*} & \Diamond(n,k,l) = \begin{vmatrix} \frac{n!}{k!(n-k)!} & \frac{(n+1)!}{(k+1)!(n-k)!} &\cdots & \frac{(n+l)!}{(k+l)!(n-k)!} \\ \frac{(n+1)!}{k!(n-k+1)!} & \frac{(n+2)!}{((k+1)!(n-k+1)!} &\cdots & \frac{(n+l+1)!}{(k+l)!(n-k+1)!} \\ \vdots & \vdots & \vdots & \vdots \\ \frac{(n+l)!}{k!(n-k+l)!} & \frac{(n+l+1)!}{(k+1)!(n-k+l)!} & \cdots & \frac{(n+2l)!}{(k+l)!(n-k+l)!} \\ \end{vmatrix} \\ &=C \begin{vmatrix} 1 &1 & \cdots &1 \\ (n+1) & (n+2) & \cdots & (n+l+1) \\ \vdots & \vdots & \vdots & \vdots \\ (n+1) \cdots (n+l) & (n+2) \cdots (n+l+1) & \cdots & (n+l+1) \cdots (n+2l) \\ \end{vmatrix}, \end{align*} where \[ C = \dfrac{n!(n+1)! \cdots (n+p)!}{k! (k+1)! \cdots (k+l)! (n-k)! \cdots (n-k+l)!}. 
\] We add the $(-l+1)$ times of the $l$-th row to the $(l+1)$-th row, the $(-l+2)$ times of the $(l-1)$-th row to the $l$-th row, ..., and ($-1$) times of the second row to the third row: \begin{align*} & \begin{vmatrix} 1 &1 & \cdots &1 \\ (n+1) & (n+2) & \cdots & (n+l+1) \\ \vdots & \vdots & \vdots & \vdots \\ (n+1) \cdots (n+l) & (n+2) \cdots (n+l+1) & \cdots & (n+l+1) \cdots (n+2l) \\ \end{vmatrix} \\ & \qquad = \begin{vmatrix} 1 &1 & \cdots &1 \\ (n+1) & (n+2) & \cdots & (n+l+1) \\ \vdots & \vdots & \vdots & \vdots \\ (n+1)^2 \cdots (n+l) & (n+2)^2 \cdots (n+l+1) & \cdots & (n+l+1)^2 \cdots (n+2l) \\ \end{vmatrix}. \end{align*} The iteration of such a deformation gives us the following determinant: \[ \begin{vmatrix} 1 &1 & \cdots &1 \\ (n+1) & (n+2) & \cdots & (n+l+1) \\ \vdots & \vdots & \vdots & \vdots \\ (n+1)^l & (n+2)^l& \cdots & (n+l+1)^l \\ \end{vmatrix}. \] We can use the formula of Vandermonde's determinant and expand this as follows. \begin{align*} \begin{vmatrix} 1 &1 & \cdots &1 \\ (n+1) & (n+2) & \cdots & (n+l+1) \\ \vdots & \vdots & \vdots & \vdots \\ (n+1)^l & (n+2)^l& \cdots & (n+l+1)^l \\ \end{vmatrix} &= (-1)^l l! \cdot (-1)^{l-1} (l-1)! \cdots (-1)\\ &= (-1)^{l +(l-1) + \cdots +1} l! (l-1)! \cdots 1 \\ &= (-1)^{\frac{l(l+1)}{2}} 1! \cdots l!. \end{align*} Thus \[ \Diamond(n,k,l) = \frac{n!(n+1)! \cdots (n+l)!}{k! (k+1)! \cdots (k+l)! (n-k)! \cdots (n-k+l)!} \cdot (-1)^{\frac{l(l+1)}{2}} 1! \cdots l!. \] \end{proof} Finally, applying Lemma 5.3 to $T_{pqr}(\nu(\infty), \nu(1), \infty(0))$, we can check the value is equal to $1$. Therefore the triple ratio of ordered triple is always equal to $1$ when we consider the Veronese flag curve. We finish the proof of proposition 5.1. \end{proof} \begin{proposition} Let $(a,b,c,d) \in \partial\pi_1(S)^{(4)}$ be a quadruple in counterclockwise order. 
Then $p$-th double ratio $D_p(\xi_{\rho_n}(a), \xi_{\rho_n}(c), \xi_{\rho_n}(b), \xi_{\rho_n}(d))$ is equal to $-z^{-1}$, where $z=z(f_{\rho}(c),f_{\rho}(d),f_{\rho}(a),f_{\rho}(b))$ is the cross ratio of the quadruple $(f_{\rho}(c),f_{\rho}(d),f_{\rho}(a),f_{\rho}(b))$. \end{proposition} \begin{proof} The proof is similar to one of proposition 5.1. Let $A \in {\rm PSL}_2\mathbb{R}$ be a transformation which sends $f_{\rho}(c)$ to $0$, $f_{\rho}(d)$ to $1$, $f_{\rho}(a)$ to $\infty$. The transformation $A$ maps $f_{\rho}(b)$ to the cross ratio $z= z(f_{\rho}(c),f_{\rho}(d),f_{\rho}(a),f_{\rho}(b))$. Then, by the same computation with the case of triple ratio, \[ D_p(\xi_{\rho_n}(a), \xi_{\rho_n}(c), \xi_{\rho_n}(b), \xi_{\rho_n}(d)) = D_p(\nu(\infty), \nu(0), \nu(z), \nu(1)). \] The flags $\nu(\infty), \nu(0), \nu(1), \nu(z)$ is defined by the following vector spaces: \begin{align*} \nu(\infty)^d &= {\rm Span}< b_1, b_2, \cdots, b_d> \\ \nu(0)^d &= {\rm Span}<b_{n-d+1}, b_{n-d+2}, \cdots, b_n > \\ \nu(1)^1 &= \mathbb{R} \sum_{i=0}^{n-1}\binom{n-1}{i}b_{i+1} \\ \nu(z)^1 &= \mathbb{R} \sum_{i=0}^{n-1}\binom{n-1}{i}z^{n-1-i}b_{i+1} \end{align*} where $b_1, \cdots, b_n$ are the basis of ${\rm Poly}_n(X,Y)$ we used. 
We choose a basis of the wedge products of the vector spaces $\bigwedge^d \nu(\infty)^d, \bigwedge^d \nu(0)^d, \nu(1)^1, \nu(z)^1$ as follows: \begin{align*} s_{\infty}^d &= b_1 \wedge b_2 \wedge \cdots \wedge b_d \in \bigwedge^d \nu(\infty)^d \\ s_0^d &= b_{n-d+1} \wedge b_{n-d+2} \wedge \cdots \wedge b_n \in \bigwedge^d \nu(0)^d \\ s_1^1 &= \sum_{i=0}^{n-1}\binom{n-1}{i}b_{i+1} \in \nu(1)^1 \\ s_z^1 &= \sum_{i=0}^{n-1}\binom{n-1}{i}z^{n-1-i}b_{i+1} \in \nu(z)^1 \end{align*} Recall that the double ratio $D_p(\nu(\infty), \nu(0), \nu(z), \nu(1))$ is defined by \[ D_p(\nu(\infty), \nu(0), \nu(z), \nu(1)) = - \dfrac{ s_{\infty}^p \wedge s_0^{n-p-1} \wedge s_z^1 \cdot s_{\infty}^{p-1} \wedge s_0^{n-p} \wedge s_1^1}{s_{\infty}^p \wedge s_0^{n-p-1} \wedge s_1^1 \cdot s_0^{p-1} \wedge s_0^{n-p} \wedge s_z^1} \] Thus it is enough to compute each factors of this fraction. The computation is very simple: \begin{align*} s_{\infty}^p \wedge s_0^{n-p-1} \wedge s_z^1 &= \begin{vmatrix} {\rm Id}_p & \mbox{\LARGE 0} & \binom{n-1}{0}z^{n-1} \\ & & \binom{n-1}{1}z^{n-2} \\ & & \vdots \\ \mbox{\LARGE 0} & {\rm Id}_{n-p-1} & \binom{n-1}{n-1}z^{0} \end{vmatrix}\\ &= (-1)^{n-p-1}\binom{n-1}{p}z^{n-p-1}, \\ \end{align*} \begin{align*} s_{\infty}^p \wedge s_0^{n-p-1} \wedge s_1^1 &= \begin{vmatrix} {\rm Id}_p & \mbox{\LARGE 0} & \binom{n-1}{0} \\ & & \binom{n-1}{1} \\ & & \vdots \\ \mbox{\LARGE 0} & {\rm Id}_{n-p-1} & \binom{n-1}{n-1} \end{vmatrix}\\ &= (-1)^{n-p-1}\binom{n-1}{p} .\\ \end{align*} \end{proof} \begin{theorem} If $\rho_n = \iota_n \circ \rho : \pi_1(S) \rightarrow {\rm PSL}_n\mathbb{R}$ is a ${\rm PSL}_n\mathbb{R}$-Fuchsian representation, then it follows that \begin{itemize} \item[(i)] all triangle invariants $\tau_{pqr}(T_{ijk}, \rho_n)$ are equal to $0$, \item[(ii)] all shearing invariants $\sigma_p(B_{ij}, \rho_n)$ and all gluing invariants $\theta_p(C_i,\rho_n)$ are independent of the index $p$. 
\end{itemize} \end{theorem} \begin{proof} (i) Recall the definition of triangle invariants. Fix a lift $\tilde{T}_{ijk}$ of an ideal triangle $T_{ijk}$. Let $x,y,z \in \partial \pi_1(S)$ be the vertices of $\tilde{T}_{ijk}$ which are in clock-wise ordering. Then $\tau_{pqr}(T_{ijk}, \rho_n) = \log ( T_{pqr}(\xi_{\rho_n}(x), \xi_{\rho_n}(y) , \xi_{\rho_n}(z)))$. In this case, the triple ratio is equal to $1$ by proposition 5.1, so $\tau_{pqr}(T_{ijk}, \rho_n)=0$. (ii)Let $\tilde{B}_{ij}$ be a lift of a biinfinite leaf $B_{ij}$ and $\tilde{T}^l$ and $\tilde{T}^r$ be lifts of the left $T^l$ and right triangles $T^r$ respectively. Respecting the orientation of $\tilde{B}_{ij}$, we label $x,y,z^l,z^r$ on the ideal vertices of $\tilde{T}^l, \tilde{T}^r$ as in Section 4.2. Then the quadruple $(x, z^l, y, z^r)$ is clock-wisely ordered, so by proposition 5.5, \begin{align*} \sigma_p(B_{ij}, \rho_n) &= \log D_p(\xi_{\rho_n}(x),\xi_{\rho_n}(y),\xi_{\rho_n}(z^l),\xi_{\rho_n}(z^r)) \\ &= \log -z(f_{\rho}(y),f_{\rho}(z^r),f_{\rho}(x),f_{\rho}(z^l))^{-1}. \end{align*} Especially, the shearing invariant is independent of the index $p$. We can similarly show the case of gluing invariants. The differences are only in the choice of ideal triangles and a quadruple of ideal vertices which are used in the definition of the gluing invariants. \end{proof} \begin{corollary} The shearing invariants $\sigma_p(B_{ij}, \rho_n)$ of a Fuchsian representation $\rho_n = \iota_n \circ \rho$ is equal to the shearing parameter along the biinfinite leaf $B_{ij}$ defined by $\rho$. \end{corollary} \begin{proof} We have $ \sigma_p(B_{ij}, \rho_n) = \log -z(f_{\rho}(y),f_{\rho}(z^r),f_{\rho}(x),f_{\rho}(z^l))^{-1}, $ and this is equal to the shearing parameter along $B_{ij}$ by proposition 2.4. 
\end{proof} \section{Fuchsian locus is a slice.} Let $\mathcal{S}_{\mathcal{L}_K}$ be a slice of the convex polytope $\mathcal{P}_{\mathcal{L}_K}$, the Bonahon-Dreyer parameter space, defined by $\tau_{pqr}(T^0_{ijk}), \tau_{pqr}(T^1_{ijk}) = 0$, $\sigma_p(B_{ij}) = \sigma_q(B_{ij})$ and $\theta_p(C_i) = \theta_q(C_i)$. By theorem 5.6, the image of the Fuchsian locus $F_n(S)$ by the Bonahon-Dreyer parameterization $\Phi_{\mathcal{L}_K}$ is contained in this slice $\mathcal{S}_{\mathcal{L}_K}$. \begin{theorem} The restriction map $\Phi_{\mathcal{L}_K} : F_n(S) \rightarrow \mathcal{S}_{\mathcal{L}_K}$ is surjective. \end{theorem} \begin{proof} Suppose that $z_{ij}, w_i \in \mathbb{R}$ give a point of the parameter space $\mathcal{S}_{\mathcal{L}_K}$ by $\tau_{pqr}(T^0_{ijk}) , \tau_{pqr}(T^1_{ijk})= 0, \sigma_p(B_{ij}) = z_{ij}, \theta_p(C_i) = w_i$. We construct a hyperbolic structure of $S$ with a holonomy $\eta$ such that $\Phi_{\mathcal{L}_K}(\iota_n \circ \eta)$ is equal to the point of $\mathcal{S}_{\mathcal{L}_K}$. We show this only in the case of the maximal geodesic lamination of type (I) because the argument is completely similar. First we focus on each pants which is given by the pants decomposition by $\{C_i\}$. Let $P_{ijk}$ be a pants bounded three closed geodesic leaf $C_i, C_j, C_k$. By proposition 2.5, the hyperbolic structure of a pair of pants is uniquely determined by the shearing parameters along biinfinite leaves $B_{ij}, B_{jk}, B_{ki}$. We endows with $P_{ijk}$ the hyperbolic structure $\rho_{ijk}$ defined by $\sigma^{\rho_{ijk}}(B_{ij}) = z_{ij}, \sigma^{\rho_{ijk}}(B_{jk}) = z_{jk}, \sigma^{\rho_{ijk}}(B_{ki}) = z_{ki}$. Since $z_{ij}, z_{jk}, z_{ki}$ satisfy the closed leaf condition, they are indeed in the image of the shearing parameterization $\sigma_{\mathcal{L}_K}$. Now we glue the hyperbolic structures of $P_{ijk}$. The hyperbolic structures of each $P_{ijk}$ gives the length of closed leaves $C_i$. 
For example, the length of $C_i$ is given by $l(C_i) = |z_{ij} + z_{ik}|$, see Proposition 2.6. We take a hyperbolic structure of $S$ such that the hyperbolic length of $C_i$ is equal to given $l(C_i)$ by the Fenchel-Nielsen coordinate of $S$ associated to the pants decomposition along $\{C_i\}$. Let $\rho : \pi_1(S) \rightarrow {\rm PSL}_2\mathbb{R}$ be a hyperbolic holonomy of this hyperbolic structure and $f_{\rho} : \tilde{S} \rightarrow \mathbb{H}^2$ be the developing map. Fix a lift $\tilde{C}_i$ of the closed geodesic $C_i$. We lift the short transverse arc $K_i = K_{C_i}$ to $\tilde{K}_i$ so that $\tilde{K}_i$ intersects to $\tilde{C}_i$. The endpoints of $\tilde{K}_i$ are contained in two plaque $Q_i^l,Q_i^r$. They are lifts of one of $T^0_{ijk},T^1_{ijk}, T^0_{ilm}, T^1_{ilm}$ which are ideal triangles spiraling to $C_i$. We may assume that $Q_i^l$ is on the left and $Q_i^r$ is on the right with respect to the orientation of $C_i$. Let $x_i$ and $y_i$ be the starting and terminal points respectively. Choose ideal points $z_i^l$ and $z_i^r$ of plaques $Q_i^l$ and $Q_i^r$ respectively as in Section 4.2. We deform the hyperbolic structure $\rho$ of $S$ to a hyperbolic structure $\eta$ which realizing the following equation \[ \log -z(f_{\eta}(y),f_{\eta}(z_i^r),f_{\eta}(x),f_{\eta}(z_i^l))^{-1} = w_i \] by twist deformation. \begin{lemma} For any $r \in \mathbb{R}_{<0}$, there is a twist deformation $\eta$ of $\rho$ along $C_i$ such that $z(f_{\eta}(y),f_{\eta}(z_i^r),f_{\eta}(x),f_{\eta}(z_i^l)) = r$. \end{lemma} \begin{proof}[Proof of Lemma 6.2.] Consider the geodesic lamination $\mathscr{C}_i$ which consists of the preimage of $C_i$ by the covering map $f_{\rho}(\tilde{S}) \rightarrow S_{\rho}$, where $S_{\rho}$ is the surface with the hyperbolic structure $\rho$. Let $R_i^l$ and $R_i^r$ be plaques of $\mathscr{C}_i$ containing $Q_i^l$ and $Q_i^r$ respectively. Set $\tilde{C}_i = R_i^l \cap R_i^r$. 
We observe the behavior of cross ratio under the twist deformation along $\mathscr{C}_i$. The twists along leaves of $\mathscr{C}_i$ other from $\tilde{C}_i$ do not change the cross ratio $z(f_{\rho}(y),f_{\rho}(z_i^r),f_{\rho}(x),f_{\rho}(z_i^l))$ since the twists act on the quadruple by isometry. Only the twist along $\tilde{C}_i$ change the cross ratio to $z( f_{\rho}(y),f_{\rho}(z_i^r),f_{\rho}(x), {\rm Tw}_t \circ f_{\rho}(z_i^l))$, where that the map ${\rm Tw}_t$ is the extension of the twist deformation onto the ideal boundary of $\mathbb{H}^2$. Since \[ \lim_{t \rightarrow \infty}{\rm Tw}_t \circ f_{\rho}(z_i^l) = -\infty, ~~ \lim_{t \rightarrow -\infty}{\rm Tw}_t \circ f_{\rho}(z_i^l) = 0, \] there exists $t_0$ such that $z( f_{\rho}(y),f_{\rho}(z_i^r),f_{\rho}(x), {\rm Tw}_{t_0} \circ f_{\rho}(z_i^l)) = r$ for given $r$. \end{proof} Using this lemma, we can deform $\rho$ to $\eta$ which satisfies for each $i$ that \[ \log -z(f_{\eta}(y),f_{\eta}(z_i^r),f_{\eta}(x),f_{\eta}(z_i^l))^{-1} = w_i. \] In particular we apply Lemma 6.2 for $r = -e^{-w_i}$. In this deformation, we should check two twist deformations along distinct curves $C_i$ and $C_j$ do not change the gluing invariant each other. \begin{lemma} We suppose that the hyperbolic structure $\rho$ is deformed to a hyperbolic structure $\rho_i$ by a twist deformation along $C_i$. The twist deformation along $C_j$ does not change the cross ratio $z( f_{\rho_i}(y),f_{\rho_i}(z_i^r),f_{\rho_i}(x), f_{\rho_i}(z_i^l))$. \end{lemma} \begin{proof}[Proof of Lemma 6.3] If a lift $\tilde{C}_j$ of $C_j$ divides ideal points $f_{\rho_i}(y),f_{\rho_i}(z_i^r),f_{\rho_i}(x), f_{\rho_i}(z_i^l)$, then $\tilde{C}_j$ intersects $\bar{Q}^l \cup \bar{Q}^r$, where the closure is taken in $\mathbb{H}^2 \bigsqcup \partial \mathbb{H}^2$. It contradicts that $C_i$ and the projections of $Q^l$ and $Q^r$, which are ideal triangles spiraling to $C_i$, do not intersect to $C_j$ since $C_j$ is a pants-decomposing curve. 
Hence ideal points $f_{\rho_i}(y),f_{\rho_i}(z_i^r),f_{\rho_i}(x), f_{\rho_i}(z_i^l)$ are in a same plaque of the geodesic lamination $\mathscr{C}_j$ which is defined by the preimage of $C_j$. Since the cross ratio is invariant for isometries, we obtain the statement of the lemma. \end{proof} Thus we can deform the original structure $\rho$ of $S$ to a hyperbolic structure $\eta$ by a twist deformation along each $C_i$ so that, for each $i$, $\eta$ realizes the equation \[ \log -z(f_{\eta}(y),f_{\eta}(z_i^r),f_{\eta}(x),f_{\eta}(z_i^l))^{-1} = w_i .\] Then $\Phi_{\mathcal{L}_K} ( \iota_n \circ \eta)$ coincides with the given point defined by $\tau_{pqr}(T^0_{ijk}) , \tau_{pqr}(T^1_{ijk})= 0, \sigma_p(B_{ij}) = z_{ij}, \theta_p(C_i) = w_i$. We finish the proof of Theorem 6.1. \end{proof} \section{The case of surfaces with boundary} To define the Bonahon-Dreyer parameterization for surfaces with boundary, Bonahon-Dreyer used the result of Labourie-McShane. \begin{theorem}[Labourie-McShane \cite{LaMc09} Theorem 9.1.] Let $S$ be a compact hyperbolic oriented surface with nonempty boundary, and $\rho: \pi(S) \rightarrow {\rm PSL}_n\mathbb{R}$ be a Hitchin representation. Then there exists a Hitchin representation $\hat{\rho} : \pi_1(\hat{S}) \rightarrow {\rm PSL}_n\mathbb{R}$ of the fundamental group of the double $\hat{S}$ of $S$ such that the restriction $\hat{\rho}$ to $\pi_1(S)$ is equal to $\rho$. \end{theorem} For the flag curve $\hat{\xi}_{\hat{\rho}} : \partial \pi_1(\hat{S}) \rightarrow {\rm Flag}(\mathbb{R}^n)$, we set $\xi_{\rho} = \hat{\xi}_{\hat{\rho}}|\partial \pi_1(S)$, the restriction to the boundary of $S$. We call this restriction the {\it restricted flag curve}. We use this restriction to define the Bonahon-Dreyer parameterization of surfaces with boundary. As the case of closed surfaces, we consider triangle, shearing, and gluing invariants defined by restricted flag curves. 
In particular, the parameterization map $\Phi_{\mathcal{L}_K} : H_n(S) \rightarrow \mathbb{R}^N$ is defined by \begin{itemize} \item all triangle invariants for ideal triangles which give the ideal triangulation by $\mathcal{L}_K$, \item all shearing invariants for biinfinite leaves of $\mathcal{L}_K$, and \item all gluing invariants for closed leaves of $\mathcal{L}_K$ which are not a boundary component of $S$. \end{itemize} The range is the interior of a convex polytope in $\mathbb{R}^N$. The convex polytope is defined by the closed equality condition for closed leaves which are not on boundary of $S$, and the closed inequality condition for boundary components. Here the closed inequality condition is the condition $L_p^{\rho}~ \mbox{or}~R_p^{\rho}(C) > 0$. In this case, Theorem 5.6 and Theorem 6.1 also hold. To check this, we focus on the doubling construction of ${\rm PSL}_n\mathbb{R}$-Fuchsian representations. In the proof of the existence of Hitchin doubles (Theorem 9.1 of \cite{LaMc09}), we can see that the double of a Fuchsian representation $\iota_n \circ \rho$ is $\iota_n \circ \hat{\rho}$, the Fuchsian representation induced by the hyperbolic double $\hat{\rho}$ of the hyperbolic holonomy $\rho$. Thus the restricted flag curve of $\iota_n \circ \rho$ is the restriction of the Veronese flag curve of $\iota_n \circ \hat{\rho}$ and Theorem 5.6 and Theorem 6.1 for non-closed surface are shown similarly.
1,108,101,565,826
arxiv
\section{Conclusion} \vspace{-5pt} \label{sec:summary and future work} In this work, we address the challenge of vehicle localization and a propose a scalable approach for accurate and efficient visual localization geared for real time performance. We first perform a large-scale analysis of GPS quality in urban areas, and generate comprehensive dataset for benchmarking vehicle localization in these areas. We then introduce a hybrid coarse-to-fine approach for accurate vehicle localization in urban environments based on efficient visual search and ego-motion. A low-dimensional global descriptor is introduced for fast retrieval of coarse localization, which is then fused with the vehicle ego-motion to regularize localization error and to provide high accuracy localization stream. Next, we introduce a large-scale dataset based on real-world dashcam and GPS data to evaluate our model on realistic driving data. Finally, we conduct an extensive evaluation of our approach in challenging urban environments and demonstrate a order of magnitude reduction in localization error. In future work we would like to explore improvements in the method's efficiency by reducing the dimension of the VL-GIST descriptor. For that, we can utilize our triplet sampling policy within any triplet architecture suggested for deep hashing (e.g., \cite{Norouzi2012HammingDM, Wang2016DeepSH, Liu2018DeepTQ}). In addition, we would like to study the relationship between the localization performance and the amount of visual data that is used for learning the VL-GIST representation \section{Introduction} \label{sec:introduction} \begin{figure}[t!] \begin{center} \includegraphics[width=\linewidth]{Figures/method_overview.png} \caption{Method Overview: Given a video stream of images, a hybrid visual search and ego-motion approach is applied to leverage both image representation and temporal information. 
The VL-GIST representation is applied to provide a coarse localization fix of the image, while the visual ego-motion is used to to estimate the vehicle's motion between consecutive video images. Fusing vehicle dynamics with the coarse location fixes further regularizes the localization error and yields a high accuracy location data stream. } \label{fig:method_overview} \end{center} \end{figure} Robust and accurate vehicle localization plays a key role in building safety applications based on Vehicle-to-Vehicle (V2V) networks. A V2V network allows vehicles to communicate with each other and to share their location and state, thus creating a 360-degree 'awareness' of other vehicles in proximity that goes beyond the line of sight. According to the National Highway Traffic Safety Administration (NHTS), such a V2V network offers the promise to significantly reduce crashes, fatalities, and improve traffic congestion \cite{V2VNHTSA}. The increasingly ubiquitous presence of smartphones and dashcams, with embedded GPS and camera sensors as well as efficient data connectivity, provides an opportunity to implement a cost-effective V2V ''Ground Traffic Control Network''. Such a platform would facilitate cooperative collision avoidance by providing advance V2V warnings, e.g., intersection movement assist to warn a driver when it is not safe to enter an intersection due to high collision probability with other vehicles. While GPS is widely used for navigation systems, its localization accuracy poses a critical challenge for proper operation of V2V safety networks. In some areas such as urban canyons environments, GPS signals are often blocked or partially available due to high-rise buildings \cite{noms_2016}. In \figref{fig:gps_accuracy_analysis_nyc} we show the accuracy of GPS readings from crowd-sourced data of over 250K driving hours taken in New York City (NYC). 
The figure demonstrates that the percentage of rides that suffer from urban canyon effects resulting in GPS errors of 10\,m or above is 40\%, and that of 20\,m or above is 20\%.
\item We introduce a scalable approach for accurate and efficient localization that is geared for real-time performance (Sec.~\ref{sec:method}). \item We conduct extensive evaluation of our approach in challenging urban environments and demonstrate an order of magnitude reduction in localization error (Sec.~\ref{sec:experiment}). \end{itemize} \begin{figure}[ht!] \begin{center} \includegraphics[width=\linewidth]{Figures/raw_gps_ecc_colorbar.png} \caption{Accuracy of GPS data crowd-sourced from over 250K driving hours in NYC. The percentage of rides that experience GPS errors of 10 meters or more (likely due to urban canyons effects) is 40\%, and that of 20 meters or more is 20\%.} \label{fig:gps_accuracy_analysis_nyc} \end{center} \end{figure} \section{Experiments} \label{sec:experiment} \subsection{Implementation Details} {\bf Visual retrieval model details.} We selected a ResNet50 \cite{He2016DeepRL} backbone and trained the network using the SGD optimizer with the 1cycle policy procedure described in \cite{Smith2017SuperConvergenceVF, Smith2018Asystematic} with a maximal learning rate of 0.003, minimum momentum of 0.85, maximum momentum of 0.95 and weight decay of 1e-6. To predict the location of an image, we first set a threshold by looking at the distribution of the distances in the VL-GIST feature space, of all the image tuples in the validation set which their location is less than 10 meter (we remove outlier samples). After getting the threshold we then predict the location of the queried image by a weighted average of the location of all the key-frames, which their distance in the image VL-GIST feature space to the quarried, is smaller than the threshold. The weights are determine by the ratio between the key-frame distance and the sum of the distances in the feature space. We predict the location only in cases where there are at least 5 neighbors that passed the threshold. 
We also extract a confidence score according to the distribution of the locations of the neighbors.
\begin{center} \label{results} \small \begin{tabular}{|l|ccc|c|c|} \hline \multicolumn{1}{|c|}{} & \multicolumn{3}{c|}{Accuracy} & \multicolumn{1}{c|}{ME} & \multicolumn{1}{c|}{Recall}\\ {} & {\textless 5m} & {\textless 10m} & {\textless 15m} & {} & {}\\ \hline \texttt{GPS-NN} & 0.09 & 0.24 & 0.39 & 21.5m & \textbf{0.97}\\ \hline \texttt{VL-GIST} & 0.20 & 0.41 & 0.61 & 13.5m & 0.48\\ \hline \textbf{VL-GIST*} & \textbf{0.30} & \textbf{0.63} & \textbf{0.82} & \textbf{9.7m} & 0.52 \\ \hline \end{tabular} \caption{Comparison between the three methods with 50 meter max GPS error.} \vspace{-10pt} \label{sec:exper:results2} \vspace{-15pt} \hspace{2.0cm} \end{center} \end{table} \begin{table}[t!] \begin{center} \label{results2} \small \begin{tabular}{|l|ccc|c|c|} \hline \multicolumn{1}{|c|}{} & \multicolumn{3}{c|}{Accuracy} & \multicolumn{1}{c|}{ME} & \multicolumn{1}{c|}{Recall}\\ {} & {\textless 5m} & {\textless 10m} & {\textless 15m} & {} & {}\\ \hline \texttt{GPS-NN} & 0 & 0.01 & 0.02 & 82.7m & \textbf{1}\\ \hline \texttt{VL-GIST} & 0.12 & 0.32 & 0.48 & 23.1m & 0.42\\ \hline \textbf{VL-GIST*} & \textbf{0.23} & \textbf{0.52} & \textbf{0.74} & \textbf{15.4m} & 0.41 \\ \hline \end{tabular} \caption{Comparison between the three methods with 200 meter max GPS error.} \label{sec:exper:results3} \vspace{-25pt} \hspace{2.0cm} \end{center} \end{table} \subsubsection{Visual retrieval for coarse localization} To estimate the visual localization quality we select an area of $750 \times 280$ square meters from the Image similarity dataset (see ~\secref{subsec:imageSim}). We hold out all the images from the test area (i.e., the triplet network was not trained on images from this area). We call these images key-frames (see ~\figref{fig:validation_density}). We set a maximal GPS error threshold (varies between 50-200 meter according to the experiment). For each key-frame, we randomly distorted the GPS location up to the maximal GPS error. 
Then we predict the location of the image, according to its VL-GIST nearest neighbors in the radius of the maximal GPS error, and compare it to the GPS location of the image.
Green dots show the ground truth coordinates, red dots show the raw GPS coordinates, orange dots show the VL-GIST prediction, and yellow dots show the regularized final coordinates. } \label{fig:final_waypoints} \vspace{-20pt} \end{center} \end{figure} Moreover, we compare in \figref{fig:gps_err_dist_comp} the original raw GPS coordinates' error distribution with the localization error distribution of the regularized coordinates, when combining the results from both the visual retrieval component and the ego-motion component. The normalized distributions show that we were able to reduce the variance in the localization error, and lower the mean error to be distributed compactly around 5 meters. \figref{fig:final_waypoints} shows the visualization of the entire process on three example rides in NYC. \begin{figure}[t!] \begin{center} \includegraphics[width=\linewidth]{Figures/gps_err_dist_comp.png} \caption{Comparison of the normalized distributions of the raw and regularized localization errors. The raw reported errors (blue) are aggregated from 250K different rides, and are spread out over a wide range, with under 1\% beyond the 35m error range. After regularizing the coordinates by fusing VL-GIST coarse correction with the ego-motion output, the distribution of localization errors becomes much more compact and can be approximated to a normal distribution around 5 meters.} \label{fig:gps_err_dist_comp} \vspace{-20pt} \end{center} \end{figure} \section{Related Work} \label{sec:related_work} \textbf{SfM and Visual Ego-Motion.} The Structure from Motion (SfM) approach (e.g., \cite{Schnberger2015FromSI}) uses a 3D scene model of the world constructed from the geometrical relationship of overlapping images. For a given query image, 2D-3D correspondences are established using descriptor matching (e.g., SIFT \cite{Lowe_2004_IJCV}). These matches are then used to estimate the camera pose. 
This approach is not always robust, especially when the query images are taken under significantly different conditions compared to the database images, or on straight roads that are not close to intersections and do not have enough perpendicular visual cues; the computational demands of this method mean it is not presently feasible to scale to millions of cars.
To locate images similar to a query image, we compute its descriptor and then get a ranked list of images from the database ordered by descriptor distances.
\textbf{Descriptor Learning.} In the last few years convolutions neural networks (CNN) proved to be a powerful image representation for various recognition tasks so several authors have proposed the use of the activations of convolutional layers as local features that can be aggregated into a descriptor suitable for image retrieval \cite{Babenko2015AggregatingLD,Razavian:2014:CFO:2679599.2679731}. However such approaches are not compatible with the geometric-aware models involved in the final re-ranking stages and thus can not compete with the state-of-the-art methods. Since we want that the distance between two descriptors of similar images will be smaller than the distance between descriptor of two distinct images, it is natural to consider network architectures developed for metric learning such as siamese \cite{RTC16} or triplet \cite{Schroff2015FaceNetAU, Wang2014LearningFI} learning networks. Arandjelovi\'c et al \cite{Arandjelovic16} propose a new training layer, NetVLAD, that can be plugged in any CNN architecture. The architecture mimics the classical approaches, that is local descriptors are extracted and then pooled in an orderless manner to finally produce a fixed size unit descriptor. A dataset for training the network was constructed by using the Google Street View Time allowing accessing multiple street-level panoramic images taken at different times at close-by spatial locations. The authors demonstrated that NetVlad descriptor outperformed state-of-the-art learned and not-learned descriptors on the Pittsburgh 250k \cite{Torii2013VisualPR} and the Tokyo 24/7 \cite{Torii2015247PR} datasets. A further step of dimensionality reduction using PCA with whitening followed by L2-normalization \cite{ECCV12} is applied to reduce the large NetVLAD, namely 16k or 32k, descriptor to a size of 4096. The R-MAC network of Tiolias et al. 
\cite{Tolias16} was developed to allow applying geometric-aware methods for re-ranking, and it does so by producing a global image representation by aggregating the activation features of a CNN in a fixed layout of spatial regions, followed by whitening with PCA.
Each vehicle is equipped with a dashcam
Each video, in turn, is part of a full ride of a single vehicle.
} \label{fig:gps_vs_annotated_gt} \end{center} \end{figure} \section{Method} \label{sec:method} We improve the raw location data using a hybrid approach consisting of visual similarity to obtain coarse location fixes (i.e., of 10 meter accuracy) and further refinement and regularization using visual ego-motion to yield an accurate location stream (i.e., of 5 meter accuracy). \subsection{Self-Supervised Learning from Triplets} The model structure is a deep CNN followed by three small fully connected layers where the final layer is $L_2$ normalized. The network is trained in a self-supervised manner with a variant of the triplet loss: Let $f(x) \in \mathcal{R}^d$ be the output of the embedding layer for an image $x$ and let $x^a, x^p, x^n$ be the anchor, positive and negative images. Then the triplet loss is just the cross entropy loss of \[ \textup{softmax}( (D_p,D_n)) \] where $D_p = ||f(x^a)-f(x^p)||_2$, the positive distance, is the distance between the embedding of the anchor image and the embedding of the positive image and $D_n = ||f(x^a)-f(x^n)||$ is the negative distance. In order to effectively train the model with the triplet loss to produce a good embedding, we utilize our image similarity dataset as a source for our triplet sampling. In particular we produce three triplet generators: \begin{itemize} \item \textbf{Regular triplets}. This generator produces triples in which the anchor image is close to the positive image and far from the negative image. The anchor image is randomly sampled from our dataset, the positive image is sampled from all images that are close up to 10 meters to the anchor image and are oriented in the same direction (namely, the difference in the GPS heading of the two images is up to 20 degrees) while the negative image is sampled from all images that are far away, say more than 500 meters from the anchor image. Special care is taken to assure than none of the images are from the same driving video. 
Two examples of triplets sampled by this sampler are shown in \figref{fig:triplets} (a)-(b). \item \textbf{Random hard negative triplets}. This generator produces anchor and positive images in the same way as the regular triplet sampler but with harder negatives. More precisely, the negative image is sampled from images that are at distance between 20 to 30 meters from the anchor and roughly in the same orientation. Examples for such triples are shown in \figref{fig:triplets} (e)-(f). \item \textbf{Video sampler.} We utilize the inherent spatial ordering between consecutive images in a video. First, we sample a video from a collection of driving videos and then sample an image from this video as an anchor. The positive image is the closest frame to the anchor provided that it's distance from the anchor is less than 10 meters. The negative image is the closest image to the anchor provided that it's distance from the anchor is between 25 and 50 meters. As before, a triplet is selected only if the anchor, positive and negative images have roughly the same orientation. While this sampling procedure generate triplets that are highly correlated it is still useful, on top of the other samplers, since we have high confidence in the spatial ordering and the relevancy of the negative example as shown in \figref{fig:triplets} (c)-(d). \end{itemize} During training we randomly sample one of the above generators and use it to produce the next triplet to train. This sampling methods guarantees that the embedding layer will be invariant to weather, illumination and dynamic objects such as vehicles or pedestrians. The video sampler and random hard negative samplers refine the embedding so that the descriptors produced reflect the notion of distance to the query image. \begin{figure}[ht!] \begin{center} \includegraphics[width=\linewidth]{Figures/tripletsss.png} \caption{Examples of the three types of triplets. 
In each row, the leftmost two images are matching in location and heading, while the rightmost frame is the negative example of that triplet. (a) Regular triplets showing the Brooklyn Bridge from two different rides compared with a randomly sampled street. (b) Another regular triplet example, showing invariance to weather and lighting conditions. (c)+(d) Ride triplet showing two close frames and one negative frame from the same ride. (e)+(f) Hard negative triplet showing invariance to lighting conditions and and camera orientation.} \label{fig:triplets} \end{center} \end{figure} \subsection{Efficient Retrieval Inference} \label{sec:method-retrieval} The visual retrieval task boils down to comparing the descriptor of the query image to the database to obtain a ranked list of images form the database sorted by descriptors distances. A weighted average of the GPS coordinates of the k'th closest images, in descriptor space, yield a corrected GPS signal for the query image. There are several factors contributing to the performance of our retrieval pipeline: Restricting the number of images in the database to be ranked, speeding up the ranking procedure by using small descriptors and eliminating the need for additional re-ranking procedures. First, we have a geo-tagged image and thus we do not need to search the whole database for matching images. Thus we restrict our search only in an area of modest size around the query image according to the GPS accuracy. Because we rank images only in a small proximity to the query image, we discovered that we do not need any sort of re-ranking technique. The efficiency of the ranking procedure increases as the dimension of the descriptor decreases. We use a very simple triplet network \cite{Schroff2015FaceNetAU}, namely a deep CNN, followed by $L_2$ normalization and three fully connected layers that produce a small, 30 dimensional, embedding vector. 
This is in contrast to existing methods, see \cite{Magliani2018AnAR} which compares many methods, that report on descriptor dimensions in the range between 128 and 32k. \subsection{Visual Ego-Motion Estimation} \label{sec:method-egomotion} The visual retrieval approach provides coarse localization fixes with a noise distribution, as captured by the confidence of location prediction. We use visual ego-motion to reduce this noise term. That is, we estimate the vehicle's motion between consecutive video frames, and fuse the vehicle dynamics with the coarse fixes to regularize the location coordinates, yielding a (high-rate) data stream with lower localization error. \begin{figure}[t!] \begin{center} \includegraphics[width=\linewidth]{Figures/fusion_err_vs_noise_err.png} \caption{Localization error of the ego-motion prediction as a function of input location noise error in meters. These measurements were done by adding normally distributed noise to the ground truth at varying standard deviations, applying ego-motion, and extracting the regularized coordinates' error estimation. Using ego-motion yields a 2x-3x improvement in localization error. } \label{fig:fusion_vs_noise_err} \end{center} \end{figure} {\bf Vehicle model.} We follow Ackerman's steering model~\cite{Musleh2012VisualEM} and capture the kinematic motion of the vehicle between two time steps by two parameters: (a) a rotation, occurring around the center motion of the rear part of the vehicle and (b) a forward translation after the rotation. We use an end-to-end learning approach for ego-motion estimation, shown by recent work to be robust to image anomalies and imperfections~\cite{Costante2016ExploringRL}. We train a deep neural network, composed of CNN-based feature extraction, that observes a sequence of images and aims to predict the motion of the vehicle. It takes as an input a monocular image sequence.
At each time step, the two frames are resized, stacked together, and fed into the CNN to produce an effective feature for ego-motion estimation. The convolution layers are followed by two dense layers, and then split to two heads. Each head is composed of a 100 dimensional dense layer connected to a one dimensional dense layer. The network is trained using accurate location supervisory sequences (see ~\secref{sec:datasets}) with a combined loss: Let $x$ be the stacked images, and let $t$ and $r$ be the corresponding ground truth values of translation and rotation. The loss term is then defined as \[ \frac{1}{2}|f^t(x)-t| + \frac{1}{2}|f^r(x)-r| \] where $f^t(x) \in \mathcal{R}$ and $f^r(x) \in \mathcal{R}$ are the predicted translation and rotation values. We minimize the mean of the loss term across the whole training dataset. To compute the confidence of the ego-motion estimation, we split the values range of each ego motion parameter into multiple bins, and estimate the probability of a parameter to fall within a bin. Aggregating bin values around the mean yields an error range for the ego-motion predictions. \subsection{Fusion Algorithm} We use a Kalman filter to compute high accuracy location predictions. The state of the filter represents the 2D location of the vehicle in a cartesian coordinate system. The measurement inputs are the speed (translation divided by the inter-frame time) and steering of the vehicle, as computed by our ego-motion model; and the coarse 2D pose fixes from visual retrieval, each input with its noise estimation. With each new ego-motion estimation, we modify the vehicle's 2D location according to the new rotation and translation values. When a new coarse pose measurement is available, we fuse it with the current state to compute an updated location along with its uncertainty. 
Our Kalman filter formulation is similar to that found in Section III-B of~\cite{Pink2009Visual} with minor adjustments: we replace the pose measurements from the map matching (in~\cite{Pink2009Visual}) by pose measurements from visual retrieval (\secref{sec:method-retrieval}), and the measurements from visual odometry by those from ego-motion (\secref{sec:method-egomotion}).
1,108,101,565,827
arxiv
\section{Introduction} The goal of predicting pedestrian trajectories is to infer socially-acceptable paths based on previous steps while considering the social norms of other moving agents. Many earlier works~\cite{helbing1995social, 5459260, 5206641, yamaguchi2011you} on human trajectory prediction are based on deterministic approaches which yield the most likely single path. One of the earliest works in~\cite{helbing1995social} models a social force using attractive and repulsive forces between pedestrians. Since then, motion time-series and agent interactions have been applied to trajectory forecasting. With the development of recurrent neural networks (RNNs), pioneering works such as, Social-LSTM~\cite{alahi2016social} and Social-Attention~\cite{vemula2018social}, have adopted a social pooling and attention mechanisms between spatial neighbors. These approaches have become baseline models in areas such as spatial relation aggregation~\cite{gupta2018social, huang2019stgat, Shi2021sgcn, salzmann2020trajectron++, mohamed2020social, sun2020rsbg} and temporal future prediction~\cite{mangalam2020pecnet, sun2020reciprocal, zhao2020tnt, Lee_2017_CVPR, Marchetti_2020_CVPR, zhang2019sr}. \begin{figure}[t] \vspace{-0.5mm} \centering \includegraphics[width=1\linewidth,trim={25mm 0 25mm 0},clip]{figures/CVPR_figure_teaser_v1.pdf} \vspace{-6.5mm} \caption{An illustration of a probability distribution of stochastic trajectory prediction and selected paths from each sampling method. 
While the trajectories from the random sampling are biased in that they do not consider space of all possible distributions, our NPSN purposively generates the accurate route, turning to SHOP, even with its low probability.} \vspace{-2.5mm} \label{fig:teaser} \end{figure} Recently, generative models, which infer the distribution of potential future trajectories, are likely to inspire a major paradigm shift away from the single best prediction methods~\cite{gupta2018social,liang2019peeking,li2019conditional,shi2020multimodal,sadeghian2019sophie,kosaraju2019social,sun2020reciprocal,dendorfer2021mggan,zhao2019matf,tao2020dynamic,sun2020rsbg,shafiee2021Introvert,Lee_2017_CVPR,Ivanovic_2019_ICCV,salzmann2020trajectron++,huang2019stgat,mohamed2020social,liang2020garden,Shi2021sgcn,yu2020spatio,li2020Evolvegraph,mangalam2020pecnet,liu2021causal,liu2020snce}. The generative models represent all possible paths, such that pedestrians may go straight, turn left/right at an intersection or take a roundabout way to avoid obstacles. To efficiently establish this multi-modality, a stochastic process is introduced to the trajectory prediction~\cite{gupta2018social}, which models the inferred uncertainty of pedestrians' movements in every time frame. Stochastic trajectory prediction models start by generating a random hypothesis. Due to the non-deterministic nature of random sampling, the quality of the hypotheses depends on the number of samples. Ideally, an infinite number of hypotheses would be able to characterize all possible movements of pedestrians, but this is infeasible. In practice, a fixed number of multiple trajectories are randomly sampled using the Monte Carlo (MC) method, and all existing stochastic models follow this random sampling strategy. However, the number of samples is typically too small to represent socially-acceptable pedestrian trajectories because they are biased toward the random sampling, as illustrated in~\cref{fig:teaser}. 
In this paper, we revisit the state-of-the-art works which employ the stochastic process for multimodal prediction (\cref{fig:noise_models}-(a)$\sim$(c)). We prove that all of the expected values in the generated trajectory distributions with Generative Adversarial Networks (GANs)~\cite{gupta2018social, huang2019stgat, liu2021causal}, Conditional Variational Auto-Encoders (CVAEs)~\cite{salzmann2020trajectron++, mangalam2020pecnet, liu2020snce}, and Gaussian methods~\cite{mohamed2020social, Shi2021sgcn} are biased. Afterward, we introduce a Quasi-Monte Carlo (QMC) sampling method that effectively alleviates this problem using a low-discrepancy sequence, instead of random sampling. Lastly, we push the random sampling forward with a learnable method: Non-Probability Sampling Network (NPSN), a very small network that generates purposive sample sequences using observations and agent interactions in \cref{fig:noise_models}-(d). Without structurally modifying the existing models in any way, we achieve significant improvements in the performance of pedestrian trajectory prediction. This is accomplished by replacing one line of code on random sampling with our NPSN. Interestingly, one of the existing models using our NPSN as an auxiliary module achieves the best performance in all evaluation metrics. Unlike previous methods, the proposed approach focuses on the sampling method to generate a set of random latent vectors. To the best of our knowledge, our work is the first attempt to adopt QMC sampling and to propose a learnable method for purposive sampling in trajectory forecasting in~\cref{fig:teaser}. \begin{figure}[t] \vspace{-1mm} \centering \includegraphics[width=1\linewidth]{figures/CVPR_figure_models_v2.pdf} \vspace{-7mm} \caption{Illustrations of stochastic human trajectory prediction and our NPSN method. 
The red box indicates the latent vector.} \vspace{-2mm} \label{fig:noise_models} \end{figure} \vspace{-0.3mm} \section{Related Works} \vspace{-0.3mm} \subsection{Stochastic trajectory prediction} \vspace{-0.3mm} Convolutional neural network (CNN)-based approaches using Gaussian distribution have improved the efficiency of pedestrian trajectory prediction. Social-LSTM~\cite{alahi2016social}, a pioneering model in this field, predicts a bivariate Gaussian distribution consisting of five parameters for the observed trajectories of pedestrians. However, it has a limitation when inferring single paths, since it only selects the single best sample from the distribution at inference time. Follow-up works~\cite{vemula2018social,mohamed2020social,Shi2021sgcn,shi2020multimodal} predict multiple paths by sampling multiple next coordinates based on predicted distributions. As another methodology, a generative model is introduced to predict realistic future paths. Social-GAN~\cite{gupta2018social} first uses a generative framework that recursively infers future trajectories. The benefit of GAN is that it generates various outputs according to latent vectors. As a result, inter-personal, socially acceptable and multimodal human behaviors are accounted for in the pedestrian trajectory prediction. Such a research stream encourages defining a variety loss, which computes the loss only for the best prediction among multiple samples, for diverse sample generation~\cite{kosaraju2019social, sadeghian2019sophie, sun2020reciprocal, liu2021causal, dendorfer2021mggan, huang2019stgat}. Similarly, there have been attempts to predict diverse future generations using CVAE frameworks. DESIRE~\cite{Lee_2017_CVPR} uses a latent variable to account for the ambiguity of future paths and learns a sampling model to produce multiple hypotheses of future trajectories from given observations.
This approach provides a diverse set of plausible predictions without the variety loss, and shares inspiration to objectives in many CVAE-based models~\cite{salzmann2020trajectron++, mangalam2020pecnet, yu2020spatio,liu2020snce,Ivanovic_2019_ICCV}. All of these methods include a random sampling process and are sensitive to bias, due to the fixed number of samples, as above mentioned. In addition, current state-of-the-art models with CVAE frameworks outperform Gaussian distribution-based methods~\cite{mohamed2020social, Shi2021sgcn}. In this study, we analyze these phenomena with respect to the bias of stochastic trajectory prediction, and show that the Gaussian distribution-based approaches achieve noticeable performance improvements by minimizing the bias, even better than the CVAE-based methods. Lastly, we mention a recent deterministic approach~\cite{zhao2020tnt} that predicts multiple trajectories, which is beyond the scope of this paper. \vspace{-0.7mm} \subsection{Learning latent variables} \vspace{-0.7mm} Some works account for the transformation of latent spaces by using prior trajectory information. PECNet~\cite{mangalam2020pecnet} for example uses a truncation trick in latent space to adjust the trade-off between the fidelity and the variety of samples. In their learning approach, both IDL~\cite{li2019idl} and Trajectron++~\cite{salzmann2020trajectron++} predict the mean and standard deviation of a latent distribution in an inference step. Rather than directly predicting the distribution parameters, AgentFormer~\cite{yuan2021agent} uses a linear transform of Gaussian noise to produce the latent vector. These methodologies still run the risk of bias because of the random sampling of the latent vectors. In the present work, we aim to reduce the bias using a discrepancy loss of a set of sampled latent vectors. 
\vspace{-0.7mm} \subsection{Graph-based approaches} \vspace{-0.7mm} Pioneering works have introduced the concepts of social-pooling~\cite{alahi2016social, gupta2018social, sun2020reciprocal} and social-attention mechanisms~\cite{vemula2018social, zhang2019sr, li2020Evolvegraph} to capture the social interactions among pedestrians in scenes. Recently, Graph Neural Network (GNN)-based approaches~\cite{huang2019stgat, kosaraju2019social, mohamed2020social, liang2020garden, Shi2021sgcn, li2020Evolvegraph, Bae_Jeon_2021} have been introduced to model agent-agent interactions with graph-based policies. In the GNN-based works, pedestrians are regarded as nodes of the graph, and their social relations are represented as edge weights. Social-STGCNN~\cite{mohamed2020social} presents a Graph Convolutional Network (GCN)~\cite{kipf2016semi}-based trajectory prediction which aggregates the spatial information of distances among pedestrians. Graph Attention Networks (GATs)~\cite{velivckovic2018graph} implicitly assign more weighting to edges with high social affinity on the pedestrian graph~\cite{huang2019stgat,kosaraju2019social,sun2020rsbg,Shi2021sgcn,yu2020spatio}. Multiverse~\cite{liang2020garden} and SimAug~\cite{liang2020simaug} utilize GATs on 2D grids to infer feasible trajectories. Unlike these previous works, where GATs are used in the encoding process, we apply a GAT framework to a sampling process on the latent space to make a decoder predict future paths more accurately. \vspace{-0.5mm} \subsection {Monte Carlo Sampling Method} \vspace{-0.5mm} (Quasi-) Monte Carlo is a computational technique for numerical experiment using random numbers. Exploiting the random numbers allows one to approximate integrals, but this is highly error prone. The error directly depends on the random sampling methods from probability distributions. 
QMC sampling is developed with quasi-random sequences, known as low-discrepancy sequences~\cite{low_discrepancy} and is generated in a deterministic manner. It is widely utilized for many computer vision tasks, such as depth completion~\cite{DC_MC_2}, 3D reconstruction~\cite{MC_3Drecon,MC_3DPointCloudRegist}, motion tracking~\cite{MC_motionTracking} and neural architecture search~\cite{MC_NAS_1,MC_NAS_2}. We firstly apply QMC sampling to ensure uniform coverage of the sampling spaces for pedestrian trajectory prediction. Note that the sequence is uniformly distributed if the discrepancy tends to be zero, as the number of samples goes to infinity. \begin{figure}[t] \centering \vspace{-1mm} \includegraphics[width=\columnwidth]{figures/CVPR_figure_2Ddistribution.pdf} \vspace{-9mm} \caption{An example of the probability map of the truth plausible trajectory distribution and the generated distributions with $N\!\!\!\;=\!20$ samples $\smash{\hat{Y}_{l}^{1:T_{pred}}}$ using MC and QMC method on PECNet~\cite{mangalam2020pecnet}.} \vspace{-2mm} \label{fig:2d_distribution} \end{figure} \vspace{-0.5mm} \section{Generated Trajectories Are Biased} \vspace{-0.5mm} In this section, we start with the problem definition for pedestrian trajectory prediction in~\cref{subsec:problem}. We then theoretically demonstrate that generated trajectories from stochastic trajectory prediction models are biased toward random sampling in~\cref{subsec:bias}. We also introduce a way to alleviate the bias with a low-discrepancy sequence for stochastic prediction in~\cref{subsec:QMC}. \subsection{Problem Definition} \vspace{-0.5mm} \label{subsec:problem} \microtypesetup{disable} We formulate the pedestrian trajectory prediction task as a multi-agent future trajectory generation problem conditioned on their past trajectories. To be specific, during the observation time frames $1\kern-0.6ex\leq\kern-0.6ex t\kern-0.6ex\leq\kern-0.6ex T_{obs}$, there are $L$ pedestrians in a scene. 
The observed trajectory sequence is represented as $\smash{X_{l}^{1:T_{obs}}}\!\!=\!\{ X_l^t|t \in [1, ..., T_{obs}] \}$ for $\forall l\!\in\![1, ..., L]$, where $X_l^t$ is the spatial coordinate of each pedestrian $l$ at time frame $t$. With the given observed sequence, the goal of the trajectory prediction is to learn potential distributions to generate $N$ plausible future sequences $\smash{\hat{Y}_{l}^{1:T_{pred}}}\!\!=\!\{\hat{Y}_{l, n}^{t} | t \in [1, ..., T_{pred}], n \in [1, ..., N]\}$ for all $L$ pedestrians. \microtypesetup{enable} \vspace{-0.5mm} \subsection{Stochastic Trajectory Prediction is Biased.} \vspace{-0.5mm} \label{subsec:bias} The generated trajectory $\smash{\hat{Y}_{l}^{1:T_{pred}}}$ comes from a distribution of possible trajectories which are constructed by pedestrians' movements based on social forces~(\cref{fig:2d_distribution}). $\mathcal{T}_{truth}$ is an expectation value computed with a plausible trajectory distribution, and $\mathcal{T}_{gen}$ is calculated with $\smash{\hat{Y}_{l}^{1:T_{pred}}}$ of $N$ which are independent and identically distributed (IID) random samples, \ie the term is random if one uses different samples to generate trajectories. The expectation $\mathcal{T}_{gen}$ is a Monte Carlo estimate of an integral, \ie the relevant expectation. Suppose that the expectation we want to compute from the trajectory distribution is $\smash{I(\tau) = \int_{[0,1]^s} \tau(x)q(x)dx}$ which is the expected value of $\tau(x)$ for random variable $x$ with a density $q$ on the $s$-dimensional unit cube $[0,1]^s$.
Then, the Monte Carlo estimator for the generated trajectory distribution with $N$ samples can be formulated as below: \noindent\vspace{-2.5mm} \begin{equation} \hat{I}(\tau) = \hat{I}_{N,s}(\tau) = \frac{1}{N} \sum_{i=1}^{N} \tau(x_i), \vspace{-2mm} \end{equation} \vspace{-2mm} \begin{equation} Pr(\lim_{N \to \infty} \hat{I}(\tau) = I(\tau)) = 1, \label{eq:probability} \vspace{-0.5mm} \end{equation} where $Pr(\cdot)$ denotes a probability. By the strong law of large numbers~\cite{LawofLargeNumber}, the MC estimate converges to $I(\tau)$ as the number of samples $N$ increases without bound. Now, we assume that $\tau(x)$ has a finite variance $K(\tau)$ and define the error $\alpha$ as below: \noindent\vspace{-1.5mm} \begin{equation} \alpha = \hat{I}_{N,s}(\tau) - I(\tau), \vspace{-1mm} \end{equation} \vspace{-5mm} \begin{equation} \mathbb{E}[\alpha] = 0,\quad var(\alpha) = \frac{K(\tau)}{N}, \vspace{-0.5mm} \end{equation} where $\mathbb{E}$ is an expectation and $K(\tau)$ is $\int (\tau(x) - I(\tau))^2q(x) dx$. Note that $K(\tau)$ is non-negative and depends on the function being integrated. The algorithmic goal is to specify the procedure that results in lower variance estimates of the integral. Now consider a function of the generator $F$, which is sufficiently smooth, in a Monte Carlo integral $I(\tau)$. We apply the Taylor series expansion of $F(I(\tau)\!+\!\alpha)$ as follows: \noindent\vspace{-4mm} \begin{equation} F(\hat{I}_{N,s}(\tau)) = F(I(\tau)\!+\!\alpha) \qquad\qquad\qquad\qquad\qquad\quad \vspace{-2.5mm} \end{equation} \begin{equation*} \qquad\qquad\quad\; \approx F(I(\tau))\!+\!\alpha F'(I(\tau))\!+\!\alpha^2 \frac{F''(I(\tau))}{2}\!+\!O(\alpha^3).
\vspace{-2mm} \end{equation*} \noindent Therefore, the expectation value of $F(\hat{I}_{N,s}(\tau))$ can be formulated as below: \noindent\vspace{-2mm} \begin{equation} \mathbb{E}[F(\hat{I}_{N,s}(\tau))] = F(I(\tau)) + \frac{M}{N} + O(\frac{1}{N^2}), \vspace{-0.5mm} \end{equation} where $M = K(\tau)(F''(I(\tau))/2)$ and $\frac{M}{N}$ is the bias. Since the term $\mathcal{T}_{gen}$ is estimated with an MC integration, the estimate must have a bias of $\frac{M}{N} + O(\frac{1}{N^2})$. Note that the bias in the generated trajectories vanishes for $N \rightarrow \infty$; however, it is infeasible to utilize all infinite possible paths in practice. Since $M$ depends on the generator, the generated trajectories are differently biased depending on the number of generated samples as well as the generators, which is validated in \cref{subsec:method}. \subsection{Quasi-Monte Carlo for Trajectory Prediction} \label{subsec:QMC} The QMC method utilizes a low discrepancy sequence including the Halton sequence~\cite{halton} and the Sobol sequence~\cite{sobol}. Inspired by~\cite{QMC_faster_mc}, we select a Sobol sequence which not only shows consistently better performances than the Halton sequence, but also is up to 5 times faster than the MC method, even with lower error rates. From the view of numerical analysis, an inequality in~\cite{QMC} proves that low-discrepancy sequences guarantee more advanced sampling in~\cref{eq:probability} with fewer integration errors as below: \begin{equation} | \alpha | \leq V(\tau)~D^{*}_N, \end{equation} where $V(\tau)$ is the total variation of the function $\tau$, which is of bounded variation, and $D^{*}_N$ is the discrepancy of a sequence for the number of samples $N$. The inequality shows that a deterministic low-discrepancy sequence can be much better than the random one, for a function with finite variation.
In the mathematics community, it has been proven that the Sobol sequences have a rate of convergence close to $O((\log N)^s/N)$; for a random sequence it is $O(\sqrt{\log(\log N)/N})$ in~\cite{QMC_faster_mc,QMC}. For faster convergence, $s$ needs to be small and $N$ large (e.g., $N\!>\!2^s$). As a result, the low discrepancy sequences have lower errors for the same number of points ($N\!=\!20$) as shown in \cref{tab:qmc_result}. As an example, since $x_i$ are IID samples from a uniformly distributed unit box for MC estimates, the samples tend to be irregularly spaced. For QMC, as $x_i$ comes from a deterministic quasi-random sequence whose point samples are independent, they can be uniformly spaced. This guarantees a suitable distribution for pedestrian trajectory prediction by successively constructing finer uniform partitions. \cref{fig:random_points} displays a plot of a moderate number of pseudo-random points in 2-dimensional space. We observe regions of empty space where there are no points generated from the uniform distribution, which produce results skewed towards the specific destinations. However, the Sobol sequence yields evenly distributed points to enforce prediction results close to socially-acceptable paths. Unfortunately, low-discrepancy sequences such as the Sobol sequence are deterministically generated and make the trajectory prediction intractable when representing an uncertainty of pedestrians' movements with various social interactions. Adding randomness into the Sobol sequence by scrambling the sequence's base digits~\cite{scramble_sobol} is a solution to this problem. The resultant sequence retains the advantage of the QMC method, even with the same expected value. Accordingly, we utilize the scrambled Sobol sequence to generate pedestrian trajectories to account for the feasibility, the diversity, and the randomness of human behaviors.
\begin{figure}[t] \centering \vspace{-1mm} \includegraphics[width=\columnwidth,trim={0 23mm 0 0},clip]{figures/CVPR_figure_points.pdf} \vspace{-7mm} \caption{(Top): 2D scatter plots of 1,000 points with MC, QMC and NPSN. Stars indicate coordinates of a GT destination in the sampling space. (Bottom): Stochastic trajectory prediction results with the first 20 samples of the 1,000 points from each method.} \vspace{-2mm} \label{fig:random_points} \end{figure} \vspace{-0.5mm} \section{Non-Probability Sampling Network} \vspace{-0.5mm} In this section, we propose NPSN, which extends the sampling technique for pedestrian trajectory prediction based on observed trajectory. Unlike the previous methods, which sample $N$ paths in a stochastic manner, we construct a model that effectively chooses target samples using a non-probabilistic sampling technique illustrated in~\cref{fig:noise_models}-(d). \vspace{-0.5mm} \subsection{Non-Probability Sampling on Multimodal Trajectory Prediction} \vspace{-0.5mm} In contrast to stochastic sampling, purposive sampling, one of the most common non-probability sampling techniques~\cite{black2019business}, relies on the subjective judgment of an expert to select the most productive samples rather than random selection. This approach is advantageous when studying complicated phenomena in in-depth qualitative research~\cite{samplingforqualitative}. Since most people walk to their destinations using the shortest path, a large portion of labeled attributes in public datasets~\cite{5459260, crowdsbyexample} are straight paths. Generative attribute models learn the probabilistic distributions of social affinity features for the attribute of straight paths. However, due to the multimodal nature of human paths, the models must generate as many diverse and feasible paths as possible, using only a fixed number of samples. As a possible solution, we can purposively include a variety of samples on turning left/right and detouring around obstacles. 
In purposive sampling, a maximum variation is beneficial for multimodal trajectory prediction, when examining the diverse ranges of pedestrians' movements. We make this process a learnable method, aiming to generate $N$ heterogeneous trajectory samples with prior knowledge of past trajectories. \subsection{NPSN Architecture} We propose NPSN which substitutes the random sampling process of existing models with a learnable method. NPSN works as purposive sampling, which relies on the past trajectories of pedestrians when selecting samples in the distribution. As a result, when predicting a feasible future trajectory, a past trajectory can be used for the sampling process while also embedding informative features as a guidance. Unlike existing works~\cite{mangalam2020pecnet, li2019idl, salzmann2020trajectron++, yuan2021agent} that impose a restriction in the sampling space by limiting a distribution, we design all of the processes in a learnable manner. \noindent\textbf{Pedestrian graph representation.}\quad NPSN first captures the social relations using a GAT to generate socially-acceptable samples. For input trajectory $\smash{X_{l}^{1:T_{obs}}}$, a pedestrian graph $\mathcal{G}\!=\!(\mathcal{V}, \mathcal{E})$ is defined as a set of pedestrian nodes $\mathcal{V}\!=\!\{ v_l\,|\,l\!\in\![1, ..., L] \}$ and their relation edges $\mathcal{E}\!=\! \{ e_{i,j}\,|\,i, j\!\in\![1, ..., L] \}$. With the node features $H\!=\!\{ h_l | l \in [1, ..., L] \}$, learned feature maps for the social relation are shared across different pedestrian nodes in a scene. We utilize an attention mechanism for modeling the social interaction, whose effectiveness is demonstrated in previous works~\cite{huang2019stgat,Shi2021sgcn}. The GAT allows NPSN to aggregate the features for neighbors by assigning different importance to their edge $e_{i,j}$. Here, the importance value is calculated using the attention score between two node features $(h_i, h_j)$. 
\noindent\textbf{Purposive sampling.}\quad With the interaction-aware node features, we predict $N$ samples for each pedestrian. In particular, we use three MLP layers after the GAT layer for NPSN. By learning more prior information about samples of interest, prediction models using NPSN generate better samples. Each trajectory prediction model additionally receives an $s$-dimensional random latent vector along with the observed trajectory. Therefore, the NPSN must predict a set of output $S_l\!=\![S_{l, 1}, ..., S_{l, N}]$. The output passes through a prediction model to generate $N$ final trajectories for each pedestrian. For temporal consistency, we use the same set of purposive samples for all prediction time frames $[1, ..., T_{pred}]$ of each pedestrian node. This process is repeated for all pedestrian nodes, and the output shape of the NPSN is $S=\mathbb{R}^{L \times s \times N}$. \noindent\textbf{Loss function.}\quad To optimize trajectory prediction models with our NPSN, we use two loss functions to generate well-distributed purposive samples. First, a winner-takes-all process~\cite{rupprecht2017learning}, which generates a path closest to its ground truth, is trained to regress the accurate positions of pedestrians. Similar to~\cite{gupta2018social}, we measure a $L_2$ distance between the $N$ prediction paths and the ground-truth, and use only one path with the smallest error for training: \noindent\vspace{-2mm} \begin{equation} \mathcal{L}_{dist} = \frac{1}{L} \sum_{l=1}^{L}\min_{n \in [1, ..., N]} || \hat{Y}_{l, n}^{1:T_{pred}} - Y_{l}^{1:T_{pred}} ||. 
\vspace{-2mm} \end{equation} \begin{table*}[t] \vspace{-1mm} \large \centering \begin{tabular}{@{}c@{}} \resizebox{\linewidth}{!}{ \begin{tabular}{c ccc cccc cccc c ccc cccc cccc c} \toprule \multirow{3}{*}{} & \multicolumn{11}{c}{Social-STGCNN~\cite{mohamed2020social}~~($s=2$)} & & \multicolumn{10}{c}{SGCN~\cite{Shi2021sgcn}~~($s=2$)} \\ \cmidrule(r){2-12} \cmidrule(r){14-24} & \multicolumn{3}{c}{MC (Baseline)} & \multicolumn{4}{c}{QMC} & \multicolumn{4}{c}{\textbf{NPSN}} & & \multicolumn{3}{c}{MC (Baseline)} & \multicolumn{4}{c}{QMC} & \multicolumn{4}{c}{\textbf{NPSN}} \\ \cmidrule(r){2-4} \cmidrule(lr){5-8} \cmidrule(lr){9-12} \cmidrule(r){14-16} \cmidrule(lr){17-20} \cmidrule(lr){21-24} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} & & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} \\ \midrule ETH~~~ & 0.650 & 1.097 & 0.510 & \tul{0.611} & \tul{1.025} & \tbf{0.579} & 6.5\% & \tbf{0.443} & \tbf{0.652} & \tul{0.565} & 40.6\% & & 0.567 & 0.997 & 0.545 & \tul{0.495} & \tul{0.810} & \tul{0.596} & 18.8\% & \tbf{0.357} & \tbf{0.588} & \tbf{0.624} & 41.0\% \\ HOTEL~~~ & 0.496 & 0.858 & 0.270 & \tul{0.342} & \tul{0.517} & \tul{0.289} & 39.8\% & \tbf{0.213} & \tbf{0.342} & \tbf{0.298} & 60.2\% & & 0.308 & 0.533 & 0.295 & \tul{0.212} & \tul{0.309} & \tul{0.314} 
& 42.0\% & \tbf{0.159} & \tbf{0.253} & \tbf{0.355} & 52.6\% \\ UNIV~~~ & 0.441 & 0.798 & 0.637 & \tul{0.364} & \tul{0.628} & \tul{0.725} & 21.3\% & \tbf{0.278} & \tbf{0.443} & \tbf{0.762} & 44.5\% & & 0.374 & 0.668 & 0.689 & \tul{0.310} & \tul{0.555} & \tul{0.737} & 16.9\% & \tbf{0.229} & \tbf{0.394} & \tbf{0.820} & 41.0\% \\ ZARA1~~~ & 0.341 & 0.532 & 0.710 & \tul{0.315} & \tul{0.526} & \tul{0.775} & 1.1\% & \tbf{0.248} & \tbf{0.430} & \tbf{0.802} & 19.1\% & & 0.285 & 0.508 & 0.746 & \tul{0.245} & \tul{0.446} & \tul{0.803} & 12.1\% & \tbf{0.182} & \tbf{0.318} & \tbf{0.854} & 37.3\% \\ ZARA2~~~ & 0.305 & \tul{0.482} & 0.394 & \tul{0.288} & 0.497 & \tbf{0.467} & -3.2\% & \tbf{0.217} & \tbf{0.379} & \tul{0.439} & 21.4\% & & 0.225 & 0.422 & 0.491 & \tul{0.193} & \tul{0.359} & \tul{0.503} & 14.9\% & \tbf{0.138} & \tbf{0.245} & \tbf{0.735} & 41.8\% \\ \midrule AVG~~~ & 0.447 & 0.753 & 0.504 & \tul{0.384} & \tul{0.639} & \tul{0.567} & 15.2\% & \tbf{0.280} & \tbf{0.449} & \tbf{0.573} & 37.2\% & & 0.352 & 0.626 & 0.553 & \tul{0.291} & \tul{0.496} & \tul{0.591} & 20.7\% & \tbf{0.213} & \tbf{0.360} & \tbf{0.678} & 42.5\% \\ SDD~~~ & 20.76 & 33.18 & 0.471 & \tul{19.21} & \tul{31.81} & \tul{0.498} & 4.1\% & \tbf{11.80} & \tbf{18.43} & \tbf{0.551} & 44.5\% & & 25.00 & 41.52 & 0.570 & \tul{21.97} & \tul{38.04} & \tul{0.604} & 8.4\% & \tbf{17.12} & \tbf{28.97} & \tbf{0.650} & 30.2\% \\ GCS~~~ & 14.72 & 23.87 & 0.698 & \tul{13.42} & \tul{22.18} & \tul{0.724} & 7.1\% & \tbf{9.72} & \tbf{15.69} & \tbf{0.760} & 34.3\% & & 11.18 & 20.65 & 0.777 & \tul{10.10} & \tul{18.69} & \tbf{0.795} & 9.5\% & \tbf{7.66} & \tbf{13.41} & \tul{0.789} & 35.1\% \\ \bottomrule \end{tabular} } \\ \vspace{-4mm} \\ \resizebox{\linewidth}{!}{ \begin{tabular}{c ccc cccc cccc c ccc cccc cccc c} \toprule \multirow{3}{*}{} & \multicolumn{11}{c}{Social-GAN~\cite{gupta2018social}~~($s=8$)} & & \multicolumn{10}{c}{STGAT~\cite{huang2019stgat}~~($s=16$)} \\ \cmidrule(r){2-12} \cmidrule(r){14-24} & 
\multicolumn{3}{c}{MC (Baseline)} & \multicolumn{4}{c}{QMC} & \multicolumn{4}{c}{\textbf{NPSN}} & & \multicolumn{3}{c}{MC (Baseline)} & \multicolumn{4}{c}{QMC} & \multicolumn{4}{c}{\textbf{NPSN}} \\ \cmidrule(r){2-4} \cmidrule(lr){5-8} \cmidrule(lr){9-12} \cmidrule(r){14-16} \cmidrule(lr){17-20} \cmidrule(lr){21-24} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} & & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} \\ \midrule ETH~~~ & 0.767 & 1.397 & \tul{0.592} & \tul{0.760} & \tul{1.379} & \tbf{0.596} & 1.3\% & \tbf{0.718} & \tbf{1.264} & 0.539 & 9.5\% & & 0.679 & 1.203 & \tul{0.576} & \tul{0.668} & \tul{1.175} & \tul{0.576} & 2.4\% & \tbf{0.612} & \tbf{1.020} & \tbf{0.602} & 15.2\% \\ HOTEL~~~ & 0.434 & 0.876 & \tul{0.322} & \tul{0.431} & \tul{0.870} & \tbf{0.323} & 0.7\% & \tbf{0.385} & \tbf{0.720} & 0.311 & 17.8\% & & 0.346 & \tul{0.661} & \tul{0.338} & \tul{0.342} & 0.663 & \tbf{0.351} & -0.3\% & \tbf{0.308} & \tbf{0.566} & 0.301 & 14.4\% \\ UNIV~~~ & 0.745 & 1.497 & 0.686 & \tul{0.744} & \tul{1.494} & \tul{0.690} & 0.2\% & \tbf{0.711} & \tbf{1.427} & \tbf{0.715} & 4.7\% & & 0.545 & 1.164 & 0.759 & \tul{0.541} & \tul{1.152} & \tul{0.763} & 1.0\% & \tbf{0.535} & \tbf{1.133} & \tbf{0.795} & 2.6\% \\ ZARA1~~~ & 0.346 & 0.693 & \tul{0.801} & \tbf{0.343} & 
\tul{0.686} & \tbf{0.805} & 0.9\% & \tul{0.344} & \tbf{0.683} & 0.798 & 1.4\% & & 0.345 & 0.687 & 0.791 & \tul{0.343} & \tul{0.683} & \tul{0.818} & 0.6\% & \tbf{0.338} & \tbf{0.678} & \tbf{0.822} & 1.4\% \\ ZARA2~~~ & 0.356 & 0.721 & 0.474 & \tul{0.354} & \tul{0.716} & \tul{0.476} & 0.7\% & \tbf{0.345} & \tbf{0.696} & \tbf{0.491} & 3.5\% & & \tul{0.304} & \tul{0.620} & \tul{0.508} & \tbf{0.288} & \tbf{0.588} & 0.484 & 5.2\% & \tul{0.304} & 0.621 & \tbf{0.557} & -0.1\% \\ \midrule AVG~~~ & 0.530 & 1.037 & \tul{0.575} & \tul{0.526} & \tul{1.029} & \tbf{0.578} & 0.8\% & \tbf{0.501} & \tbf{0.958} & 0.571 & 7.6\% & & 0.444 & 0.867 & 0.594 & \tul{0.436} & \tul{0.852} & \tul{0.598} & 1.7\% & \tbf{0.419} & \tbf{0.804} & \tbf{0.616} & 7.3\% \\ SDD~~~ & 13.58 & 24.59 & 0.598 & \tul{13.41} & \tul{24.24} & \tul{0.601} & 1.4\% & \tbf{13.03} & \tbf{23.04} & \tbf{0.630} & 6.3\% & & 14.85 & 28.17 & 0.590 & \tul{14.82} & \tul{28.12} & \tul{0.594} & 0.2\% & \tbf{13.67} & \tbf{25.24} & \tbf{0.613} & 10.4\% \\ GCS~~~ & 15.85 & 32.57 & 0.783 & \tul{15.80} & \tul{32.44} & \tul{0.785} & 0.4\% & \tbf{15.78} & \tbf{32.17} & \tbf{0.798} & 1.2\% & & \tul{15.57} & \tul{31.82} & \tbf{0.798} & \tbf{15.55} & \tbf{31.80} & \tbf{0.798} & 0.1\% & 15.71 & 32.12 & \tbf{0.798} & -0.9\% \\ \bottomrule \end{tabular} } \\ \vspace{-4mm} \\ \resizebox{\linewidth}{!}{ \begin{tabular}{c ccc cccc cccc c ccc cccc cccc c} \toprule \multirow{3}{*}{} & \multicolumn{11}{c}{Trajectron++~\cite{salzmann2020trajectron++}~~($s=25$)} & & \multicolumn{10}{c}{PECNet~\cite{mangalam2020pecnet}~~($s=16$)} \\ \cmidrule(r){2-12} \cmidrule(r){14-24} & \multicolumn{3}{c}{MC (Baseline)} & \multicolumn{4}{c}{QMC} & \multicolumn{4}{c}{\textbf{NPSN}} & & \multicolumn{3}{c}{MC (Baseline)} & \multicolumn{4}{c}{QMC} & \multicolumn{4}{c}{\textbf{NPSN}} \\ \cmidrule(r){2-4} \cmidrule(lr){5-8} \cmidrule(lr){9-12} \cmidrule(r){14-16} \cmidrule(lr){17-20} \cmidrule(lr){21-24} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE 
\raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} & & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} & ADE \raisebox{0.15ex}{$\downarrow$} & FDE \raisebox{0.15ex}{$\downarrow$} & TCC \raisebox{0.15ex}{$\uparrow$} & Gain \raisebox{0.15ex}{$\uparrow$} \\ \midrule ETH~~~ & 0.610 & 1.028 & 0.495 & \tul{0.591} & \tul{0.995} & \tbf{0.503} & 3.2\% & \tbf{0.518} & \tbf{0.780} & \tul{0.499} & 24.1\% & & 0.610 & 1.073 & 0.596 & \tul{0.601} & \tul{1.036} & \tul{0.602} & 3.5\% & \tbf{0.550} & \tbf{0.882} & \tbf{0.618} & 17.8\% \\ HOTEL~~~ & 0.196 & 0.284 & \tul{0.323} & \tul{0.193} & \tul{0.277} & 0.319 & 2.5\% & \tbf{0.157} & \tbf{0.266} & \tbf{0.352} & 6.2\% & & 0.222 & 0.390 & 0.335 & \tul{0.214} & \tul{0.369} & \tul{0.336} & 5.3\% & \tbf{0.188} & \tbf{0.288} & \tbf{0.359} & 26.1\% \\ UNIV~~~ & 0.304 & 0.545 & \tul{0.765} & \tul{0.297} & \tul{0.531} & \tbf{0.767} & 2.6\% & \tbf{0.266} & \tbf{0.443} & \tul{0.765} & 18.8\% & & 0.335 & 0.558 & 0.752 & \tul{0.326} & \tul{0.533} & \tul{0.759} & 4.5\% & \tbf{0.289} & \tbf{0.439} & \tbf{0.765} & 21.4\% \\ ZARA1~~~ & 0.241 & 0.413 & 0.764 & \tul{0.235} & \tul{0.401} & \tul{0.766} & 2.8\% & \tbf{0.191} & \tbf{0.358} & \tul{0.844} & 13.4\% & & 0.250 & 0.448 & 0.808 & \tul{0.241} & \tul{0.425} & \tul{0.816} & 5.1\% & \tbf{0.209} & \tbf{0.333} & \tbf{0.839} & 25.8\% \\ ZARA2~~~ & 0.175 & 0.319 & 0.639 & \tul{0.170} & \tul{0.307} & \tul{0.641} & 3.5\% & \tbf{0.161} & \tbf{0.278} & \tbf{0.684} & 12.6\% & & 0.186 & 0.332 
& 0.596 & \tul{0.178} & \tul{0.310} & \tul{0.609} & 6.7\% & \tbf{0.159} & \tbf{0.252} & \tbf{0.641} & 24.0\% \\ \midrule AVG~~~ & 0.305 & 0.518 & 0.597 & \tul{0.297} & \tul{0.502} & \tul{0.599} & 3.0\% & \tbf{0.258} & \tbf{0.425} & \tbf{0.629} & 17.9\% & & 0.321 & 0.560 & 0.617 & \tul{0.312} & \tul{0.535} & \tul{0.624} & 4.6\% & \tbf{0.279} & \tbf{0.439} & \tbf{0.644} & 21.7\% \\ SDD~~~ & 11.40 & 20.12 & 0.652 & \tul{11.22} & \tul{19.69} & \tbf{0.656} & 2.1\% & \tbf{11.12} & \tbf{18.95} & \tul{0.653} & 5.8\% & & 9.97 & 15.89 & 0.647 & \tul{9.72} & \tul{15.22} & \tul{0.652} & 4.2\% & \tbf{8.56} & \tbf{11.85} & \tbf{0.665} & 25.4\% \\ GCS~~~ & 12.75 & 24.23 & \tul{0.802} & \tul{12.47} & \tul{23.50} & \tul{0.802} & 3.0\% & \tbf{12.36} & \tbf{22.98} & \tbf{0.805} & 5.2\% & & \tul{17.08} & 29.30 & 0.708 & 17.09 & \tul{29.23} & \tul{0.711} & 0.2\% & \tbf{10.13} & \tbf{17.36} & \tbf{0.717} & 40.8\% \\ \bottomrule \end{tabular} } \\ \end{tabular} \vspace{-2mm} \caption{Comparison results of MC, QMC and NPSN w.r.t. STGCNN~\cite{mohamed2020social}, SGCN~\cite{Shi2021sgcn}, SGAN~\cite{gupta2018social}, STGAT~\cite{huang2019stgat}, Trajectron++~\cite{salzmann2020trajectron++}, and PECNet~\cite{mangalam2020pecnet} in $N\!=\!20$. Models are evaluated on the ETH~\cite{5459260}, UCY~\cite{crowdsbyexample}, SDD~\cite{robicquet2016learning}, and GCS~\cite{yi2015understanding} datasets. (Gain: performance improvement w.r.t. FDE over the baseline models, Unit for ADE and FDE: meter, \textbf{Bold}:Best, \underline{Underline}:Second best)} \vspace{-3mm} \label{tab:qmc_result} \end{table*} However, we observe that all $N$ sample points are sometimes closely located near its ground-truth as learning progresses. This is a common problem in purposive sampling, because certain samples can be over-biased due to data imbalance, \ie a large portion of the trajectory moving along one direction of the walkway. 
For this reason, we introduce a novel discrepancy loss to keep the $N$ sample points with low discrepancy, as below: \noindent\vspace{-2mm} \begin{equation} \mathcal{L}_{disc} = \frac{1}{LN} \sum_{l=1}^{L}\sum_{i=1}^{N} -\log \min_{\substack{j \in [1, ..., N] \\ j \neq i}} || S_{l, i} - S_{l, j} ||. \vspace{-2mm} \end{equation} The objective of the discrepancy loss is to maximize distances among the closest neighbors of $N$ samples. If the distance is closer, the loss imposes a higher penalty to ensure their uniform coverage of the sampling space. The final loss function is a linear combination of both the distance and the discrepancy loss $\mathcal{L} = \mathcal{L}_{dist} + \lambda \mathcal{L}_{disc}$. We set $\lambda=1e\!-\!2$ to balance the scale of both terms. \subsection{Implementation Details} \vspace{-0.5mm} \noindent\textbf{Transformation of one distribution to another.}\quad While most human trajectory prediction models use a normal distribution, the Sobol sequence and our NPSN are designed to produce a uniform distribution. We bridge the gap by transforming between the uniform distribution and the normal distribution. There are some representative methods including the Ziggurat method~\cite{JSSv005i08}, the Inverse Cumulative Distribution Function (ICDF), and the Box-Muller transform~\cite{Box1958ANO}. In this work, we utilize the Box-Muller transform which is differentiable and enables an efficient execution on a GPU with the lowest QMC error, as demonstrated in~\cite{gpugems3,OKTEN20111268}. The formula of the Box-Muller transform is as follows: \noindent \begin{equation} \begin{split} Z_{odd} &= \sqrt{-2\ln(U_{even})}\,\cos(2\pi U_{odd}), \\ Z_{even} &= \sqrt{-2\ln(U_{even})}\,\sin(2\pi U_{odd}), \end{split} \end{equation} where $U$ is an independent sample set from a uniform distribution and $Z$ is an independent random variable from a standard normal distribution. 
\noindent\textbf{Training Procedure.}\quad Our NPSN is embedded into the state-of-the-art pedestrian trajectory prediction models~\cite{mohamed2020social, Shi2021sgcn, gupta2018social, huang2019stgat, salzmann2020trajectron++, mangalam2020pecnet,liu2020snce,liu2021causal} by simply replacing their random sampling part. The parameters of the models are initialized using the weights provided by the authors, except for four models~\cite{huang2019stgat, mangalam2020pecnet, salzmann2020trajectron++, liu2020snce} which use weights reproduced from the authors' source codes. Our NPSN has only 5,128 learnable parameters on $s\!=\!2$ and $N\!=\!20$. We train the prediction models with NPSN using an AdamW optimizer~\cite{loshchilov2018decoupled} with a batch size of 128 and a learning rate of $1e\!-\!3$ for 128 epochs. We step down the learning rate with a gain of 0.5 at every 32 epochs. Training time takes about three hours on a machine with an NVIDIA 2080TI GPU. \vspace{-1mm} \section{Experiments} \vspace{-1mm} In this section, we conduct comprehensive experiments on public benchmark datasets to verify how the sampling strategy contributes to pedestrian trajectory prediction. We first briefly describe our experimental setup (\cref{subsec:ExperimentalSetup}), and then provide comparison results with various baselines and state-of-the-art models (\cref{subsec:method}). Moreover, we run an extensive ablation study to demonstrate the effect of each component of our method (\cref{subsec:ablation}). \vspace{-1mm} \subsection{Experimental Setup} \vspace{-1mm} \label{subsec:ExperimentalSetup} \noindent\textbf{Dataset.}\quad We evaluate the effectiveness of the QMC method and our NPSN on various benchmark datasets~\cite{5459260, crowdsbyexample, robicquet2016learning,yi2015understanding } over state-of-the-art methods. ETH~\cite{5459260} and UCY dataset~\cite{crowdsbyexample} include ETH and HOTEL, and UNIV, ZARA1 and ZARA2 scenes, respectively. 
Both datasets consist of various movements of pedestrians with complicated social interactions. The Stanford Drone Dataset (SDD)~\cite{robicquet2016learning} contains secluded scenes with various object types (\eg pedestrian, biker, skater, and cart), and the Grand Central Station (GCS)~\cite{yi2015understanding} dataset consists of highly congested scenes where pedestrians walk. We observe a trajectory for 3.2 seconds ($T_{obs}\!=\!8$), and then predict future paths for the next 4.8 seconds ($T_{pred}\!=\!12$). We follow a leave-one-out cross-validation evaluation strategy, which is the standard evaluation protocol used in many works~\cite{gupta2018social, huang2019stgat, mohamed2020social, Shi2021sgcn, salzmann2020trajectron++, mangalam2020pecnet}. \noindent\textbf{Evaluation metric.}\quad We measure the performance of the trajectory prediction models using three metrics: 1) Average Displacement\,Error\,(ADE) - average\,Euclidean\,distance between a prediction and ground-truth trajectory; 2) Final Displacement Error (FDE) - Euclidean distance between a prediction and ground-truth final destination; 3) Temporal Correlation Coefficient (TCC)~\cite{tao2020dynamic} - Pearson correlation coefficient of motion patterns between a prediction and ground-truth trajectory. These metrics assess the best one of $N\!=\!20$ trajectory outputs, and we report average values for all agents in each scene. In addition, to reduce the variance in the prediction results of stochastic models, we repeat the evaluation 100 times and then average them for each metric. 
\noindent\textbf{Baseline.}\quad We evaluate QMC and NPSN sampling methods with representative stochastic pedestrian trajectory prediction models: 1) Gaussian distribution-based model - Social-STGCNN\,\cite{mohamed2020social}, SGCN\,\cite{Shi2021sgcn}; 2) GAN-based model - Social-GAN\,\cite{gupta2018social}, STGAT\,\cite{huang2019stgat}, Causal-STGAT\,\cite{liu2021causal}; 3) CVAE-based model - Trajectron++\,\cite{salzmann2020trajectron++}, PECNet\,\cite{mangalam2020pecnet}, and NCE-Trajectron++\,\cite{liu2020snce}. To validate the effectiveness of QMC and NPSN, we replace their random sampling parts in the authors' provided codes with our QMC and NPSN sampling method. \vspace{-1mm} \subsection{Results from QMC and NPSN method} \vspace{-1mm} \label{subsec:method} \noindent\textbf{Comparison of MC and QMC.}\quad We compare MC with the QMC method by incorporating them into the sampling part of the baseline models. As shown in~\cref{tab:qmc_result,fig:boxplot}, the QMC method significantly outperforms the MC method on all the evaluation metrics. In~\cref{fig:boxplot}, we report the error distributions of the baseline models in the test phase. The QMC method achieves consistently lower errors and variations by alleviating the bias problem mentioned in~\cref{subsec:bias}. We also observe that the Gaussian-based models show a large performance gain over the GAN- and CVAE-based models. There are two reasons for the performance gains induced by the QMC method: 1) The dimension of the sampling space ($s\!=\!2$) in the Gaussian-based models is relatively smaller than other models (\ie $s\!=\!8$, $16$ or $25$). According to~\cite{drawbackQMC}, for large dimensions $s$ and a small number of samples $N$, the sampling results from a low-discrepancy generator may not be good enough over randomly generated samples. The Gaussian-based model thus yields promising results compared to one which has larger sampling dimensions. 
2) The performance improvements depend on the number of layers in networks (shallower is better): The CVAE and GAN-based models are composed of multiple layers. By contrast, the Gaussian-based models have only one layer which acts as a linear transformation between the predicted trajectory coordinates and final coordinates. To be specific, in the transformation, sampled independent 2D points are multiplied with the Cholesky decomposed covariance matrix and shifted by the mean matrix. Here, the shallow layer of the Gaussian-based models directly reflects the goodness of the QMC sampling method, rather than deeper layers which can barely be influenced by the random latent vector in the inference step. \begin{figure}[t] \centering \vspace{-0.5mm} \includegraphics[width=\linewidth,trim={0 11mm 0 0},clip]{figures/CVPR_figure_boxplot_v3.pdf} \vspace{-7mm} \caption{Box plots of average ADE, FDE and TCC measured for each stochastic model on both MC and QMC. (a) STGCNN~\cite{mohamed2020social}, (b) SGCN~\cite{Shi2021sgcn}, (c) SGAN~\cite{gupta2018social}, (d) STGAT~\cite{huang2019stgat}, (e) Trajectron++~\cite{salzmann2020trajectron++}, and (f) PECNet~\cite{mangalam2020pecnet}.} \vspace{-2mm} \label{fig:boxplot} \end{figure} \begin{figure}[t] \centering \includegraphics[width=\columnwidth,trim={0 43mm 0 0},clip]{figures/CVPR_figure_qualitative_v1.pdf} \vspace{-7mm} \caption{Visualization of probabilistic distributions and the best predictions among sampled trajectories with MC, QMC, and NPSN in SGCN~\cite{Shi2021sgcn}.} \vspace{-3mm} \label{fig:qualitative_result} \end{figure} \noindent\textbf{Evaluation of NPSN.}\quad We apply NPSN to all three types of stochastic trajectory prediction models. As shown in~\cref{tab:qmc_result}, there are different performance gains according to the types. 
Particularly, the Gaussian distribution approaches (Social-STGCNN~\cite{mohamed2020social}, SGCN~\cite{Shi2021sgcn}) show the highest performance improvement (up to 60\%), which can be attributed to the advantages of the QMC method when $s=2$. So far, the performance of the Gaussian distribution approaches has been underestimated due to the disadvantage of being easily affected by the sampling bias. Our NPSN maximizes the capability of the Gaussian distribution approaches through a purposive sampling technique. In the CVAE-based approaches, PECNet~\cite{mangalam2020pecnet} shows a larger performance improvement (up to 41\%) than that of Trajectron++~\cite{salzmann2020trajectron++}. Since PECNet directly predicts a set of destinations through the latent vector, NPSN is compatible with its inference step. On the other hand, NPSN seems to produce less benefit with the inference step of Trajectron++ because it predicts the next step recurrently and its sample dimension is relatively large ($s=25$). The generative models with variety loss, Social-GAN and STGAT, show relatively small performance improvements, compared to the others. For some datasets, the FDE values of STGAT are higher than those of MC and QMC when using our NPSN. This seems to suggest that NPSN fails to learn samples close to ground-truth trajectories due to the common entanglement problem of latent space~\cite{infogan,stylegan}. 
\begin{table}[t] \Large \centering \resizebox{\linewidth}{!}{ \begin{tabular}{ccccccc} \toprule & ETH & HOTEL & UNIV & ZARA1 & ZARA2 & AVG \\ \midrule Social-GAN~\cite{gupta2018social} & 0.87 / 1.62 & 0.67 / 1.37 & 0.76 / 1.52 & 0.35 / 0.68 & 0.42 / 0.84 & 0.61 / 1.21 \\ STGAT~\cite{huang2019stgat} & 0.65 / 1.12 & 0.35 / 0.66 & 0.52 / 1.10 & 0.34 / 0.69 & 0.29 / 0.60 & 0.43 / 0.83 \\ Causal-STGAT~\cite{liu2021causal} & 0.60 / 0.98 & 0.30 / 0.54 & 0.52 / 1.10 & 0.32 / 0.64 & 0.28 / 0.58 & 0.40 / 0.77 \\ Social-STGCNN~\cite{mohamed2020social} & 0.64 / 1.11 & 0.49 / 0.85 & 0.44 / 0.79 & 0.34 / 0.53 & 0.30 / 0.48 & 0.44 / 0.75 \\ PECNet~\cite{mangalam2020pecnet} & 0.61 / 1.07 & 0.22 / 0.39 & 0.34 / 0.56 & 0.25 / 0.45 & 0.19 / 0.33 & 0.32 / 0.56 \\ Trajectron++~\cite{salzmann2020trajectron++} & 0.61 / 1.03 & 0.20 / 0.28 & 0.30 / 0.55 & 0.24 / 0.41 & 0.18 / 0.32 & 0.31 / 0.52 \\ NCE-Trajectron++~\cite{liu2020snce} & 0.56 / 1.02 & 0.17 / 0.27 & 0.28 / 0.54 & 0.22 / 0.41 & \tul{0.16} / 0.31 & 0.28 / 0.51 \\ SGCN~\cite{Shi2021sgcn} & 0.57 / 1.00 & 0.31 / 0.53 & 0.37 / 0.67 & 0.29 / 0.51 & 0.22 / 0.42 & 0.35 / 0.63 \\ \cmidrule(lr){1-7} \tbf{NPSN-SGAN} & 0.72 / 1.26 & 0.38 / 0.72 & 0.71 / 1.43 & 0.34 / 0.68 & 0.34 / 0.70 & 0.50 / 0.96 \\ \tbf{NPSN-STGAT} & 0.61 / 1.02 & 0.31 / 0.57 & 0.53 / 1.13 & 0.34 / 0.68 & 0.30 / 0.62 & 0.42 / 0.80 \\ \tbf{NPSN-Causal-STGAT} & 0.56 / 0.90 & 0.25 / 0.40 & 0.51 / 1.09 & 0.32 / 0.65 & 0.27 / 0.56 & 0.38 / 0.72 \\ \tbf{NPSN-STGCNN} & 0.44 / 0.65 & 0.21 / 0.34 & 0.28 / 0.44 & 0.25 / 0.43 & 0.22 / 0.38 & 0.28 / 0.45 \\ \tbf{NPSN-PECNet} & 0.55 / 0.88 & 0.19 / 0.29 & 0.29 / 0.44 & 0.21 / \tul{0.33} & \tul{0.16} / \tbf{0.25} & 0.28 / 0.44 \\ \tbf{NPSN-Trajectron++} & 0.52 / 0.78 & \tul{0.16} / 0.27 & \tul{0.27} / 0.44 & \tul{0.19} / 0.36 & \tul{0.16} / \tul{0.28} & 0.26 / 0.42 \\ \tbf{NPSN-NCE-Trajectron++} & \tul{0.40} / \tul{0.62} & \tbf{0.15} / \tbf{0.24} & \tbf{0.23} / \tul{0.41} & \tul{0.19} / 0.35 & \tbf{0.14} / \tbf{0.25} & \tul{0.22} / 
\tul{0.37} \\ \tbf{NPSN-SGCN} & \tbf{0.36} / \tbf{0.59} & \tul{0.16} / \tul{0.25} & \tbf{0.23} / \tbf{0.39} & \tbf{0.18} / \tbf{0.32} & \tbf{0.14} / \tbf{0.25} & \tbf{0.21} / \tbf{0.36} \\ \bottomrule \end{tabular} } \vspace{-3mm} \caption{Comparison of NPSN with other state-of-the-art stochastic model (ADE/FDE, Unit: meter). The evaluation results are directly referred from~\cite{liu2021causal}. \textbf{Bold}:Best, \underline{Underline}:Second Best.} \vspace{-2mm} \label{tab:npsn_sota} \end{table} \noindent\textbf{Qualitative results.}\quad \cref{fig:qualitative_result} shows several cases where there are differences between the predictions of NPSN and other methods. Since NPSN takes an observation trajectory along with the low-discrepancy characteristics of the QMC method, the predicted paths from NPSN are closer to socially-acceptable paths compared to other methods. As we described in the~\cref{fig:random_points}, the QMC method generates a more realistic trajectory distribution than the MC method. However, due to the limitations of the dataset, the generated trajectories of the baseline network are biased toward a straight path. On the other hand, NPSN sampling method alleviates the problem by selecting the point near the ground-truth in the latent space. As a result, the human trajectory model with NPSN not only generates well-distributed samples with finite sampling pathways, but also represents the feasible range of human's movements. \noindent\textbf{Comparison with the state-of-the-art models.}\quad We push the state-of-the-art models with our NPSN, a purposive sampling technique. As shown in~\cref{tab:qmc_result}, our NPSN shows a significant performance improvement on all the baseline networks. NPSN provides better overall accuracy by taking fully advantage of the low-discrepancy characteristics of the QMC method. In addition, we report a benchmark result on ETH/UCY dataset in~\cref{tab:npsn_sota}. 
It is noticeable that all the baseline models exhibit better performances with our NPSN. In particular, when NPSN is incorporated into the combinational approach of Trajectron++~\cite{salzmann2020trajectron++} and NCE~\cite{liu2020snce}, it achieves the best performances on the benchmark. Our NPSN is trained to only control the latent vector samples for the baseline models, and synergizes well with the inference step that comes after both the initial prediction of Trajectron++ and the collision avoidance of NCE. \begin{figure}[t] \centering \vspace{-1mm} \includegraphics[width=\columnwidth,trim={0 17mm 0 0},clip]{figures/CVPR_figure_changeN_v3.pdf} \vspace{-7mm} \caption{Averaged ADE/FDE/TCC results on ETH/UCY datasets w.r.t. the number of samples for SGCN~\cite{Shi2021sgcn} (Gaussian distribution approach), STGAT~\cite{huang2019stgat} (GAN-based approach) and PECNet~\cite{mangalam2020pecnet} (CVAE-based approach). We quantify the performance change w.r.t. the sampling methods including MC, QMC and NPSN. We also report the results of deterministic trajectory prediction when $N\!=\!1$ in gray colored regions.} \vspace{-3.5mm} \label{fig:graph_change_N} \end{figure} \vspace{-0.5mm} \subsection{Ablation Studies} \vspace{-0.5mm} \label{subsec:ablation} \noindent\textbf{Evaluation of different number of samples.}\quad To check the effectiveness of the density of sampled paths in human trajectory prediction, we randomly generate trajectories by changing the number of samples $N$. As shown in~\cref{fig:graph_change_N}, the performance gap between the MC and the QMC method is marginal when the number of samples goes to infinity. As mentioned above, it follows the Strong Law of large numbers in the MC integration. The Gaussian-based model, SGCN~\cite{Shi2021sgcn}, achieves superior performance and improves more than 30\% performance gain over the classic policy ($N\!=\!20$). Since the sample dimension is small, the effectiveness and convergence of our NPSN are enlarged. 
Note that a performance drop occurs under sparse conditions due to the discrepancy property: For small $N$ and a comparably large sample space dimension (\ie, $N < 2^s$), the discrepancy of the QMC method may not be less than that of a random sequence. We overcome these limitations with a learnable sampling method by sampling a feasible latent vector with low-discrepancy characteristics. \noindent\textbf{Deterministic trajectory prediction.}\quad Since the stochastic model is trained to predict multi-modal future paths, it outputs diverse paths at each execution, which is undesirable for deterministic human trajectory prediction that infers only one feasible pathway ($N\!=\!1$). By replacing the conventional probability process with a learnable sampling, NPSN allows the stochastic models to infer the most feasible trajectory in a deterministic manner. As shown in~\cref{fig:graph_change_N} (gray colored regions), NPSN outperforms QMC and the conventional methods on all the metrics at $N\!=\!1$. 
\begin{table}[t] \vspace{-0.5mm} \centering \resizebox{\linewidth}{!}{ \begin{tabular}{ccccccc} \toprule & ETH & HOTEL & UNIV & ZARA1 & ZARA2 & AVG \\ \midrule ~~~Baseline~~~ & 0.57 / 1.00 & 0.31 / 0.53 & 0.37 / 0.67 & 0.29 / 0.51 & 0.22 / 0.42 & 0.35 / 0.63 \\ \cmidrule(lr){1-7} w/o $\mathcal{L}_{dist}$ & 0.39 / 0.61 & 0.23 / 0.45 & \tul{0.26} / \tul{0.47} & \tul{0.20} / \tul{0.36} & \tul{0.16} / 0.31 & 0.25 / 0.44 \\ w/o $\mathcal{L}_{disc}$ & \tul{0.38} / 0.61 & \tbf{0.16} / \tbf{0.25} & \tbf{0.23} / \tbf{0.39} & \tbf{0.18} / \tbf{0.32} & \tbf{0.14} / \tbf{0.25} & \tul{0.22} / \tul{0.37} \\ w/o GAT & \tbf{0.36} / \tbf{0.57} & \tul{0.17} / \tul{0.28} & \tbf{0.23} / \tbf{0.39} & \tbf{0.18} / \tbf{0.32} & \tbf{0.14} / \tul{0.26} & \tul{0.22} / \tul{0.37} \\ +NPSN & \tbf{0.36} / \tul{0.59} & \tbf{0.16} / \tbf{0.25} & \tbf{0.23} / \tbf{0.39} & \tbf{0.18} / \tbf{0.32} & \tbf{0.14} / \tbf{0.25} & \tbf{0.21} / \tbf{0.36} \\ \bottomrule \end{tabular} } \vspace{-3mm} \caption{Ablation study\,on\,each\,component\,of\,NPSN in SGCN.} \vspace{-5mm} \label{tab:npsn_ablation} \end{table} \noindent\textbf{Effectiveness of each component.}\quad Lastly, we examine the effectiveness of each component in our NPSN, whose result is reported in~\cref{tab:npsn_ablation}. Here, SGCN~\cite{Shi2021sgcn} is selected as the baseline model because it shows the most significant performance improvements with NPSN. First, our two loss functions work well. Particularly, the discrepancy loss guarantees sample diversity by generating low-discrepancy samples, and the distance loss enforces generating samples close to the ground-truth trajectory. The GAT captures the agent-aware interaction for socially-acceptable trajectory prediction, except for the secluded ETH scene. 
\vspace{-0.5mm} \section{Conclusion} \vspace{-1.5mm} In this work, we numerically analyze the limitations of the conventional sampling process in stochastic pedestrian trajectory prediction, by using the concept of discrepancy as a measure of the sampling quality. To overcome these limitations, we then introduce a novel, light-weight and learnable sampling strategy, inspired by the Quasi-Monte Carlo method. Unlike conventional random sampling, our learnable method considers both observations and the social norms of pedestrians in scenes. In addition, our method can be inserted into stochastic pedestrian trajectory predictions as a plug-and-play module. With the proposed learnable method, all of the state-of-the-art models achieve performance improvements. In particular, the Gaussian-based models show the best results on the benchmark. \vspace{3mm} \fontsize{7.24}{8}\selectfont{\noindent\textbf{Acknowledgement} This work is in part supported by the Institute of Information $\&$ communications Technology Planning $\&$ Evaluation (IITP) grant funded by the Korea government (MSIT) (No.2019-0-01842, Artificial Intelligence Graduate School Program (GIST), No.2021-0-02068, Artificial Intelligence Innovation Hub), Vehicles AI Convergence Research $\&$ Development Program through the National IT Industry Promotion Agency of Korea (NIPA) funded by the Ministry of Science and ICT(No.S1602-20-1001), the National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (No.2020R1C1C1012635), and the GIST-MIT Collaboration grant funded by the GIST in 2022.} \clearpage {\small \bibliographystyle{ieee_fullname}
1,108,101,565,828
arxiv
\section{Introduction} Periods of $t$-modules play a central role in number theory in positive characteristic, and questions about their algebraic independence are of major interest. The most prominent period is the Carlitz period \[ \tilde{\pi}=\lambda_\theta \theta \prod_{j \geq 1} (1 - \theta^{1-q^j})^{-1} \in K_\infty(\lambda_\theta), \] where $\lambda_\theta\in \bar{K}$ is a $(q-1)$-th root of $-\theta$. Here, $K=\FF_q(\theta)$ is the rational function field over the finite field $\FF_q$, $\bar{K}$ its algebraic closure, and $K_\infty$ is the completion of $K$ with respect to the absolute value $|\cdot|_\infty$ given by $|\theta|_\infty=q$. The Carlitz period is the function field analog of the complex number $2\pi i$, and it was already proven by Wade in 1941 that $\tilde{\pi}$ is transcendental over $K$ (see~\cite{liw:cqtg}). For proving algebraic independence of periods (and other ``numbers'' like zeta values and logarithms) the ABP-criterion (cf.~\cite[Thm.~3.1.1]{ga-wb-mp:darasgvpc}) and a consequence of it - which is part of the proof of \cite[Thm.~5.2.2]{mp:tdadmaicl} - turned out to be very useful. To state this consequence, let $\CC_\infty$ denote the completion of the algebraic closure of $K_\infty$, and $\powser$ the power series ring over $\CC_\infty$, as well as $\TT=\tate$ the subring consisting of those power series which converge on the closed unit disc $|t|_\infty\leq 1$. Finally, let $\mathbb{E}$ be the subring of entire functions, i.e.~of those power series which converge for all $t\in \CC_\infty$ and whose coefficients lie in a finite extension of $K_\infty$. On $\TT$ we consider the inverse Frobenius twist $\sigma$ given by \[ \sigma( \sum_{i=0}^\infty x_it^i)=\sum_{i=0}^\infty (x_i)^{1/q}t^i, \] which will be applied on matrices entry-wise. 
\begin{thm} (See proof of \cite[Thm.~5.2.2]{mp:tdadmaicl}) \label{thm:conseq-of-abp} \footnote{Note that the difference equation in \cite{mp:tdadmaicl} is given as $\sigma(\Psi)=\Phi\Psi$ from which our version is obtained by transposing the matrices. We use this transposed version as it fits better to our convention on notation (cf.~Sect.~\ref{subsec:conventions}).} Let $\Phi\in \Mat_{r\times r}(\bar{K}[t])$ be a matrix with determinant $\det(\Phi)=c(t-\theta)^s$ for some $c\in \bar{K}^\times$ and $s\geq 1$. If $\Psi\in \GL_r(\TT)\cap \Mat_{r\times r}(\mathbb{E})$ is a matrix such that \[ \sigma(\Psi)=\Psi \Phi, \] then the transcendence degree of $\bar{K}(t)(\Psi)$ over $\bar{K}(t)$ is the same as the transcendence degree of $\bar{K}(\Psi(\theta))$ over $\bar{K}$.\\ Here, $\bar{K}(t)(\Psi)$ denotes the field extension of $\bar{K}(t)$ generated by the entries of $\Psi$, and $\bar{K}(\Psi(\theta))$ denotes the field extension of $\bar{K}$ generated by the entries of $\Psi(\theta)$, the evaluation of the entries of $\Psi$ at $t=\theta$. \end{thm} Actually, the matrix $\Phi$ occurs as a matrix which represents the $\sigma$-action on a dual $t$-motive ${\mathfrak{M}}$ with respect to some $\bar{K}[t]$-basis of ${\mathfrak{M}}$, and $\Psi$ is the corresponding rigid analytic trivialization. Using this statement, one can also reprove the transcendence of $\pitilde$ by using the power series \[ \Omega(t)= \lambda_\theta^{-q} \prod_{j \geq 1} (1 - \frac{t}{\theta^{q^j}}) \in \mathbb{E}. \] This power series satisfies the difference equation $\sigma(\Omega)=\Omega\cdot (t-\theta)$ and is indeed the rigid analytic trivialization of the dual Carlitz motive $\mathfrak{C}$. The function $ \Omega$ is transcendental over $\bar{K}(t)$ - as it has infinitely many zeros - and \[ \Omega(\theta)=\Omega|_{t=\theta}= \lambda_\theta^{-q} \prod_{j \geq 1} (1 - \frac{\theta}{\theta^{q^j}})=-\frac{1}{\theta \lambda_\theta} \prod_{j \geq 1} (1 - \theta^{1-q^j}) =-\frac{1}{\pitilde}. 
\] Hence by the criterion, $\pitilde$ is transcendental over $\bar{K}$. Several proofs on algebraic independence (see e.g.~\cite{cc-mp:aipldm},\cite{ym:aicppcmv},\cite{mp:tdadmaicl}) follow the strategy to construct dual $t$-motives such that for the rigid analytic trivialization $\Psi$ of this module, the inverse of its specialization $\Psi(\theta)^{-1}$ has the desired values as entries. Then one shows algebraic independence for the corresponding entries of $\Psi$ or $\Psi^{-1}$ using different methods (like the Galois theoretical methods developed in \cite{mp:tdadmaicl}) and deduces algebraic independence of the desired values. \medskip The main theorem in the present paper is about the periods of the $n$-th tensor power of the Carlitz module. The $n$-th tensor power $E=C^{\otimes n}$ of the Carlitz module is a uniformizable $t$-module of dimension $n$ and rank $1$. Hence, the period lattice for $E$ is an $\FF_q[\theta]$-submodule of $\Lie(E)(\CC_\infty)\cong \CC_\infty^n$ of rank $1$, and we will show the following. \begin{thm} (see Thm.~\ref{thm:algebraic-independence})\\ Let $n\in\NN$ be prime to $q$, let $C^{\otimes n}$ be the $n$-th tensor power of the Carlitz module and let \[ \begin{pmatrix} z_1 \\ \vdots \\ z_n \end{pmatrix} \in \CC_\infty^n\] be a generator for the period lattice. Then $z_1,z_2,\ldots, z_n$ are algebraically independent over $\bar{K}$. \end{thm} The first step will be the definition of an appropriate dual $t$-motive such that the specialization at $t=\theta$ of the inverse of the rigid analytic trivialization contains such coordinates $z_1,\ldots, z_n$. As this is a special case of a general construction of new $t$-motives from old ones, we present this construction in detail. Actually, the main part of the paper is devoted to this construction which we call \textit{prolongation}, due to its similarities to prolongations in differential geometry. 
In Section \ref{sec:prolongations-of-t-motives}, we start by defining the prolongations of (non-dual) $t$-motives, since they are often defined over a smaller base field than the dual $t$-motives, and we show various properties which transfer from the original $t$-motive to its prolongation. We also give the explicit descriptions with matrices for abelian $t$-motives. In Section \ref{sec:prolongations-of-dual-t-motives}, we transfer the definition of prolongation and the explicit description to dual $t$-motives, and in Section \ref{sec:prolongations-of-t-modules}, we transfer it to $t$-modules, too. \medskip For the definition of prolongations, we make use of hyperdifferential operators (also called iterative higher derivations) with respect to the variable $t$. These are the family of $\CC_\infty$-linear maps $(\hde{n})_{n\geq 0}$ given by \[ \hd{n}{\sum_{i=i_0}^\infty x_it^i} =\sum_{i=i_0}^\infty \binom{i}{n} x_it^{i-n} \] for Laurent series $\sum_{i=i_0}^\infty x_it^i\in \laurent$, where $\binom{i}{n}\in \FF_p\subset \FF_q$ is the residue of the usual binomial coefficient. One should think of the $n$-th hyperdifferential operator $\hde{n}$ as $\frac{1}{n!}(d/dt)^n$, although in characteristic $p$, we can't divide by $n!$, if $n\geq p$. In characteristic zero, however, $\hde{n}$ would be exactly $\frac{1}{n!}(d/dt)^n$. As a warning to the reader, we would like to note that in the literature usually the hyperdifferential operators with respect to $\theta\in K$ are used (e.g.~in \cite{db:lidddm-I}, \cite{db:meagli}, \cite{db-ld:lidddm-II}), and hence the operation on power series is by hyperdifferentiating the coefficients. In this article, we will not use those hyperdifferential operators, but exclusively the hyperdifferentiation by $t$. \medskip In the proof of the main theorem, the Anderson-Thakur function $\omega(t)$ and its hyperderivatives appear, as $\omega$ is related to $\Omega$ via \[ \omega=\frac{1}{(t-\theta)\Omega}. 
\] In Section \ref{sec:omega-hypertranscendental}, we show a property of $\omega$ which is of interest on its own, namely we show \begin{thm} (see Thm.~\ref{thm:hypertranscendence}) The Anderson-Thakur function $\omega(t)$ is hypertranscendental over $\bar{K}(t)$, i.e.~the set $\{\hd{n}{\omega} \mid n\geq 0\}$ is algebraically independent over $\bar{K}(t)$. \end{thm} This will be deduced from properties of specializations of $\omega$ and its hyperderivatives at roots of unity which were investigated in \cite{ba-fp:ugtsls} and \cite{am-rp:iddbcppte}. This statement has also been given in \cite{fp:vscce} whose proof uses different methods. \medskip \subsection*{Acknowledgement} I would like to thank R.~Perkins who turned my attention to the work of Angl\'es-Pellarin \cite{ba-fp:ugtsls}, in which our common paper \cite{am-rp:iddbcppte} resulted, and which marked the beginning of the investigations presented in this article. I would also like to thank F.~Pellarin for interesting discussions on the hypertranscendence of $\omega$. \section{Generalities} \subsection{Base rings and operators} Let $\FF_q$ be the finite field with $q$ elements, and $K$ a finite extension of the rational function field $\FF_q(\theta)$ in the variable $\theta$. We choose an extension to $K$ of the absolute value $|\cdot |_\infty$ which is given on $\FF_q(\theta)$ by $|\theta|_\infty=q$. Furthermore, $K_\infty\supseteq \FF_q\ls{\frac{1}{\theta}}$ denotes the completion of $K$ at this infinite place, and $\CC_\infty$ the completion of an algebraic closure of $K_\infty$. Furthermore, let $\bar{K}$ be the algebraic closure of $K$ inside $\CC_\infty$. All the commutative rings occurring will be subrings of the field of Laurent series $\laurent$, like the polynomial rings $K[t]$ and $\bar{K}[t]$, the power series ring $\powser$ and the Tate algebra $\TT=\tate$, i.e.~the algebra of series which are convergent for $|t|_\infty\leq 1$. 
On $\laurent$ we have several operations which will induce operations on these subrings. First of all, there is the twisting $\tau:\laurent\to \laurent$ given by \[ f^{\tau} :=\sum_{i=i_0}^\infty (x_i)^qt^i \] for $f=\sum_{i=i_0}^\infty x_it^i\in \laurent$, and the inverse twisting $\sigma:\laurent\to \laurent$ given by \[ f^\sigma := \sum_{i=i_0}^\infty (x_i)^{1/q}t^i \] for $f=\sum_{i=i_0}^\infty x_it^i\in \laurent$. While the twisting restricts to endomorphisms on all subrings of $\laurent$ which occur in this paper, the inverse twisting is only defined for perfect coefficient fields, in particular not on $K[t]$, but on $\bar{K}[t]$. On the Laurent series ring $\laurent$ we furthermore have an action of the hyperdifferential operators with respect to $t$, i.e.~the sequence of $\CC_\infty$-linear maps $(\hde{n})_{n\geq 0}$ given by \[ \hd{n}{\sum_{i=i_0}^\infty x_it^i} =\sum_{i=i_0}^\infty \binom{i}{n} x_it^{i-n}. \] The image $\hd{n}{f}$ of some $f\in \laurent$ is called the $n$-th hyperderivative of $f$. The hyperdifferential operators satisfy $\hd{0}{f}=f$ for all $f\in \laurent$, \[ \hd{n}{fg}=\sum_{i=0}^n \hd{i}{f}\hd{n-i}{g} \quad \text{for all }f,g\in \laurent, n\in \NN \] as well as \[ \hd{n}{\hd{m}{f}}=\binom{n+m}{n}\hd{n+m}{f}\quad \text{for all }f\in \laurent, n,m\in \NN. \] It is not hard to verify that the subrings $\powser$, $\TT$, $L[t]$, and $L(t)$ (for any subfield $L$ of $\CC_\infty$) are stable under all the hyperdifferential operators. It is also obvious that the hyperdifferential operators commute with the twistings $\tau$ and~$\sigma$. Another way to obtain these hyperdifferential operators is to consider the $\CC_\infty$-algebra map $\mathcal{D} : \powser \rightarrow \powser[\![X]\!]$ \[f(t) \mapsto f(t+X) = \sum_{n \geq 0} f_n(t) X^n \] given by replacing the variable $t$ in the power series expansion for $f$ by $t+X$, expanding each $(t+X)^n$ using the binomial theorem, and rearranging to obtain a power series in $X$. 
Then, one has \[\hd{n}{f}=f_n. \] Since $\hd{0}{f}=f$ for all $f\in \powser$, the homomorphism $\mathcal{D}$ can be extended to a $\CC_\infty$-algebra map $\mathcal{D} : \laurent \rightarrow \laurent[\![X]\!]$, and we still have the identity \[ \mathcal{D}(f)= \sum_{n \geq 0} \hd{n}{f} X^n. \] For more background on hyperdifferential operators (iterative higher derivations) see for example \cite[\S 27]{hm:crt}.\footnote{As already mentioned in the introduction, these hyperdifferential operators are not the ones commonly used for constructing $t$-modules.} \medskip When we apply the twisting operators $\tau$ and $\sigma$ as well as the hyperdifferential operators to matrices it is meant that we apply them entry-wise. \medskip We will frequently use the following family (in $n \geq 0$) of homomorphisms of $\CC_\infty$-algebras $\rho_{[n]} : \laurent \rightarrow \Mat_{(n+1) \times (n+1)}(\laurent)$ defined by \begin{equation}\label{eq:rho_n} \rho_{[n]}(f) := \begin{pmatrix} f & \hd{1}{f} & \cdots & \hd{n}{f} \\ 0 & f & \ddots & \vdots \\ \vdots & \ddots & \ddots & \hd{1}{f} \\ 0 & \cdots & 0 & f \end{pmatrix}, \end{equation} which already appears in \cite{am-rp:iddbcppte}. This map arises from the homomorphism $\mathcal{D}$ by evaluation of $X$ at the $(n+1) \times (n+1)$ nilpotent matrix \[ N= \begin{pmatrix} 0 & 1 & 0 & \cdots & 0 \\ \vdots & \ddots & \ddots & \ddots & \vdots \\ \vdots && \ddots & \ddots & 0 \\ \vdots && & \ddots & 1 \\ 0 & \cdots &\cdots &\cdots & 0 \end{pmatrix}. \] We will also apply $\rho_{[n]}$ to square matrices $\Theta\in \Mat_{r\times r}(\laurent)$. 
In that case, $\rho_{[n]}(\Theta)$ is defined to be the block square matrix \begin{equation}\label{eq:rho_n-matrix} \rho_{[n]}(\Theta):= \begin{pmatrix} \Theta & \hd{1}{\Theta} & \hd{2}{\Theta} & \cdots& \hd{n}{\Theta} \\ 0 & \Theta & \hd{1}{\Theta} & \ddots & \vdots \\ \vdots &\ddots & \ddots & \ddots & \hd{2}{\Theta}\\ \vdots & & \ddots & \Theta & \hd{1}{\Theta} \\ 0 & \cdots & \cdots & 0 & \Theta \end{pmatrix} \end{equation} in the ring of $r(n+1)\times r(n+1)$-matrices. As mentioned before $\hd{1}{\Theta}$ etc. is the matrix where we apply the hyperdifferential operators coefficient-wise. It is not hard to check that $\rho_{[n]}:\Mat_{r\times r}(\laurent)\to \Mat_{r(n+1)\times r(n+1)}(\laurent)$ is a ring homomorphism, too. As the hyperdifferential operators commute with twisting, $\rho_{[n]}$ also commutes with twisting. \subsection{Convention on notation}\label{subsec:conventions} In the following sections, we will deal with $t$-modules, $t$-motives and dual $t$-motives. We use the definitions of these terms as given in the survey article \cite{db-mp:ridmtt}. For the convenience of the reader, we repeat these definitions below, but refer the reader to ibid. for more details. For recognizing the objects at first glance, $t$-modules will be denoted by italic letters, like $E$, $t$-motives with serif-less letters, like ${\mathsf{M}}$, and dual $t$-motives in Fraktur font, like~${\mathfrak{M}}$. Bases of finitely generated free modules (over some ring) will always be written as row vectors $\vect{e}=(e_1,\ldots, e_r)$ such that one obtains the familiar identification of the module with a module of column vectors by writing an arbitrary element $x=\sum_{i=1}^r x_ie_i$ as \[ \vect{e}\cdot \begin{pmatrix} x_1\\ \vdots \\ x_r \end{pmatrix}. 
\] A {\bf $t$-module} $(E,\Phi)$ (or shortly, $E$) consists of an algebraic group $E$ over $K$ which is isomorphic to $\GG_a^d$ for some $d>0$, and an $\FF_q$-algebra homomorphism \[ \Phi:\FF_q[t]\to \End_{{\rm grp},\FF_q}(E)\cong \Mat_{d\times d}(K\{\tau\}), \] with the additional property that $\Phi(t)-\theta\cdot \id_E$ induces a nilpotent endomorphism on $\Lie(E)$. In other terms, if one writes \[ \Phi(t)= A_0+A_1\tau+\ldots +A_s\tau^s\in \Mat_{d\times d}(K\{\tau\}) \] with respect to some isomorphism $\End_{{\rm grp},\FF_q}(E)\cong \Mat_{d\times d}(K\{\tau\})$, then the matrix $A_0-\theta\cdot \mathds{1}_d\in \Mat_{d\times d}(K)$ is nilpotent. \medskip A {\bf $t$-motive} ${\mathsf{M}}$ is a left $K[t]\{\tau\}$-module which is free and finitely generated as $K\{\tau\}$-module, and such that \[ (t-\theta)^\ell({\mathsf{M}})\subseteq K[t]\cdot \tau({\mathsf{M}}) \] for some $\ell\in \NN$. A $t$-motive ${\mathsf{M}}$ is called {\bf abelian} if it is also finitely generated as $K[t]$-module in which case it is even free as $K[t]$-module. An abelian $t$-motive ${\mathsf{M}}$ is called {\bf pure}, if there exists a $K\ps{1/t}$-lattice $H$ inside ${\mathsf{M}}\otimes_{K[t]} K\ls{1/t}$ and $u,v\geq 1$ such that \[ t^uH= K\ps{1/t}\cdot \tau^v H. \] The fraction $w=\frac{u}{v}$ is called the {\bf weight} of ${\mathsf{M}}$. Given an abelian $t$-motive ${\mathsf{M}}$ with $K[t]$-basis $\vect{e}=(e_1,\ldots, e_r)$, then there is a matrix $\Theta\in \Mat_{r\times r}(K[t])$ representing the $\tau$-action on ${\mathsf{M}}$ with respect to $\{e_1,\ldots, e_r\}$, i.e.~ \[ \tau( e_j)= \sum_{h=1}^r \Theta_{hj}e_h \] for all $j=1,\ldots, r$. This will be written in matrix notation as \[ \tau (\vect{e})= \vect{e}\cdot \Theta.\] For an arbitrary element $x=\sum_{i=1}^r x_ie_i$ one therefore has \[ \tau(x)=\vect{e}\cdot \Theta \cdot \begin{pmatrix} x_1\\ \vdots \\ x_r \end{pmatrix}^\tau. 
\] \medskip Writing the basis as a row vector instead of a column vector, as for example in \cite{mp:tdadmaicl}, causes the difference equations for the rigid analytic trivializations to have a different form which we will review now. However, the usual form is obtained by taking transposes of the matrices given here: Given an abelian $t$-motive ${\mathsf{M}}$ with $K[t]$-basis $\vect{e}=(e_1,\ldots, e_r)$ and $\Theta\in \Mat_{r\times r}(K[t])$ such that \[ \tau (\vect{e})= \vect{e}\cdot \Theta,\] a {\bf rigid analytic trivialization} (if it exists) is a matrix $\Upsilon\in \GL_{r}(\TT)$ such that $\tau(\vect{e}\cdot \Upsilon)=\vect{e}\cdot \Upsilon$, i.e.~such that \[ \Theta\cdot \Upsilon^\tau=\Upsilon. \] If $\Upsilon$ exists, ${\mathsf{M}}$ is called {\bf rigid analytically trivial}. In \cite{ga:tm}, Anderson associated to a $t$-module $E$ a $t$-motive $\mathsf{E}:=\Hom_{{\rm grp},\FF_q}(E,\GG_a)$ with $t$-action given by composition with $\Phi_t\in \End_{{\rm grp},\FF_q}(E)$ and left-$K\{\tau\}$-action given by composition with elements in $K\{\tau\}\cong \End_{{\rm grp},\FF_q}(\GG_a)$. A $t$-module is then called {\bf abelian} if the associated $t$-motive is abelian, and Anderson proved (cf.~\cite[Thm.~1]{ga:tm}) that this correspondence induces an anti-equivalence of categories between abelian $t$-modules and abelian $t$-motives. However, the proof even shows that it induces an anti-equivalence of categories between $t$-modules and $t$-motives. \medskip A {\bf dual $t$-motive} ${\mathfrak{M}}$ is a left $\bar{K}[t]\{\sigma\}$-module that is free and finitely generated as $\bar{K}\{\sigma\}$-module, and such that \[ (t-\theta)^\ell({\mathfrak{M}})\subseteq \sigma({\mathfrak{M}}) \] for some $\ell\in \NN$. A dual $t$-motive is called {\bf $t$-finite} if it is also finitely generated as $\bar{K}[t]$-module in which case it is even free as $\bar{K}[t]$-module. 
For a $t$-finite dual $t$-motive ${\mathfrak{M}}$ with $\bar{K}[t]$-basis $\vect{e}=(e_1,\ldots, e_r)$ and $\tilde{\Theta}\in \Mat_{r\times r}(\bar{K}[t])$ such that \[ \sigma (\vect{e})= \vect{e}\cdot \tilde{\Theta}\] a {\bf rigid analytic trivialization} (if it exists) is a matrix $\Psi\in \GL_{r}(\TT)$ such that $\sigma(\vect{e}\cdot \Psi^{-1})=\vect{e}\cdot \Psi^{-1}$, i.e.~such that \[ \Psi\cdot \tilde{\Theta}=\Psi^\sigma. \] If $\Psi$ exists, ${\mathfrak{M}}$ is called {\bf rigid analytically trivial}. Similarly, as for $t$-motives, Anderson associated to a $t$-module $E$ over $\bar{K}$ a dual $t$-motive $\mathfrak{E}:=\Hom_{{\rm grp},\FF_q}(\GG_a,E)$ with $t$-action given by composition with $\Phi_t\in \End_{{\rm grp},\FF_q}(E)$ and left-$\bar{K}\{\sigma\}$-action given by composition with elements in $\bar{K}\{\sigma\}\cong \bar{K}\{\tau\}^{\rm op}\cong \End_{{\rm grp},\FF_q}(\GG_a)^{\rm op}$. Anderson showed (cf.~\cite{uh-akj:pthshcff}) that this induces an equivalence of categories between $t$-modules over $\bar{K}$ and dual $t$-motives. \section{Prolongations of $t$-motives}\label{sec:prolongations-of-t-motives} In this section, we introduce a construction of new $t$-motives from old ones which we call \textit{prolongation}. The construction is taken from \cite{mk:tffo} where prolongations of difference modules are described. We also show (see Theorems \ref{thm:prolongation-motive-abelian} and \ref{thm:pure-and-r-a-t}) that the prolongations inherit the properties of abelianness, rigid analytic triviality as well as pureness from the original $t$-motive. 
\begin{defn}\label{def:prolongation} For a $K[t]\{\tau\}$-module ${\mathsf{M}}$ and $k\geq 0$, the {\bf $k$-th prolongation} of ${\mathsf{M}}$ is the $K[t]$-module $\rho_k{\mathsf{M}}$ which is generated by symbols $D_im$, for $i=0,\ldots, k$, $m\in {\mathsf{M}}$, subject to the relations \begin{enumerate} \item $D_i(m_1+m_2)=D_im_1+D_im_2$, \item\label{item:second-relation} $D_i(a\cdot m)=\sum_{i_1+i_2=i} \hd{i_1}{a}\cdot D_{i_2}m$, \end{enumerate} for all $m,m_1,m_2\in {\mathsf{M}}$, $a\in K[t]$ and $i=0,\ldots, k$. The semi-linear $\tau$-action on $\rho_k{\mathsf{M}}$ is given by \[ \tau( a\cdot D_im)=a^{\tau}\cdot D_i(\tau(m)). \] for $a\in K[t]$, $m\in {\mathsf{M}}$. \end{defn} One should think of $D_im$ as being the formal $i$-th hyperderivative of the element~$m$. \begin{rem}\label{rem:prolongation-is-extension} It is not difficult to verify that the definition of the $\tau$-action is well-defined. Hence, the $k$-th prolongation $\rho_k{\mathsf{M}}$ is again a $K[t]\{\tau\}$-module. Furthermore, $\rho_0{\mathsf{M}}$ is naturally isomorphic to ${\mathsf{M}}$ (via $D_0m\mapsto m$), and for $0\leq l<k$ the $l$-th prolongation $\rho_l{\mathsf{M}}$ naturally is a $K[t]\{\tau\}$-submodule of $\rho_k{\mathsf{M}}$. For $0\leq l<k$, we even obtain a short exact sequence of $K[t]\{\tau\}$-modules \[ 0\longrightarrow \rho_l{\mathsf{M}} \longrightarrow \rho_k{\mathsf{M}} \xrightarrow{{\rm pr}} \rho_{k-l-1}{\mathsf{M}} \to 0 \] where ${\rm pr}(D_im):= D_{i-l-1}m$ for $i>l$ and all $m\in {\mathsf{M}}$, as well as ${\rm pr}(D_im):=0$ for $i\leq l$ and all $m\in {\mathsf{M}}$. In particular, taking $l=k-1$ and using the identification $\rho_0{\mathsf{M}}\cong{\mathsf{M}}$, we obtain the short exact sequence \begin{equation}\label{eq:short-exact-sequence} 0\longrightarrow \rho_{k-1}{\mathsf{M}} \longrightarrow \rho_k{\mathsf{M}} \longrightarrow {\mathsf{M}} \to 0 . 
\tag{*} \end{equation} Inductively, we see that $\rho_k{\mathsf{M}}$ is a $(k+1)$-fold extension of ${\mathsf{M}}$ with itself. From this description as a $(k+1)$-fold extension of ${\mathsf{M}}$ with itself, we will be able to transfer several additional properties of ${\mathsf{M}}$ to the prolongation $\rho_k{\mathsf{M}}$ (see Theorem \ref{thm:prolongation-motive-abelian} and Theorem \ref{thm:pure-and-r-a-t}). \end{rem} \begin{lem}\label{lem:prolongation-as-K-v-s} As a $K$-vector space, the $k$-th prolongation $\rho_k{\mathsf{M}}$ is generated by the symbols $D_im$, for $i=0,\ldots, k$, $m\in {\mathsf{M}}$, subject to the relations \[ D_i(x_1m_1+x_2m_2)= x_1\cdot D_im_1+x_2\cdot D_im_2 \] for all $m_1,m_2\in {\mathsf{M}}$, $x_1,x_2\in K$ and $i=0,\ldots, k$. The actions of $t$ and $\tau$ are described by \begin{eqnarray*} t\cdot D_im &=& D_i(tm)-D_{i-1}m \\ \tau( D_im ) &=& D_i(\tau(m)) \end{eqnarray*} for $m\in {\mathsf{M}}$, $i=0,\ldots, k$ where we set $D_{-1}m:=0$. \end{lem} \begin{proof} Applying relation \eqref{item:second-relation} above to $a=t$, leads to \[ D_i(tm)= t\cdot D_im + 1\cdot D_{i-1}m \] for all $m\in {\mathsf{M}}$. Hence, $ t\cdot D_im=D_i(tm)-D_{i-1}m$.\\ This shows that $K[t]$-multiples of the $D_im$ are in the $K$-span of all $D_i(m')$, and therefore $\rho_k{\mathsf{M}}$ is generated by all $D_im$ as a $K$-vector space. Restricting relation \eqref{item:second-relation} to $a\in K$, we obtain $D_i(a\cdot m)=a\cdot D_im$ for all $a\in K$ and $m\in {\mathsf{M}}$. Hence, the relations above reduce to \[ D_i(x_1m_1+x_2m_2)= x_1\cdot D_im_1+x_2\cdot D_im_2 \] for all $m_1,m_2\in {\mathsf{M}}$, $x_1,x_2\in K$ and $i=0,\ldots, k$. The given actions are clear from the equation above and the definition of~$\rho_k{\mathsf{M}}$. \end{proof} \begin{thm}\label{thm:prolongation-motive-abelian} Let ${\mathsf{M}}$ be a $t$-motive. Then the $k$-th prolongation $\rho_k{\mathsf{M}}$ is a $t$-motive for all $k\geq 0$. 
If ${\mathsf{M}}$ is abelian, then so is $\rho_k{\mathsf{M}}$. \end{thm} \begin{proof} By Remark \ref{rem:prolongation-is-extension}, we have an exact sequence of $K[t]\{\tau\}$-modules \[ 0\longrightarrow \rho_{k-1}{\mathsf{M}} \longrightarrow \rho_k{\mathsf{M}} \longrightarrow {\mathsf{M}} \to 0 \] using the identification $\rho_0{\mathsf{M}}\cong{\mathsf{M}}$ (see Equation \eqref{eq:short-exact-sequence}). Hence, it follows by induction on $k$ that $\rho_k{\mathsf{M}}$ is free and finitely generated as $K\{\tau\}$-module if ${\mathsf{M}}$ is. Furthermore, if $\ell\in \NN$ is such that \[ (t-\theta)^\ell ({\mathsf{M}})\subseteq K[t] \cdot \tau({\mathsf{M}}), \] we obtain \[ (t-\theta)^\ell (\rho_k{\mathsf{M}})\subseteq K[t] \cdot \tau (\rho_k{\mathsf{M}})+ \rho_{k-1}{\mathsf{M}}, \] and hence, inductively, \[ (t-\theta)^{\ell\cdot (k+1)} (\rho_k{\mathsf{M}})\subseteq K[t] \cdot \tau (\rho_k{\mathsf{M}}).\] Therefore, $\rho_k{\mathsf{M}}$ is a $t$-motive. If ${\mathsf{M}}$ is abelian, i.e.~free and finitely generated as a $K[t]$-module, then $\rho_k{\mathsf{M}}$ is free and finitely generated as a $K[t]$-module, since it is a $(k+1)$-fold extension of copies of~${\mathsf{M}}$. \end{proof} \begin{lem}\label{lem:k-tau-basis-of-prolongation} Let ${\mathsf{M}}$ be a $t$-motive, and $\vect{b}=(b_1,\ldots,b_d)$ be a $K\{\tau\}$-basis of ${\mathsf{M}}$. Then a $K\{\tau\}$-basis of $\rho_k{\mathsf{M}}$ is given by \[ \vect{Db}=(D_0b_1,\ldots, D_0b_d, D_1b_1,\ldots, D_1b_d,\ldots,\ldots, D_kb_1,\ldots, D_kb_d).\] \end{lem} \begin{proof} From the short exact sequence \eqref{eq:short-exact-sequence} we see that a $K\{\tau\}$-basis of $\rho_k{\mathsf{M}}$ is given by the join of a $K\{\tau\}$-basis of $\rho_{k-1}{\mathsf{M}}$ and the preimage of a basis of ${\mathsf{M}}$. As such a preimage is given by $(D_kb_1,\ldots, D_kb_d)$ the proof follows by induction. 
\end{proof} We are now going to explicitly describe the $t$-motive $\rho_k{\mathsf{M}}$ as $K[t]$-module with $\tau$-action in the abelian case, i.e.~we give a basis as $K[t]$-module as well as a matrix representation of the $\tau$-action with respect to this $K[t]$-basis. \smallskip Assume that ${\mathsf{M}}$ is an abelian $t$-motive, and let $\vect{e}=(e_1,\ldots, e_r)$ be a $K[t]$-basis of ${\mathsf{M}}$. As in the previous lemma, from the short exact sequence \eqref{eq:short-exact-sequence} in Remark \ref{rem:prolongation-is-extension} we obtain that $\vect{De}=(D_0e_1,\ldots, D_0e_r, D_1e_1,\ldots, D_1e_r,\ldots,\ldots, D_ke_1,\ldots, D_ke_r)$ is a $K[t]$-basis of $\rho_k{\mathsf{M}}$. Let $\Theta\in \Mat_{r\times r}(K[t])$ be the matrix representing the $\tau$-action on ${\mathsf{M}}$ with respect to $\vect{e}=(e_1,\ldots, e_r)$, i.e.~ \[ \tau( e_j)= \sum_{h=1}^r \Theta_{hj}e_h \] for all $j=1,\ldots, r$, or in matrix notation \[ \tau (\vect{e})= \vect{e}\cdot \Theta.\] Then $\tau$ acts on $D_ie_j\in \rho_k{\mathsf{M}}$ as \[ \tau(D_ie_j)= D_i(\tau(e_j))=D_i(\sum_{h=1}^r \Theta_{hj}e_h) =\sum_{h=1}^r \sum_{i_1+i_2=i} \hd{i_1}{\Theta_{hj}}\cdot D_{i_2}e_h. \] In block matrix notation this reads as \[ \tau(\vect{De}) = \vect{De}\cdot \begin{pmatrix} \Theta & \hd{1}{\Theta} & \hd{2}{\Theta} & \cdots& \hd{k}{\Theta} \\ 0 & \Theta & \hd{1}{\Theta} & \ddots & \vdots \\ \vdots &\ddots & \ddots & \ddots & \hd{2}{\Theta}\\ \vdots & & \ddots & \Theta & \hd{1}{\Theta} \\ 0 & \cdots & \cdots & 0 & \Theta \end{pmatrix} = \vect{De}\cdot \rho_{[k]}(\Theta), \] where we use the homomorphism $\rho_{[k]}$ defined in Equation \eqref{eq:rho_n-matrix}. \begin{thm}\label{thm:pure-and-r-a-t} Let ${\mathsf{M}}$ be an abelian $t$-motive, $k\geq 0$ and $\rho_k{\mathsf{M}}$ the $k$-th prolongation of ${\mathsf{M}}$. \begin{enumerate} \item If ${\mathsf{M}}$ is rigid analytically trivial, then $\rho_k{\mathsf{M}}$ is rigid analytically trivial. 
\item If ${\mathsf{M}}$ is pure of weight $w$, then $\rho_k{\mathsf{M}}$ is pure of weight $w$. \end{enumerate} \end{thm} \begin{proof} Let ${\mathsf{M}}$ be given with respect to a basis $\vect{e}=(e_1,\ldots, e_r)$ by the $\tau$-action \[ \tau (\vect{e})= \vect{e}\cdot \Theta\] for some $\Theta\in \Mat_{r\times r}(K[t])$. Assume that ${\mathsf{M}}$ is rigid analytically trivial, and that $\Upsilon\in \GL_r(\TT)$ is a rigid analytic trivialization of ${\mathsf{M}}$, i.e.~$\Upsilon$ satisfies the difference equation \[ \Upsilon = \Theta \Upsilon^\tau .\] Since twisting commutes with $\rho_{[k]}$ and $\rho_{[k]}$ is a ring homomorphism, we have \[ \rho_{[k]}( \Theta)\left(\rho_{[k]}(\Upsilon )\right)^\tau=\rho_{[k]}( \Theta \Upsilon^\tau ) = \rho_{[k]}(\Upsilon ). \] Since the $\tau$-action on $\rho_k{\mathsf{M}}$ with respect to $\vect{De}$ from above is given by $\tau(\vect{De}) = \vect{De}\cdot \rho_{[k]}(\Theta)$, this just means that $ \rho_{[k]}(\Upsilon )\in \GL_{r(k+1)}(\TT)$ is a rigid analytic trivialization of $\rho_k{\mathsf{M}}$. Assume that ${\mathsf{M}}$ is pure of weight $w$, and let $H$ be a $K\ps{1/t}$-lattice inside ${\mathsf{M}}\otimes_{K[t]} K\ls{1/t}$ such that \[ t^u H=K\ps{1/t}\cdot \tau^vH \] for appropriate $u,v\geq 1$. After choosing a $K\ps{1/t}$-basis $\vect{b}=(b_1,\ldots, b_r)$ of $H$, we have \[ \tau^v(\vect{b})=\vect{b}\cdot t^uA \] for some $A\in \GL_r(K\ps{1/t})$. By the explicit description of the $\tau$-action on $\rho_k{\mathsf{M}}$, we therefore get \begin{eqnarray*} \tau^v(\vect{Db})&=&\vect{Db}\cdot \rho_{[k]}(t^uA)\\ &=& \vect{Db}\cdot t^u \begin{pmatrix} A & t^{-u}\hd{1}{t^uA} & t^{-u}\hd{2}{t^uA} & \cdots& t^{-u}\hd{k}{t^uA} \\ 0 & A & t^{-u}\hd{1}{t^uA} & \ddots & \vdots \\ \vdots &\ddots & \ddots & \ddots & t^{-u}\hd{2}{t^uA}\\ \vdots & & \ddots & A & t^{-u}\hd{1}{t^uA} \\ 0 & \cdots & \cdots & 0 & A \end{pmatrix} . 
\end{eqnarray*} For Laurent series $f=\sum_{j=j_0}^\infty x_j t^{-j}$ in $1/t$ we have \[ \hd{n}{f} =\sum_{j=j_0}^\infty \binom{-j}{n} x_j t^{-j-n}. \] In particular, for any power series $f=\sum_{j=0}^\infty x_j t^{-j}\in K\ps{1/t}$ and $u\in \ZZ$, \begin{eqnarray*} t^{-u}\cdot \hd{n}{t^uf} &=& t^{-u}\cdot \hd{n}{\sum_{j=0}^\infty x_j t^{-j+u}} = t^{-u}\cdot \sum_{j=0}^\infty \binom{-j+u}{n} x_j t^{-j+u-n}\\ &=& \sum_{j=0}^\infty \binom{-j+u}{n} x_j t^{-j-n}\in t^{-n}K\ps{1/t} \subseteq K\ps{1/t}. \end{eqnarray*} Hence, the block upper triangular matrix above has entries in $ K\ps{1/t} $, and is moreover invertible over $ K\ps{1/t}$, as $A$ is invertible. Hence, by choosing $\rho_kH$ to be the $K\ps{1/t}$-lattice inside $\rho_k{\mathsf{M}}\otimes_{K[t]} K\ls{1/t}$ generated by $\vect{Db}$ we obtain \[ K\ps{1/t}\cdot \tau^v(\rho_kH)= t^u\rho_kH. \] Hence, $\rho_k{\mathsf{M}}$ is pure of weight $\frac{u}{v}=w$. \end{proof} \begin{rem} Starting with a Drinfeld module, the associated $t$-motive is abelian, pure and rigid analytically trivial. Hence, by taking its prolongations we obtain new abelian, pure and rigid analytically trivial $t$-motives of arbitrary dimension. \end{rem} \section{Prolongations of dual $t$-motives}\label{sec:prolongations-of-dual-t-motives} Since we will use the dual $t$-motives in the proof in Section \ref{sec:algebraic-independence}, we review the construction and explicit descriptions in this case. For the definition of a prolongation of a dual $t$-motive ${\mathfrak{M}}$ we just transfer the definition for the $t$-motives above. 
\begin{defn}\label{def:prolongation-dual-motive} For a dual $t$-motive ${\mathfrak{M}}$ over $\bar{K}[t]$ and $k\geq 0$, the {\bf $k$-th prolongation} of ${\mathfrak{M}}$ is the $\bar{K}[t]$-module $\rho_k{\mathfrak{M}}$ which is generated by symbols $D_im$, for $i=0,\ldots, k$, $m\in {\mathfrak{M}}$, subject to the relations \begin{enumerate} \item $D_i(m_1+m_2)=D_im_1+D_im_2$, \item $D_i(a\cdot m)=\sum_{i_1+i_2=i} \hd{i_1}{a}\cdot D_{i_2}m$, \end{enumerate} for all $m,m_1,m_2\in {\mathfrak{M}}$, $a\in \bar{K}[t]$ and $i=0,\ldots, k$. The semi-linear $\sigma$-action on $\rho_k{\mathfrak{M}}$ is given by \[ \sigma( a\cdot D_im)=a^{\sigma}\cdot D_i(\sigma(m)) \] for $a\in \bar{K}[t]$, $m\in {\mathfrak{M}}$. \end{defn} We obtain similar explicit descriptions as for abelian $t$-motives. \begin{prop} Let ${\mathfrak{M}}$ be a $t$-finite dual $t$-motive with $\bar{K}[t]$-basis $\vect{e}=(e_1,\ldots, e_r)$ and $\tilde{\Theta}\in \Mat_{r\times r}(\bar{K}[t])$ the matrix such that \[ \sigma(\vect{e})=\vect{e}\cdot \tilde{\Theta}. \] Then $\vect{De}=(D_0e_1,\ldots, D_0e_r, D_1e_1,\ldots, D_1e_r,\ldots,\ldots, D_ke_1,\ldots, D_ke_r)$ is a $\bar{K}[t]$-basis of $\rho_k{\mathfrak{M}}$ and \[ \sigma(\vect{De}) =\vect{De}\cdot \rho_{[k]}(\tilde{\Theta}). \] If ${\mathfrak{M}}$ is rigid analytically trivial with rigid analytic trivialization $\Psi$, i.e.~ $\Psi^\sigma = \Psi\cdot \tilde{\Theta}$, then $\rho_k{\mathfrak{M}}$ is rigid analytically trivial and $\rho_{[k]}(\Psi)$ is a rigid analytic trivialization with respect to $\vect{De}$. \end{prop} \begin{proof} The proof is along the same lines as for $t$-motives. \end{proof} \section{Prolongations of $t$-modules}\label{sec:prolongations-of-t-modules} \begin{defn} Let $(E,\Phi)$ be a $t$-module, and $\mathsf{E}$ the corresponding $t$-motive. Then we define the $k$-th prolongation $(\rho_kE,\rho_k\Phi)$ of $(E,\Phi)$ to be the $t$-module associated to $\rho_k\mathsf{E}$. 
\end{defn} \begin{thm} Let $(E,\Phi)$ be a $t$-module of dimension $d$, and \[ \Phi_t=A_0+A_1\tau+\ldots +A_s\tau^s \in \Mat_{d\times d}(K\{\tau\}) \] with respect to some isomorphism $E\cong \GG_a^d$. Then the $k$-th prolongation $(\rho_kE,\rho_k\Phi)$ of $(E,\Phi)$ is of dimension $d(k+1)$ and $(\rho_k\Phi)_t$ is given in block matrix form as \[ (\rho_k\Phi)_t= \begin{pmatrix} A_0 & 0 & \cdots &\cdots & 0 \\ -\mathds{1}_d & \ddots & \ddots && \vdots \\ 0 & \ddots & \ddots & \ddots& \vdots \\ \vdots & \ddots & \ddots & \ddots& 0\\ 0&\cdots &0 & -\mathds{1}_d & A_0 \end{pmatrix}+ \diag(A_1)\tau+\ldots + \diag(A_s)\tau^s, \] where $\mathds{1}_d$ is the $(d\times d)$-identity matrix, and $\diag(A_i)$ is the block diagonal matrix with diagonal entries all equal to $A_i$ for $i=1,\ldots, s$. \end{thm} \begin{proof} Let $\vect{e}=(e_1,\ldots,e_d)$ be the basis of $E$ corresponding to the isomorphism $E\cong \GG_a^d$, and hence the $t$-action is given by \[ t(\vect{e})=\vect{e}\cdot \Phi_t. \] Then a $K\{\tau\}$-basis for the $t$-motive $\mathsf{E}$ is given by the dual basis $\vect{e^\vee}=(e_1^\vee,\ldots,e_d^\vee)$ and the $t$-action on $\mathsf{E}$ is given by \[ t(\vect{e^\vee})=\vect{e^\vee}\cdot \transp{\Phi_t}. \] By Lemma \ref{lem:k-tau-basis-of-prolongation}, a $K\{\tau\}$-basis of $\rho_k\mathsf{E}$ is given by \[ \vect{De^\vee}=(D_0e_1^\vee,\ldots, D_0e_d^\vee, D_1e_1^\vee,\ldots, D_1e_d^\vee,\ldots,\ldots, D_ke_1^\vee,\ldots, D_ke_d^\vee),\] and we have \[ t(D_ie_j^\vee)= D_i(te_j^\vee) - D_{i-1}e_j^\vee \] for $i=0,\ldots,k$ and $j=1,\ldots, d$, where we set $D_{-1}e_j^\vee=0$. In block matrix notation this is just \[ t(\vect{De^\vee})=\vect{De^\vee}\cdot \begin{pmatrix} \transp{\Phi_t} & -\mathds{1}_d & 0 & \cdots & 0 \\ 0 & \transp{\Phi_t} & \ddots & \ddots & \vdots \\ \vdots & \ddots& \ddots & \ddots & 0 \\ \vdots & & \ddots& \ddots & -\mathds{1}_d\\ 0 & \cdots& \cdots & 0& \transp{\Phi_t} \end{pmatrix}. 
\] This finally shows that $\rho_kE$ is isomorphic to $\GG_a^{d(k+1)}$ with basis $\vect{De}$, the dual basis of $\vect{De^\vee}$, and the $t$-action is given by \[ t(\vect{De})= \vect{De}\cdot \begin{pmatrix} \Phi_t & 0 & \cdots& \cdots & 0 \\ -\mathds{1}_d & \Phi_t & \ddots & & \vdots \\ 0 & \ddots & \ddots & \ddots &\vdots \\ \vdots & \ddots& \ddots & \ddots & 0 \\ 0 & \cdots& 0 & -\mathds{1}_d & \Phi_t \end{pmatrix}. \] Hence, \[ (\rho_k\Phi)_t= \begin{pmatrix} A_0 & 0 & \cdots &\cdots & 0 \\ -\mathds{1}_d & \ddots & \ddots && \vdots \\ 0 & \ddots & \ddots & \ddots& \vdots \\ \vdots & \ddots & \ddots & \ddots& 0\\ 0&\cdots &0 & -\mathds{1}_d & A_0 \end{pmatrix}+ \diag(A_1)\tau+\ldots + \diag(A_s)\tau^s. \] \end{proof} \section{Prolongations of tensor powers of the Carlitz motive}\label{sec:carlitz-case} In this section, we apply the constructions of prolongations to the tensor powers of the Carlitz module, the Carlitz motive, as well as the dual Carlitz motive. Let us first recall the (dual) Carlitz motive and its tensor powers. The Carlitz module $(C,\phi)$ is given by $C\cong \GG_a$ and \[ \phi:\FF_q[t]\to \End(\GG_{a,K})=K\{\tau\}, f\mapsto \phi_f \] given by $\phi_t=\theta+\tau$. The Carlitz motive $\textsf{C}=\Hom_K(C,\GG_a)\cong K\{\tau\}$ is also free of rank $1$ as $K[t]$-module, and with respect to the basis element $e=1\in K\{\tau\}\cong \textsf{C}$ the $\tau$-action is given by $\tau(e)=e\cdot (t-\theta).$ The $n$-th tensor power of the Carlitz motive $\textsf{C}$ is the $K[t]$-module \[ \textsf{C}^{\otimes n}=\underbrace{\textsf{C}\otimes_{K[t]}\ldots \otimes_{K[t]}\textsf{C}}_{n-\text{times}} \] with diagonal $\tau$-action. I.e.~on the canonical basis element $e_{\otimes n}$, we have \[ \tau(e_{\otimes n})=e_{\otimes n}\cdot (t-\theta)^n. \] Let $\omega\in \TT$ be the Anderson-Thakur function. 
Then a rigid analytic trivialization for $\textsf{C}$ is given by $\frac{1}{\omega}$, since $\omega$ satisfies the difference equation $\omega^\tau=(t-\theta)\omega$. Hence, a rigid analytic trivialization for $\textsf{C}^{\otimes n}$ is given by $\omega^{-n}$. \bigskip The dual Carlitz motive $\mathfrak{C}$ is the $\bar{K}[t]$-module of rank $1$ with $\sigma$-action given by \[ \sigma(e)=e\cdot (t-\theta), \] with respect to some basis element $e\in \mathfrak{C}$, and its $n$-th tensor power $\mathfrak{C}^{\otimes n}$ has $\sigma$-action given by \[ \sigma(e_{\otimes n})=e_{\otimes n}\cdot (t-\theta)^n. \] The entire function $\Omega(t):=\frac{1}{(t-\theta)\omega(t)}$ is a rigid analytic trivialization of the Carlitz dual $t$-motive $\mathfrak{C}$, since \[ \Omega^\sigma=\left( \frac{1}{\omega^\tau}\right)^\sigma= \frac{1}{\omega} = (t-\theta)\Omega. \] Therefore, $\Omega(t)^n$ is a rigid analytic trivialization for the $n$-th tensor power $\mathfrak{C}^{\otimes n}$. \begin{prop}\label{prop:prolong-tensor-power} The $k$-th prolongation of the motive $\textsf{C}^{\otimes n}$ is the $K[t]$-module $\rho_k(\textsf{C}^{\otimes n}):=K[t]^{k+1}$ with $\tau$-action given by \[ \tau\begin{pmatrix} f_0\\ f_1\\ \vdots \\ f_k \end{pmatrix} = \begin{pmatrix} t-\theta & 1 & 0 & \cdots & 0 \\ 0 & t-\theta & \ddots & \ddots & \vdots \\ \vdots & \ddots& \ddots & \ddots & 0 \\ \vdots & & \ddots& \ddots & 1\\ 0 & \cdots& \cdots & 0& t-\theta \end{pmatrix}^n \cdot \begin{pmatrix} f_0^\tau\\ f_1^\tau\\ \vdots \\ f_k^\tau \end{pmatrix} .\] Its rigid analytic trivialization is given by \[ \Upsilon = \rho_{[k]}(\omega^{-n})= \begin{pmatrix} \omega & \hd{1}{\omega} & \cdots & \hd{k}{\omega} \\ 0 & \omega & \ddots & \vdots \\ \vdots & \ddots & \ddots & \hd{1}{\omega} \\ 0 & \cdots & 0 & \omega \end{pmatrix}^{-n} . \] \end{prop} \begin{proof} This follows from the general description in Section \ref{sec:prolongations-of-t-motives}. 
One just has to recognize that $\rho_{[k]}(t-\theta)$ is just the matrix \[ \begin{pmatrix} t-\theta & 1 & 0 & \cdots & 0 \\ 0 & t-\theta & \ddots & \ddots & \vdots \\ \vdots & \ddots& \ddots & \ddots & 0 \\ \vdots & & \ddots& \ddots & 1\\ 0 & \cdots& \cdots & 0& t-\theta \end{pmatrix}. \] \end{proof} \begin{prop}\label{prop:prolong-dual-tensor-power} The $k$-th prolongation of the dual motive $\mathfrak{C}^{\otimes n}$ is the $\bar{K}[t]$-module $\rho_k(\mathfrak{C}^{\otimes n}):=\bar{K}[t]^{k+1}$ with $\sigma$-action given by \[ \sigma\begin{pmatrix} f_0\\ f_1\\ \vdots \\ f_k \end{pmatrix} = \begin{pmatrix} t-\theta & 1 & 0 & \cdots & 0 \\ 0 & t-\theta & \ddots & \ddots & \vdots \\ \vdots & \ddots& \ddots & \ddots & 0 \\ \vdots & & \ddots& \ddots & 1\\ 0 & \cdots& \cdots & 0& t-\theta \end{pmatrix}^n \cdot \begin{pmatrix} f_0^\sigma\\ f_1^\sigma\\ \vdots \\ f_k^\sigma \end{pmatrix} .\] Its rigid analytic trivialization is given by \[ \Psi = \rho_{[k]}(\Omega^{n}) = \begin{pmatrix} \Omega^n & \hd{1}{\Omega^n} & \hd{2}{\Omega^n} & \cdots&\hd{k}{\Omega^n} \\ 0 & \Omega^n & \hd{1}{\Omega^n} & \ddots & \vdots \\ \vdots &\ddots & \ddots & \ddots & \hd{2}{\Omega^n}\\ \vdots & & \ddots & \Omega^n & \hd{1}{\Omega^n} \\ 0 & \cdots & \cdots & 0 & \Omega^n \end{pmatrix}. \] \end{prop} For the description of the corresponding $t$-modules we restrict to the prolongations of the Carlitz module, and leave the descriptions for the tensor powers as an exercise for the reader. \begin{prop} The $k$-th prolongation $(\rho_kC,\rho_k\phi)$ of the Carlitz module is the $t$-module of dimension $k+1$ with \[ \rho_k\phi:\FF_q[t]\to \Mat_{(k+1)\times(k+1)}(K)\{\tau\} \] given by \[ ( \rho_k\phi)_t = \begin{pmatrix} \theta & 0 & \cdots &\cdots & 0 \\ -1 &\theta & \ddots && \vdots \\ 0 & \ddots & \ddots & \ddots& \vdots \\ \vdots & \ddots & \ddots & \ddots& 0\\ 0&\cdots &0 & -1 & \theta \end{pmatrix}+ \mathds{1}_{k+1}\cdot \tau. 
\] \end{prop} \begin{proof} This follows from the general description in Section \ref{sec:prolongations-of-t-modules}. \end{proof} \section{Hypertranscendence of the Anderson-Thakur function}\label{sec:omega-hypertranscendental} In this section, we show that the Anderson-Thakur function $\omega$ is hypertranscendental, i.e.~that $\omega$ and all its hyperderivatives $\hd{n}{\omega}$ ($n>0$) are algebraically independent over the field $\bar{K}(t)$. This fact is also given by F.~Pellarin in \cite[Prop.~27]{fp:vscce} by different methods. We first recall a fact about the evaluations of the Anderson-Thakur function $\omega$ and its hyperderivatives at roots of unity given in \cite{ba-fp:ugtsls} and \cite{am-rp:iddbcppte}. The evaluation of $\hd{n}{\omega}$ at $t=\zeta$ will be shortly denoted by $\hd{n}{\omega}(\zeta)$. Moreover, in this section, $K$ will denote the field $\FF_q(\theta)$. \begin{thm} Let $\zeta\in \bar{\FF}_q$, let $\pfrak\in \FF_q[t]$ be the minimal polynomial of $\zeta$, and let $d=\deg(\pfrak)$ be its degree. For $n\geq 0$, the Carlitz $\pfrak^{n+1}$-torsion extension of $K(\zeta)$ is generated by $\hd{n}{\omega}(\zeta)$, i.e.~ \[ K(\zeta)(C[\pfrak^{n+1}])=K(\zeta,\hd{n}{\omega}(\zeta)). \] The minimal polynomial of $\omega(\zeta)$ over $K(\zeta)$ is given by \[ X^{q^d-1}- \beta(\zeta) \in K(\zeta)[X], \] where $\beta(t)=\prod_{h=0}^{d-1} (t-\theta^{q^h})\in K[t]\subseteq \TT$. For $n\geq 1$, the minimal polynomial of $\hd{n}{\omega}(\zeta)$ over $K(\zeta)(C[\pfrak^{n}])$ is given by \[ X^{q^d}-\beta(\zeta)X-\xi_n(\zeta)\in K(\zeta)(C[\pfrak^{n}])[X], \] where \[ \xi_n(t)= \sum_{l=1}^n \hd{l}{\beta}\cdot \hd{n-l}{\omega}\in \TT. \] \end{thm} \begin{proof} The first part is shown in \cite[Thm.~3.3]{ba-fp:ugtsls} where also the minimal polynomials occur. The minimality of these polynomials, however, is shown in \cite[Thm.~3.8 \& Rem.~3.9]{am-rp:iddbcppte}. 
\end{proof} \begin{thm}\label{thm:hypertranscendence} The Anderson-Thakur function $\omega(t)$ is hypertranscendental over $\bar{K}(t)$, i.e.~the set $\{\hd{n}{\omega} \mid n\geq 0\}$ is algebraically independent over $\bar{K}(t)$. \end{thm} \begin{proof} Since $\bar{K}(t)$ is algebraic over $K(t)$, it suffices to show algebraic independence over $K(t)$. Now, assume for the contrary, that $\omega$ and its hyperderivatives satisfy some algebraic relation. Choose $n$ minimal such that $\omega, \hd{1}{\omega},\ldots, \hd{n}{\omega}$ are algebraically dependent, and choose a polynomial $0\ne F(X_0,\ldots, X_n)\in K(t)[X_0,\ldots,X_n]$ such that $F(\omega, \hd{1}{\omega},\ldots, \hd{n}{\omega})=0$. Write $F=\sum_{j=0}^k f_jX_n^j$ with $f_j\in K(t)[X_0,\ldots,X_{n-1}]$ and $f_k\ne 0$. After rescaling we can even assume that the coefficients of the $f_j$ are polynomials in $t$, i.e.~$f_j\in K[t][X_0,\ldots,X_{n-1}]$.\\ As we have chosen $n$ to be minimal, and as $f_k\ne 0$, we also have \[ f_k(\omega,\hd{1}{\omega},\ldots,\hd{n-1}{\omega})\ne 0\in \TT. \] Since every nonzero element of $\TT$ has only finitely many zeros in the closed unit disc, for almost all $\zeta\in \bar{\FF}_q^{\,\times}$ we have: $f_k(\omega, \hd{1}{\omega},\ldots, \hd{n-1}{\omega})|_{t=\zeta}\ne 0\in \CC_\infty$. Hence, for such $\zeta$, $\hd{n}{\omega}(\zeta)$ is a root of the nonzero polynomial \[ \sum_{j=0}^kf_j(\omega, \hd{1}{\omega},\ldots,\hd{n-1}{\omega})|_{t=\zeta}X_n^j \in \CC_\infty[X_n] \] of degree $k$. By construction, the coefficients lie in $K(C[\pfrak^n])(\zeta)$ where $\pfrak\in \FF_q[t]$ is the minimal polynomial of $\zeta$ over $ \FF_q$. By the theorem above, the minimal polynomial of $\hd{n}{\omega}(\zeta)$ over $K(C[\pfrak^n])(\zeta)$ has degree $q^{\deg(\pfrak)}=\# \FF_q(\zeta)$ (resp.~degree $q^{\deg(\pfrak)}-1$ if $n=0$). Therefore, if we choose $\zeta$ such that $\# \FF_q(\zeta)-1 >k$, this leads to a contradiction. 
\end{proof} \section{Algebraic independence of periods}\label{sec:algebraic-independence} In this section, we prove our main theorem on the algebraic independence of the periods. \begin{thm}\label{thm:algebraic-independence} Let $n\in\NN$ be prime to $q$, let $C^{\otimes n}$ be the $n$-th tensor power of the Carlitz module and let \[ \begin{pmatrix} z_1 \\ \vdots \\ z_n \end{pmatrix} \in \CC_\infty^n\] be a generator for the period lattice. Then $z_1,z_2,\ldots, z_n$ are algebraically independent over $\bar{K}$. \end{thm} \begin{rem} As already noted in \cite{ga-dt:tpcmzv}, if $n$ is a power of the characteristic $p=\ch(\FF_q)$, then all but the last coordinate are $0$. We will make a precise statement in the case that $p$ divides $n$ at the end of this section. \end{rem} For proving the theorem, we first give a formula for these coordinates using evaluations of hyperderivatives. \begin{lem}\label{lem:description-of-coordinates} Let the generator above be chosen such that $z_n=\tilde{\pi}^n$. Then the coordinates $z_1,z_2,\ldots, z_n$ fulfill the equalities \[ z_i= (-1)^n \hd{n-i}{\phantom{\Big( \!\!\!} (t-\theta)^n\omega(t)^{n}}|_{t=\theta}, \] i.e.~$z_i$ is the $(n-i)$-th hyperderivative of the function $(\theta-t)^n\omega(t)^{n}$ evaluated at $t=\theta$. \end{lem} \begin{proof} As $\omega$ has a pole of order $1$ at $t=\theta$, $\omega^n$ has a pole of order $n$. Building on work of Anderson and Thakur \cite[\S 2.5]{ga-dt:tpcmzv}, we write $\omega^n$ as a Laurent series in $(t-\theta)$, \[ \omega^n=\sum_{j=-n}^\infty c_j (t-\theta)^j\in \CC_\infty\ls{t-\theta}. \] Then the coordinates are explicitly given by \[ z_i= (-1)^n c_{-i} \] for $i=1,\ldots, n$ (see \cite[Cor.~2.5.8]{ga-dt:tpcmzv}, and be aware that $\bar{\pi}$ ibid. equals $-\pitilde$). 
On the other hand, for any $0\leq k\leq n$: \begin{eqnarray*} \hd{k}{\phantom{\Big( \!\!\!} (t-\theta)^n\omega(t)^{n}} &=& \hd{k}{ \sum_{j=-n}^\infty c_j (t-\theta)^{j+n}} \\ &=& \sum_{j=k-n}^\infty c_j \binom{j+n}{k} (t-\theta)^{j+n-k}. \end{eqnarray*} Hence for $i=1,\ldots, n$: \begin{eqnarray*} (-1)^n \hd{n-i}{\phantom{\Big( \!\!\!} (t-\theta)^n\omega(t)^{n}}|_{t=\theta} &=& (-1)^n \sum_{j=-i}^\infty c_j \binom{j+n}{n-i} (t-\theta)^{j+i}|_{t=\theta} \\ &=& (-1)^n c_{-i} =z_i. \end{eqnarray*} 
\end{proof} \begin{proof}[Proof of Thm.~\ref{thm:algebraic-independence}] By Prop.~\ref{prop:prolong-dual-tensor-power}, a rigid analytic trivialization of $\rho_{n-1}(\mathfrak{C}^{\otimes n})$, the $(n-1)$-th prolongation of $\mathfrak{C}^{\otimes n}$, is given by the matrix \[ \rho_{[n-1]}(\Omega^n)= \begin{pmatrix} \Omega^n & \hd{1}{\Omega^n} & \hd{2}{\Omega^n} & \cdots&\hd{n-1}{\Omega^n} \\ 0 & \Omega^n & \hd{1}{\Omega^n} & \ddots & \vdots \\ \vdots &\ddots & \ddots & \ddots & \hd{2}{\Omega^n}\\ \vdots & & \ddots & \Omega^n & \hd{1}{\Omega^n} \\ 0 & \cdots & \cdots & 0 & \Omega^n \end{pmatrix}. \] Let $F$ be the field generated by the entries of $\rho_{[n-1]}(\Omega^n)$ over $\bar{K}(t)$. As, \[ \rho_{[n-1]}(\Omega^n)= \rho_{[n-1]}\left( (t-\theta)^{-n}\omega(t)^{-n}\right) = \left( \rho_{[n-1]} (t-\theta)\right)^{-n} \cdot \left(\rho_{[n-1]}(\omega^n)\right)^{-1} \] and $ \rho_{[n-1]} (t-\theta)\in \GL_n(K(t))$, the field $F$ is also generated over $\bar{K}(t)$ by the entries of $\rho_{[n-1]}(\omega^{n})$, and in particular is a subfield of finite index of the field generated by the entries of $\rho_{[n-1]}(\omega)$, as shown in Lemma \ref{lem:finite-extension}. Since $\omega$ is hypertranscendental (see Theorem \ref{thm:hypertranscendence}), the latter has transcendence degree over $\bar{K}(t)$ equal to $n$. Hence, the field $F$ has transcendence degree $n$ over $\bar{K}(t)$. Let $L$ be the field extension of $\bar{K}$ generated by the entries of $\rho_{[n-1]}(\Omega^n)|_{t=\theta}$. Then by the proof of \cite[Thm. 5.2.2]{mp:tdadmaicl} (see Thm.~\ref{thm:conseq-of-abp}), the transcendence degree of $L/\bar{K}$ is the same as the transcendence degree of $F/\bar{K}(t)$, i.e.~equals $n$. 
On the other hand, $L$ is also generated as a field by the entries of the inverse of $\rho_{[n-1]}(\Omega^n)|_{t=\theta}$, and using Lemma \ref{lem:description-of-coordinates}, we get \begin{eqnarray*} \left(\rho_{[n-1]}(\Omega^n)|_{t=\theta}\right)^{-1} &=& \left( \rho_{[n-1]}(\Omega^n)^{-1}\right)|_{t=\theta} =\rho_{[n-1]}(\Omega^{-n})|_{t=\theta} \\ &=& \rho_{[n-1]}((t-\theta)^n\omega^n)|_{t=\theta}\\ &=& (-1)^n \cdot\begin{pmatrix} z_n & z_{n-1} & z_{n-2} & \cdots & z_1 \\ 0 & z_n & z_{n-1} & \ddots & \vdots \\ \vdots &\ddots & \ddots & \ddots & z_{n-2}\\ \vdots & & \ddots & z_n & z_{n-1} \\ 0 & \cdots & \cdots & 0 & z_n \end{pmatrix}. \end{eqnarray*} Hence, $z_1,\ldots, z_n$ are algebraically independent over $\bar{K}$. \end{proof} In the case that the characteristic $p$ divides $n$, we can also make a precise statement on the algebraic independence. \begin{cor} Let $n\in\NN$ be arbitrary, let $C^{\otimes n}$ be the $n$-th tensor power of the Carlitz module and let \[ \begin{pmatrix} z_1 \\ \vdots \\ z_n \end{pmatrix} \in \CC_\infty^n\] be the generator for the period lattice with $z_n=\pitilde^n$. If $p^s$ is the exact power of $p$ dividing $n$, then $z_i\neq 0$ precisely, when $p^s$ divides $i$, and all nonzero coordinates are algebraically independent over $\bar{K}$. \end{cor} \begin{proof} The hyperdifferential operators on $\laurent$ satisfy \[ \hd{i}{f^{p^s}}=\left\{ \begin{array}{cl} 0 & \text{if }p^s\text{ does not divide } i\\ \left(\hd{i/p^s}{f}\right)^{p^s} & \text{if }p^s\text{ divides }i, \end{array}\right. \] for all $f\in \laurent$, as one readily sees by using the homomorphism $\mathcal{D}$. Applying this to $f=\Omega^{n/p^s}$, we see that the nonzero entries in $\rho_{[n-1]}(\Omega^n)$ are the $\hd{i}{\Omega^n}$ with $p^s$ divides $i$ and those are equal to $\left(\hd{i/p^s}{\Omega^{n/p^s}}\right)^{p^s}$. 
By specializing the inverse of $\rho_{[n-1]}(\Omega^n)$ to $t=\theta$ as in the proof of Theorem \ref{thm:algebraic-independence}, we see that the coordinates $z_i$ where $p^s$ does not divide $i$ are equal to zero, and that the other coordinates are just the $p^s$-powers of the coordinates of a period lattice generator for the $n/p^s$-th tensor power of the Carlitz module. Hence, by Theorem \ref{thm:algebraic-independence}, they are algebraically independent over $\bar{K}$. \end{proof} \bibliographystyle{plain} \def$'${$'$}
1,108,101,565,829
arxiv
\section{Introduction} Mobile edge computing (MEC) represents a promising technology to reduce the latency for 5G and beyond networks \cite{ Y_Mao, Zeng_VTM20}. It allows users to offload their computational intensive task to servers in close proximity, and thus, significantly enhancing their computation capacities and prolonging their lifespan. The whole process in general consists of the following three sequential phases \cite{ A_WCL5}: 1) uplink data transmission; 2) task processing at the MEC server; and 3) results feedback through the downlink. The uplink and downlink transmission phases involve the allocation of wireless resources, while the task processing phase concerns the computational resources. As a result, a joint allocation of wireless and computational resources is required to deliver a decent system performance\cite{BarbarossaSPM14}. Initially, MEC was applied to single-user systems, where the optimization variables include the transmission power, the offloading decision and ratio as well as the central processing unit (CPU) frequency for computing \cite{W_zhang_TWC13, Y_wang_COM16}. The single-user systems were then extended to the multi-user ones \cite{Xchen, Cyou, Ming_PIMRC}, where how to share the wireless and computational resources among the users directly affect the system performance. Recently, non-orthogonal multiple access is envisioned as a promising access technology \cite{Hao_TCOM19}, and its application to MEC has been considered, for example in \cite{Zeng_Adhoc20}. Note that the above mentioned works only apply to single-cell systems. The general scenario of multi-cell systems is attracting great attention recently \cite{Proa_INF19, Yang_Access19, Zeng_WCL2019}. Compared with single-cell systems, a new problem that emerges in multi-cell systems is how to match the users to the access points (APs). 
In this paper we consider a multi-AP multi-user MEC system, where each user can access multiple APs and utilize their computation resource. This generalizes the previous works \cite{Proa_INF19, Yang_Access19, Zeng_WCL2019}, where each user only offloads to one AP. Meanwhile, as in \cite{Zeng_WCL2019}, we consider the general case with flexible bandwidth allocation across both the APs and users. The system objective is to minimize the sum energy consumption under response time constraints. The formulated problem is shown to be non-convex. To handle it, we first investigate the complexity of optimization of a part of the system parameters. On this basis, we propose an iterative resource allocation procedure that converges to local optimum. To evaluate the proposed iterative solution, we compare it with the lower and upper bounds defined by less or more flexible multi-cell MEC architectures. Presented results validate the necessity of free selection of APs. Meanwhile, binary allocation, where all users select the AP with the highest share of their load provides a performance close to parallel processing. This facilitates its application to large systems, where the level of parallel processing is low. \section{System Model} \label{systemmodel} We consider a multi-cell MEC system that consists of $K$ users, and $M$ APs, each equipped with a MEC server. We denote the set of users by $\mathcal{K} = \{1, \cdots, K\}$, and APs by $\mathcal{M} = \{1, \cdots, M\}$. We consider that each user $i \in \mathcal{K}$ generates a computationally intensive {\color{black} and delay sensitive} task, which is characterized by {\color{black} three} parameters, the size $L_i$ of the input data, the number $W_i$ of CPU cycles required to perform the computation, and the completion time constraint $D_i$. To save energy consumption at the user for processing the tasks, and satisfy the delay constraint, {\color{black}each user offloads its computing task to one or multiple APs for processing. 
That is, each user $i \in \mathcal{K}$ offloads part of its input data, i.e., $L_{i,j}$ to AP $j, j \in \mathcal{M}$, satisfying $L_{i,j}\geq 0$ and $\sum_{j=1}^M L_{i,j}=L_i$.} For simplicity we assume that this parallel processing has no communication or computation overhead \cite{Ref_27}. The objective of the considered MEC system is to minimize the energy consumption for data transmission under the delay constraint, by jointly allocating the data to be sent to the APs, the wireless and the computing resources. \subsection{Wireless resource management} The overall system bandwidth is $B$ Hz, which should be appropriately shared among the users. We consider flat fading channel and orthogonal access with frequency division multiple access. Denote the corresponding channel gain for user $i$ to AP $j$ by $h_{i,j}$. Then, the achievable data rate at user $i$ to AP $j$ is given by {\color{black} \begin{equation} \label{rate} R_{i,j}=x_{i,j} \log_2 \left( 1+\frac{P_{i,j} h_{i,j}}{x_{i,j} N_0} \right), \end{equation} where $P_{i,j}$ is the corresponding transmission power, while $x_{i,j}$ denotes the allocated bandwidth, satisfying $\sum_{i \in \mathcal{K}} \sum_{j \in \mathcal{M}} x_{i,j} =B$. Besides, $N_0$ is the noise power spectral density coefficient. Accordingly, the transmission time and the resulting transmission energy consumption are respectively given by \begin{equation} \label{T_E} T_{i,j}=\frac{L_{i,j}}{R_{i,j}}~ {\rm{and}}~ E_{i,j}=\frac{L_{i,j} P_{i,j}}{R_{i,j}}. \end{equation} \subsection{Computing resource management} Let us denote the computational capacity of the MEC server for AP $j, j\in \mathcal{M}$ by $C_j$. We denote the computing resource allocated to user $i$ as $q_{i,j}$, satisfying $\sum_{i \in \mathcal{K}} q_{i,j} = C_j$. Then, the computational time of user $i$'s task is given by $Q_{i,j}=\frac{W_{i,j}}{q_{i,j}}$. 
We assume that there is a linear relation between $L_{i,j}$ and $W_{i,j}$, i.e., $W_{i,j}=\eta L_{i,j}$, where $\eta$ is the coefficient \cite{Ref_4}. Then, we have \begin{equation} \label{Q_ij} Q_{i,j}=\frac{\eta L_{i,j}}{q_{i,j}}. \end{equation} \section{Problem Formulation and General Results} We consider the problem of total transmission energy minimization, under the constraint on the completion time of the computational tasks. That is, for each user $i$, the sum of the transmission and computational times should not violate the maximum delay $D_i$, i.e., $T_{i,j}+Q_{i,j} \leq D_i, \forall j \in \mathcal{M}$. {\color{black}We disregard the time needed for the downlink transmission of the results, since it concerns usually small amounts of data \cite{L_IoT11, Latency, F_TWC9, Zeng_WoWMoM19}. Additionally, we do not consider the energy consumption of the computation at the MEC servers, since it is independent from the resource allocation (i.e., all computing needs to be performed at the MEC servers anyway, and consumes the same energy \cite{ F_TWC9}). } The delay constraint then can be turned into the following rate requirement: \begin{equation} R_{i,j} \geq \frac{L_{i,j}}{D_i - Q_{i,j}}, \forall i \in \mathcal{K}, j \in \mathcal{M}. \end{equation} The resource allocation concerns the allocation of bandwidth, power, the computing resource and the data for each user on each AP. 
The energy minimization problem can be formulated as \begin{subequations} \label{P1} \begin{align} \text{P1}:& ~ \underset{\mbf{P},\mbf{x},\mbf{q}, \mbf{L}}{\text{min}} \sum_{i \in \mathcal{K}} \sum_{j \in \mathcal{M}} E_{i,j} \\ \text{s.t.}~~ & ~R_{i,j} \geq \frac{L_{i,j}}{D_i - Q_{i,j}}, \forall i \in \mathcal{K}, j \in \mathcal{M} \\ & ~\sum_{i \in \mathcal{K}} \sum_{j \in \mathcal{M}} x_{i,j} =B \\ & ~ \sum_{i \in \mathcal{K}} q_{i,j} = C_j, \forall j \in \mathcal{M} \\ &~ \sum_{j=1}^M L_{i,j}=L_i, \forall i \in \mathcal{K} \end{align} \end{subequations} where $\mbf{P} \in \mathbb{R}^{K},\mbf{x} \in \mathbb{R}^{K},\mbf{q} \in \mathbb{R}^{K}, \mbf{L} \in \mathbb{R}^{K \times M}$ are the matrix of allocated powers $P_{i,j}$, bandwidth $x_{i,j}$, computational resource $q_{i,j}$ and data size $L_{i,j}$, respectively. Inequality constraints (\ref{P1}b) reflect the minimum data rate requirement for each user on each AP. Constraints (\ref{P1}c) limit the bandwidth, while (\ref{P1}d) restrict the computing resource. Constraints (\ref{P1}e) limit the data size. To solve P1, we need to jointly allocate the wireless and computing resources and data, which are coupled in a non-linear way through the delay constraint. To progress, we first state the following theorem. \begin{theorem}\label{theorem_power} Under any given bandwidth, computing resource and data size allocation $\mbf{x},\mbf{q}$, $\mbf{L}$, the energy consumption is minimized when $T_{i,j}+Q_{i,j}=D_i$, $\forall i \in \mathcal{K}, j \in \mathcal{M}$ holds and the transmission power is set as \begin{equation}\label{eq:pum_OMA} P_{i,j}=\frac{ N_0 x_{i,j}}{h_{i,j}} { \left(2^{\frac{R_{i,j}^{\min}}{x_{i,j}}}-1 \right)}, \forall i \in \mathcal{K}, j \in \mathcal{M} \end{equation} where $R_{i,j}^{\rm{min}}$ is the minimum rate that still fulfills the delay requirement, i.e., $R_{i,j}^{\rm{min}}=\frac{L_{i,j}}{D_i-Q_{i,j}}$. 
\end{theorem} \begin{IEEEproof} When $x_{i,j}$, $q_{i,j}$ and $L_{i,j}$ are given, the energy consumption of the users is independent, and minimizing the total energy consumption is equivalent to minimizing that of each user on each AP. Without loss of generality, we look at $E_{i,j}$, which can be reformulated as \begin{equation} \label{power_consumption} E_{i,j}= \frac{L_{i,j} P_{i,j} }{R_{i,j}}=\frac{L_{i,j} P_{i,j} }{x_{i,j} \log_2 \left( 1+ \frac{P_{i,j} h_{i,j}}{x_{i,j} N_0} \right)}. \end{equation} According to \eqref{power_consumption}, it is clear that $E_{i,j}$ increases with $P_{i,j}$. Therefore, $E_{i,j}$ is minimized when the minimum power is used. Meanwhile, to satisfy the delay constraint, we have $R_{i,j}= x_{i,j} \log_2 \left( 1+ \frac{P_{i,j} h_{i,j}}{x_{i,j} N_0} \right) \geq R_{i,j}^{\rm{min}}$, i.e., $P_{i,j} \geq {(2^{R_{i,j}^{\rm{min}}/x_{i,j}} -1) N_0 x_{i,j}}/{ h_{i,j}}$. At equality the achieved rate is $R_{i,j}^{\rm{min}}$, which in turn results a transmission time of $T_{i,j}=D_i-Q_{i,j}$. This concludes the proof. \end{IEEEproof} Based on Theorem \ref{theorem_power}, Problem P1 can be simplified as \begin{subequations} \label{P2} \begin{align} \text{P2}:& ~ \underset{\mbf{x},\mbf{q}, \mbf{L}}{\text{min}} \sum_{i \in \mathcal{K}} \sum_{j \in \mathcal{M}} \frac{ N_0 x_{i,j}}{h_{i,j}} \left(D_{i}-\frac{\eta L_{i,j}}{q_{i,j}} \right) \left(2^{\frac{L_{i,j}/x_{i,j}} {D_{i}-\frac{\eta L_{i,j}}{q_{i,j}}}}-1 \right)\\ \text{s.t.}~~ & ~\sum_{i \in \mathcal{K}} \sum_{j \in \mathcal{M}} x_{i,j} =B \\ & ~ \sum_{i \in \mathcal{K}} q_{i,j} = C_j, \forall j \in \mathcal{M} \\ &~ \sum_{j=1}^M L_{i,j}=L_i, \forall i \in \mathcal{K} \end{align} \end{subequations} This is the problem that we will evaluate in detail. \begin{theorem}\label{theorem_main_conv} Problem P2, that is, the optimal joint allocation of bandwidth $\mbf{x}$, computing resource $\mbf{q}$ and data size $\mbf{L}$ is a non-convex problem. 
\end{theorem} \begin{IEEEproof} P2 requires to jointly allocate bandwidth, computing resource and data size, and is difficult to handle. In the following, we show that P2 is actually non-convex. Without loss of generality, we only consider $E_{i,j}$. We first consider $E_{i,j}$ over $x_{i,j}$ and $L_{i,j}$. Denote its Hessian matrix as $\mbf{H}_{i,j}$, which is \begin{align} \mbf{H}_{i,j} = & \frac{N_0}{h_i} 2 ^{\frac{L_{i,j}/x_{i,j}}{D_{i}-\eta L_{i,j} /q_{i,j}}} \ln2^2 \\ \nonumber & \times \begin{bmatrix} \frac{L_{i,j}}{(D_{i}-\eta L_{i,j} /q_{i,j}) x_{i,j}^3} & - \frac{D_{i} L_{i,j}}{(D_{i}-\eta L_{i,j} /q_{i,j})^3 x_{i,j}^2} \\ - \frac{D_{i} L_{i,j}}{(D_{i}-\eta L_{i,j} /q_{i,j})^3 x_{i,j}^2} & \frac{D_{i}^2}{(D_{i}-\eta L_{i,j} /q_{i,j})^3 x_{i,j}} \end{bmatrix}. \end{align} It can be verified that $\rm{det}(\mbf{H}_{i,j}) \geq 0$ does not always hold. {\color{black}For example, by setting $ D_i=0.1, \eta=0.5, L_{i,j}=0.1, x_{i,j}=0.1, q_{i,j}=1, \frac{N_0}{h_i}=1 $, we obtain $\rm{det}(\mbf{H}_{i,j})=-3.1437 \times 10^{13}<0 $.} Since all leading principle minors of a convex function should be greater or equal than $0$, this indicates P2 is non-convex. \end{IEEEproof} {\color{black} \section{Complexity of Subproblems and the Joint Resource Allocation Algorithm} Problem P2 considers three decision variables, $\mbf{L},$ $\mbf{q}$ and $\mbf{x}$. In this section we provide a discussion about the complexity of the subproblems, where some of the variables are considered as given input parameters. This discussion helps us to find ways for decomposing the optimization problem into tractable subproblems. It also guides future system design, where the network, the computing resources and the application may be controlled by three different parties. \subsection{Complexity of Subproblems} } Let us first consider subproblems of P2 with a single free decision variable. 
\begin{theorem}\label{theorem-singleparam} Subproblems of P2, where two of the three variables of $\mbf{L},$ $\mbf{q}$ and $\mbf{x}$ are fixed, are convex problems. \end{theorem} \begin{IEEEproof} To prove the theorem, we need to consider the following three subcases: \begin{enumerate}[label=\alph*)] \item Optimizing data size $\mbf{L}$. According to $\mbf{H}_{i,j}$, we have $$\frac{\partial^2 E_{i,j}}{\partial L_{i,j}^2}= \frac{N_0 D_{i}^2}{h_{i,j}x_{i,j}(D_{i}-\eta L_{i,j} /q_{i,j})^3} 2 ^{\frac{L_{i,j}/x_{i,j}}{D_{i}-\eta L_{i,j} /q_{i,j}}} \ln2^2 >0.$$ Therefore, the considered problem is convex. \item Optimizing bandwidth $\mbf{x}$. According to $\mbf{H}_{i,j}$, we have $$\frac{\partial^2 E_{i,j}}{\partial x_{i,j}^2}= \frac{L_{i,j}}{h_i(D_{i}-\eta L_{i,j} /q_{i,j}) x_{i,j}^3} 2 ^{\frac{N_0 L_{i,j}/x_{i,j}}{ D_{i}-\eta L_{i,j} /q_{i,j}}} \ln2^2>0. $$ Therefore, the considered problem is also convex. \item Optimizing computing resource $\mbf{q}$. We replace variables $q_{i,j}$ with $t_{i,j}=D_i-{\eta L_{i,j}}/{q_{i,j}} $ to get \begin{subequations} \label{Pq} \begin{align} \underset{ {\mbf{t}}}{\text{min}} & ~ \sum_{i \in \mathcal{K}} \frac{ N_0 }{h_{i,j}} x_{i,j} t_{i,j} \left(2^{\frac{L_{i,j}}{x_{i,j} t_{i,j}}}-1 \right) \\ \text{s.t.} & ~ \sum_{i \in \mathcal{K} } \frac{\eta L_{i,j}}{D_i-t_{i,j}} = C_j, \forall j \in \mathcal{M} \end{align} \end{subequations} In \eqref{Pq}, equality (\ref{Pq}b) is clearly not affine, and thus, the feasible set is non-convex. To address it, we relax the equality constraint and substitute (\ref{Pq}b) with \begin{equation} \label{modified C2_new_q} \sum_{i \in \mathcal{K}} \frac{\eta L_{i,j}}{D_i-t_{i,j}} \leq C_j, \forall j \in \mathcal{M} \end{equation} As a consequence of Theorem 1, the energy consumption decreases if $q_{i,j}$ is increased. Thus, for the optimal solution, equality is achieved in \eqref{modified C2_new_q}, which means substituting (\ref{Pq}b) with \eqref{modified C2_new_q} will not change the solution. 
Then, for inequality constraint \eqref{modified C2_new_q}, its second derivative is $\sum_{i \in \mathcal{K} } \frac{2 \eta L_{i,j}}{(D_i -t_{i,j})^3}>0$, and thus, it is convex. For the objective function, we have $$\frac{\partial^2 E_{i,j}}{\partial t_{i,j}^2}= \frac{N_0 L_{i,j}^2}{h_{i,j}x_{i,j} t_{i,j}^3} 2^{\frac{L_{i,j}}{x_{i,j} t_{i,j}} } \ln2^2>0 .$$ Therefore, \eqref{Pq} with the relaxed constraint \eqref{modified C2_new_q} is convex. \end{enumerate} \end{IEEEproof} Now let us consider the joint optimization problems, where one of the variables is fixed, while the other two are optimized jointly. \begin{theorem}\label{theorem-L-opt} Subproblems of P2, where $\mbf{L}$ is one of the decision variables, are non-convex problems. \end{theorem} \begin{IEEEproof} \begin{enumerate}[label=\alph*)] \item Optimizing data size $\mbf{L}$ and transmission bandwidth $\mbf{x}$ under fixed processing power allocation $\mbf{q}$. {\color{black} This has been shown in Theorem \ref{theorem_main_conv}, namely, the proof of the non-convexity of P2.} \item Optimizing data size $\mbf{L}$ and processing power $\mbf{q}$ under fixed transmission bandwidth allocation $\mbf{x}$. 
In this case, the Hessian matrix is given by \[ \mbf{\vec{H}}_{i,j} = \frac{ N_0 x_{i,j}}{h_{i,j}} \begin{bmatrix} \mbf{\vec{H}}_{i,j}(1,1) & \mbf{\vec{H}}_{i,j}(1,2) \\ \mbf{\vec{H}}_{i,j}(2,1) & \mbf{\vec{H}}_{i,j}(2,2) \end{bmatrix}, \] where $ \mbf{\vec{H}}_{i,j}(1,1) = \frac{ D_{i}^2}{x_{i,j}^2(D_{i}-\eta L_{i,j} /q_{i,j})^3} 2 ^{\frac{L_{i,j}/x_{i,j}}{D_{i}-\eta L_{i,j} /q_{i,j}}} \ln2^2 $, $\mbf{\vec{H}}_{i,j}(1,2) =\mbf{\vec{H}}_{i,j}(2,1)= ( \frac{\eta }{q_{i,j}^2 } + \frac{ \eta^2 L_{i,j}^2 \ln2/x_{i,j} - \eta D_i L_{i,j} q_{i,j} \ln2/x_{i,j} }{q_{i,j} (D_i q_{i,j} - \eta L_{i,j} )^2 } - \frac{\eta D_i L_{i,j}^2 q_{i,j} \ln2^2/x_{i,j}^2 }{(D_i q_{i,j}- \eta L_{i,j} )^3 } ) 2 ^{\frac{L_{i,j}/x_{i,j}}{D_{i}-\eta L_{i,j} /q_{i,j}}} - \frac{\eta}{q_{i,j}^2} $ and $\mbf{\vec{H}}_{i,j}(2,2) = ( -\frac{2 \eta L_{i,j} }{q_{i,j}^3 } + \frac{ 2\eta D_i L_{i,j}^2 \ln2/x_{i,j} }{q_{i,j} (D_i q_{i,j} - \eta L_{i,j} )^2 } - \frac{\eta^2 L_{i,j}^4 \ln2^2/x_{i,j}^2 }{q_{i,j}(D_i q_{i,j}- \eta L_{i,j} )^3 } ) 2 ^{\frac{L_{i,j}/x_{i,j}}{D_{i}-\eta L_{i,j} /q_{i,j}}} + \frac{2 \eta L_{i,j}}{q_{i,j}^3} $. It can be verified that $\rm{det}(\mbf{\vec{H}}_{i,j}) \geq 0$ does not always hold, which indicates the problem is non-convex. For example, by setting $ D_i=1, \eta=1, L_{i,j}=2, x_{i,j}=1, q_{i,j}=2.1, \frac{N_0}{h_i}=1 $, we obtain $\rm{det}(\mbf{\vec{H}}_{i,j})=-28.7<0 $. \end{enumerate} \end{IEEEproof} However, the third subproblem of two decision variables, that is, the case when the size of the data blocks is fixed, is more tractable, according to the following theorem. \begin{theorem}\label{theorem-L-fixed} The subproblem of P2, where $\mbf{L}$ is fixed and the transmission bandwidth $\mbf{x}$ and processing power $\mbf{q}$ allocation needs to be optimized, is a convex problem. 
\end{theorem} \begin{IEEEproof} We replace variables $q_{i,j}$ with $t_{i,j}=D_i-{\eta L_{i,j}}/{q_{i,j}} $ to get \begin{subequations} \label{Pq_2} \begin{align} \underset{ {\mbf{t}}}{\text{min}} & ~ \sum_{i \in \mathcal{K}} \frac{ N_0 }{h_{i,j}} x_{i,j} t_{i,j} \left(2^{\frac{L_{i,j}}{x_{i,j} t_{i,j}}}-1 \right) \\ \text{s.t.} & ~\sum_{i \in \mathcal{K}} x_{i} =B \\ & ~ \sum_{i \in \mathcal{K} } \frac{\eta L_{i,j}}{D_i-t_{i,j}} = C_j, \forall j \in \mathcal{M} \end{align} \end{subequations} First, equality constraint (\ref{Pq_2}b) is affine. (\ref{Pq_2}c) can be relaxed to \eqref{modified C2_new_q}, which is convex. Last, let us consider the objective function (\ref{Pq_2}a). The Hessian matrix is given by \[ \mbf{\hat{H}}_{i,j} = \frac{N_0}{h_{i,j}} \cdot \begin{bmatrix} \mbf{\hat{H}}_{i,j} (1,1) & \mbf{\hat{H}}_{i,j} (1,2) \\ \mbf{\hat{H}}_{i,j} (2,1) & \mbf{\hat{H}}_{i,j} (2,2) \end{bmatrix}, \] where $\mbf{\hat{H}}_{i,j} (1,1)=\ln2^2 \cdot 2^{\frac{L_{i,j}}{ t_{i,j} x_{i,j}} } \cdot \frac{L_{i,j}^2}{t_{i,j} x_{i,j}^3}$, while $\mbf{\hat{H}}_{i,j} (2,2)=\ln2^2 \cdot 2^{\frac{L_{i,j}}{x_{i,j} t_{i,j}} } \cdot \frac{L_{i,j}^2}{x_{i,j} t_{i,j}^3}$. Besides, $\mbf{\hat{H}}_{i,j} (1,2)=\mbf{\hat{H}}_{i,j} (2,1)=2^{\frac{L_{i,j}}{x_{i,j} t_{i,j}} }-1 + \ln2^2 \cdot \frac{L_{i,j}^2}{x_{i,j}^2t_{i,j}^2} 2^{\frac{L_{i,j}}{x_{i,j} t_{i,j}} }- \ln 2 \cdot \frac{L_{i,j}}{x_{i,j} t_{i,j}} 2^{\frac{L_{i,j}}{x_{i,j} t_{i,j}} }$. After some algebraic manipulations, it can be verified that $\rm{det}(\mbf{\hat{H}}_{i,j})>0$ holds for all $\frac{L_{i,j}}{x_{i,j} t_{i,j}}>0$, which indicates (\ref{Pq_2}a) is convex. This completes the proof. \end{IEEEproof} \begin{corollary}\label{col-single-fixed-AP} Consider the case, when each node can connect to a single AP only, and this AP is predefined (e.g., the one with best SNR). Then, the problem of transmission bandwidth and processing resource allocation is convex. 
\end{corollary} \begin{IEEEproof} This is a special case addressed by Theorem \ref{theorem-L-fixed}, where $\mbf{L}_i$ has only one non-zero element $\forall i \in \mathcal{K}$. \end{IEEEproof} \subsection{Iterative Resource Allocation for Multi-AP Processing} \begin{algorithm} \caption{Iterative Resource Allocation for Multi-AP Processing} \label{alg:iterative} \begin{algorithmic}[1] \State {\textbf{Initialization:}} $L_{i,j} \leftarrow L_{i,j}^{(0)}, \forall i \in \mathcal{K}, j \in \mathcal{M}$; \State Update $x_{i,j}, q_{i,j}, \forall i \in \mathcal{K}, j \in \mathcal{M}$ based on BCAA, and calculate $\sum_{i=1}^K \sum_{j=1}^M E_{i,j}^0$; \State $\sum_{i=1}^K \sum_{j=1}^M E_{i,j}^1 \leftarrow \sum_{i=1}^K \sum_{j=1}^M E_{i,j}^0+ 2 \epsilon$; \State \textbf{while} $\sum_{i=1}^K \sum_{j=1}^M E_{i,j}^1 - \sum_{i=1}^K \sum_{j=1}^M E_{i,j}^0 > \epsilon$ \textbf{do} \State \hspace{20pt} Update $L_{i,j}$ based on DAA, and recalculate $\sum_{i=1}^K \sum_{j=1}^M E_{i,j}^1$; \State \hspace{20pt} Update $x_{i,j}, q_{i,j}$ based on BCAA, and recalculate $\sum_{i=1}^K \sum_{j=1}^M E_{i,j}^0$; \State \textbf{end while} \end{algorithmic} \end{algorithm} In this section, an Iterative Resource Allocation algorithm is proposed to solve the non-convex joint resource allocation problem P2. As shown in \text{Algorithm \ref{alg:iterative}}, the proposed algorithm follows two iterative steps: i) the Data Allocation Algorithm (DAA) updates $\mbf{L}$ to allocate the data, for given bandwidth and computing resource allocation $\mbf{x}$ and $\mbf{q}$, and ii) the Bandwidth and Computing resource Allocation Algorithm (BCAA) updates $\mbf{x}$ and $\mbf{q}$ to allocate bandwidth and computing resource, for given $\mbf{L}$. We denote by $E_i^{t}$ and $E_i^{x}$ the energy consumption of user $i$ after optimizing $t_i$ and $x_i$, respectively, and $\epsilon$ is the stop condition. 
Additionally, $L_{i,j}^{(0)}$ denotes the initial value for $L_{i,j}$, and it can be obtained using a fixed allocation, e.g., equal allocation, or a random allocation, e.g., following a uniform distribution. {\bf{The Data Allocation Algorithm (DAA):}} The data allocation problem under given bandwidth and computing resource is shown to be convex in {\color{black} Theorem~\ref{theorem-singleparam}.} Therefore, we can use the Karush-Kuhn-Tucker (KKT) condition to derive the optimal $L_{i,j}$. The KKT condition for data $L_{i,j}$ is given by \eqref{lambda} at the top of next page. \begin{figure*}[!t] \normalsize \begin{equation} \label{lambda} g( L_{i,j})=\frac{N_0 x_{i,j} }{ h_{i,j} } \left[ \left( \frac{D_{i} \ln2}{x_{i,j}(D_{i}-\eta L_{i,j} /q_{i,j})} - \frac{\eta}{q_{i,j}} \right) 2 ^{\frac{L_{i,j}/x_{i,j}}{D_{i}-\eta L_{i,j} /q_{i,j}}} + \frac{\eta}{q_{i,j}} \right]+ \lambda=0. \end{equation} \end{figure*} Note that in \eqref{lambda} $\lambda$ is the Lagrange dual variable. For given $\lambda$, the above equation can be used to obtain $L_{i,j}$. Specifically, we have $\frac{\partial g( L_{i,j})}{\partial L_{i,j} }= \frac{N_0 D_{i}^2}{h_{i,j}x_{i,j}(D_{i}-\eta L_{i,j} /q_{i,j})^3} 2 ^{\frac{L_{i,j}/x_{i,j}}{D_{i}-\eta L_{i,j} /q_{i,j}}} \ln2^2 >0.$, which indicates that $g( L_{i,j})$ grows with $L_{i,j}$, and thus a bisection search can be used to obtain $L_{i,j}$ by comparing $g(L_{i,j})$ with 0. Now the problem lies in how to obtain $\lambda$. When $\lambda$ is increased, $L_{i,j}, \forall j \in \mathcal{M}$ will decrease to ensure $g(L_{i,j})=0$. Meanwhile, $\sum_{j=1}^M L_{i,j}= L_i$ needs to hold. Consequently, $\lambda$ can also be obtained with bisection search, by comparing $\sum_{j=1}^M L_{i,j}$ with $L_{i}$. The resulting DAA consists of two loops: an outer loop to find the value of $\lambda$ and an inner loop to determine the data allocation ${\bf{L}}_{i}$. 
The computational complexity is $O \left( K M \log_2(\lambda ) \log_2(L_i) \right)$.
The computational complexity is $O( KM \log_2(B) \log_2(\beta ))$. {\bf{Computing resource Allocation Algorithm (CAA):}} Under given bandwidth allocation, the computing resource allocation is independent across the APs. Thus, the energy minimization for each AP is equivalent to that of the overall system. Let us consider AP $j$, $j\in \mathcal{M}$. The KKT condition for $t_{i,j}$ is given by \begin{align*} v(t_{i,j})& = \frac{ N_0 x_{i,j}}{ h_{i,j}} \left[ 2^{\frac{L_{i,j}}{x_{i,j} t_{i,j}}} - \frac{L_{i,j}}{x_{i,j} t_{i,j}} 2^{\frac{L_{i,j}}{x_{i,j} t_{i,j}}} \ln2 -1 \right] \\ &~~+ \frac{W_{i,j} }{(D_{i}-t_{i,j})^2} \mu_j \\ &=0, \end{align*} where $\mu_j$ is the introduced auxiliary variable, satisfying $\mu_j \geq 0$. As for $L_{i,j}$ and $x_{i,j}$, the bisection method can be used to obtain $\mu_j$ and $t_{i,j}$. Likewise, the resulting CAA consists of two loops: an outer loop to find the value of $\mu_j$ and an inner loop to determine the computing resource allocation $t_{i,j}$. The computational complexity is $O( KM \log_2(D_i) \log_2(\mu_j ))$. {\color{black}BAA and CAA have to be repeated until convergence}. Since the bandwidth and computing resource allocation problem is convex, convergence is guaranteed and the converged local optimum is also the global optimum. } {\bf{Convergence and Complexity:}} \begin{theorem} The Iterative Resource Allocation algorithm converges in finite steps. \end{theorem} \begin{IEEEproof} In both lines 9 and 10 of Algorithm \ref{alg:iterative}, the energy consumption decreases, or remains unchanged. Since there is a lower bound for the energy consumption, e.g., 0, the Iterative Resource Allocation algorithm always terminates, either by reaching the lower bound, or by achieving a decrease less than $\epsilon$. Therefore, convergence is guaranteed. \end{IEEEproof} Denote the number of iterations required for Algorithm \ref{alg:iterative} {\color{black}and BCAA to converge by $I$ and $I_0$, respectively. 
The total computational complexity is $O\left( I KM \left[ \log_2(L_i) \log_2(\lambda ) + I_0 \log_2(B) \log_2(\beta)+ I_0\log_2(D_i) \log_2(\mu_j) \right] \right)$.
We consider three different initializations of the user data: \emph{Prop-1} denotes the case when the data for each user are equally allocated among the APs; \emph{Prop-2} is obtained following a uniformly random data size allocation; and finally \emph{Prop-3} represents the case when $90\%$ of the data are allocated to the AP with the best channel condition, while the rest are equally allocated to the remaining APs.
\begin{figure} \centering \includegraphics[width=1\linewidth]{eps/delay_load_4AP8UE.pdf} \caption{Ratio of largest load share versus delay under different network scenarios.} \label{Load} \end{figure} \section{Conclusion} In this paper we investigated the join wireless and computing resource allocation for a multi-AP multi-user MEC network. The general case with parallel processing and global bandwidth sharing was considered, and the system objective was to minimize the sum transmission energy under response time constraints. The formulated problem was shown to be non-convex. To address it, we first investigated the complexity of optimizing a part of the system parameters, and based on the results proposed an Iterative Resource Allocation procedure with guaranteed convergence. Presented numerical results show that the proposed iterative algorithm converges rapidly to the local optimum. Moreover, by comparing the proposed iterative algorithm with the lower and upper bounds, it is clear that free selection of APs is crucial for obtaining decent system performance. \balance \bibliographystyle{IEEEtran}
1,108,101,565,830
arxiv
\section{Introduction} \label{sec:intro} Despite massive efforts to improve the security of computer systems, security breaches are only becoming more frequent and damaging, as more sensitive data is processed in the cloud~\cite{malekos-smith:csis20:hidden-costs-cybercrime,ibm20:breach-cost-report}. Current encryption technology is of limited help, because servers must decrypt data before processing it. Once data is decrypted, it is vulnerable to breaches. Fully Homomorphic Encryption (FHE) is a class of encryption schemes that address this problem by enabling \emph{generic computation on encrypted data}. \autoref{fig:overview} shows how FHE enables secure offloading of computation. The client wants to compute an expensive function $f$ (e.g., a deep learning inference) on some private data $x$. To do this, the client encrypts $x$ and sends it to an untrusted server, which computes $f$ on this encrypted data \emph{directly} using FHE, and returns the encrypted result to the client. FHE provides ideal security properties: even if the server is compromised, attackers cannot learn anything about the data, as it remains encrypted throughout. \figOverview FHE is a young but quickly developing technology. First realized in 2009~\cite{gentry09}, early FHE schemes were about 10$^9$ times slower than performing computations on unencrypted data. Since then, improved FHE schemes have greatly reduced these overheads and broadened its applicability~\cite{albrecht:hesg18:standard,peikert2016decade}. FHE has inherent limitations---for example, data-dependent branching is impossible, since data is encrypted---so it won't subsume all computations. Nonetheless, important classes of computations, like deep learning inference~\cite{cheon:ictaci17:homomorphic,dathathri:pldi19:chet,dathathri:pldi20:eva}, linear algebra, and other inference and learning tasks~\cite{han:aaai19:logistic} are a good fit for FHE. 
This has sparked significant industry and government investments~\cite{ibm,intel,dprive} to widely deploy FHE. Unfortunately, FHE still carries substantial performance overheads: despite recent advances~\cite{dathathri:pldi19:chet, dathathri:pldi20:eva, roy:hpca19:fpga-he, brutzkus:icml19:low, polyakov:17:palisade}, FHE is still 10,000$\times$ to 100,000$\times$ slower than unencrypted computation when executed in carefully optimized software. Though this slowdown is large, it can be addressed with hardware acceleration: \emph{if a special- ized FHE accelerator provides large speedups over software execution, it can bridge most of this performance gap and enable new use cases.} For an FHE accelerator to be broadly useful, it should be programmable, i.e., capable of executing arbitrary FHE computations. While prior work has proposed several FHE accelerators, they do not meet this goal. Prior FHE accelerators~\cite{cousins:hpec14:fpga-he,cousins:tetc17:fpga-he,doroz:tc15:accelerating-fhe,roy:hpca19:fpga-he,riazi:asplos20:heax,turan:tc20:heaws} target individual FHE operations, and miss important ones that they leave to software. These designs are FPGA-based, so they are small and miss the data movement issues facing an FHE ASIC accelerator. These designs also overspecialize their functional units to specific parameters, and cannot efficiently handle the range of parameters needed within a program or across programs. In this paper we present F1\xspace, the first programmable FHE accelerator. F1\xspace builds on an in-depth architectural analysis of the \mbox{characteristics} of FHE computations, which exposes the main challenges and reveals the design principles a programmable FHE architecture should exploit. 
\paragraph{Harnessing opportunities and challenges in FHE:} F1\xspace is tailored to the three defining characteristics of FHE: \noindent \textbf{\emph{(1) Complex operations on long vectors:}} FHE encodes information using very large vectors, several thousand elements long, and processes them using modular arithmetic. F1\xspace employs \emph{vector processing} with \emph{wide functional units} tailored to FHE operations to achieve large speedups. The challenge is that two key operations on these vectors, the Number-Theoretic Transform (NTT) and automorphisms, are not element-wise and require complex dataflows that are hard to implement as vector operations. To tackle these challenges, F1\xspace features specialized NTT units and the first vector implementation of an automorphism functional unit. \noindent \textbf{\emph{(2) Regular computation:}} FHE programs are dataflow graphs of arithmetic operations on vectors. All operations and their dependences are known ahead of time (since data is encrypted, branches or dependences determined by runtime values are impossible). F1\xspace exploits this by adopting \emph{static scheduling}: in the style of Very Long Instruction Word (VLIW) processors, all components have fixed latencies and the compiler is in charge of scheduling operations and data movement across components, with no hardware mechanisms to handle hazards (i.e., no stall logic). Thanks to this design, F1\xspace can issue many operations per cycle with minimal control overheads; combined with vector processing, F1\xspace can issue tens of thousands of scalar operations per cycle. % \noindent \textbf{\emph{(3) Challenging data movement:}} In FHE, encrypting data increases its size (typically by at least 50$\times$); data is grouped in long vectors; and some operations require large amounts (tens of MBs) of auxiliary data. 
Thus, we find that data movement is \emph{the key challenge} for FHE acceleration: despite requiring complex functional units, in current technology, limited on-chip storage and memory bandwidth are the bottleneck for most FHE programs. Therefore, F1\xspace is primarily designed to minimize data movement. First, F1\xspace features an explicitly managed on-chip memory hierarchy, with a heavily banked scratchpad and distributed register files. Second, F1\xspace uses mechanisms to decouple data movement and hide access latencies by loading data far ahead of its use. Third, F1\xspace uses new, FHE-tailored scheduling algorithms that maximize reuse and make the best out of limited memory bandwidth. Fourth, F1\xspace uses relatively \emph{few functional units with extremely high throughput}, rather than lower-throughput functional units as in prior work. This \emph{reduces the amount of data that must reside on-chip simultaneously}, allowing higher reuse. In summary, F1\xspace brings decades of research in architecture to bear, including vector processing and static scheduling, and combines them with new specialized functional units (\autoref{sec:FUs}) and scheduling algorithms (\autoref{sec:scheduler}) to design a programmable FHE accelerator. We implement the main components of F1\xspace in RTL and synthesize them in a commercial 14nm/12nm process. With a modest area budget of 151\,mm$^2$, our F1\xspace implementation provides 36 tera-ops/second of 32-bit modular arithmetic, 64\,MB of on-chip storage, and a 1\,TB/s high-bandwidth memory. We evaluate F1\xspace using cycle-accurate simulation running complete FHE applications, and demonstrate speedups of 1,200$\times$--17,000$\times$ over state-of-the-art software implementations. These dramatic speedups counter most of FHE's overheads and enable new applications. 
For example, F1\xspace executes a deep learning inference that used to take 20 minutes in 240 milliseconds, enabling secure real-time deep learning in the cloud. \section{Background}\label{sec:background} Fully Homomorphic Encryption allows performing arbitrary arithmetic on encrypted plaintext values, via appropriate operations on their ciphertexts. Decrypting the resulting ciphertext yields the same result as if the operations were performed on the plaintext values ``in the clear.'' Over the last decade, prior work has proposed multiple \emph{FHE schemes}, each with somewhat different capabilities and performance tradeoffs. BGV~\cite{brakerski:toct14:leveled}, B/FV~\cite{brakerski:crypto12:fully,fan:iacr12:somewhat}, GSW~\cite{gentry:crypto13:homomorphic}, and CKKS~\cite{cheon:ictaci17:homomorphic} are popular FHE schemes.\footnote{These scheme names are acronyms of their authors' last names. For instance, BGV is Brakerski-Gentry-Vaikuntanathan.}~Though these schemes differ in how they encrypt plaintexts, they all use the same data type for ciphertexts: polynomials where each coefficient is an integer modulo $Q$. This commonality makes it possible to build a single accelerator that supports multiple FHE schemes; F1\xspace supports BGV, GSW, and CKKS. We describe FHE in a layered fashion: \autoref{sec:fhe_mapping} introduces FHE's programming model and operations, i.e., FHE's \emph{interface}; \autoref{sec:fhe_operation} describes how FHE operations are \emph{implemented}; \autoref{sec:fhe_optimizations} presents implementation \emph{optimizations}; and \autoref{sec:fhe_analysis} performs an \emph{architectural analysis} of~a~representative FHE kernel to reveal acceleration opportunities. For concreteness, we \emph{introduce FHE using the BGV scheme}, and briefly discuss other FHE schemes in \autoref{sec:fhe_others}. 
\subsection{FHE programming model and operations} \label{sec:fhe_mapping} FHE programs are \emph{dataflow graphs}: directed acyclic graphs where nodes are operations and edges represent data values. Data values are inputs, outputs, or intermediate values consumed by one or more operations. All operations and dependences are known in advance, and data-dependent branching is impossible. In FHE, unencrypted (plaintext) data values are always \emph{vectors}; in BGV~\cite{brakerski:toct14:leveled}, each vector consists of $N$ integers modulo an integer $t$. BGV provides three operations on these vectors: element-wise \emph{addition} (mod $t$), element-wise \emph{multiplication} (mod $t$), and a small set of particular vector \emph{permutations}. We stress that this is BGV's \emph{interface}, not its implementation: it describes \emph{unencrypted} data, and the homomorphic operations that BGV implements on that data in its encrypted form. In \autoref{sec:fhe_operation} we describe how BGV represents encrypted data and how each operation is implemented. At a high level, FHE provides a vector programming model with restricted operations where individual vector elements cannot be directly accessed. This causes some overheads in certain algorithms. For example, summing up the elements of a vector is non-trivial, and requires a sequence of permutations and additions. Despite these limitations, prior work has devised reasonably efficient implementations of key algorithms, including linear algebra~\cite{halevi:crypto14:algorithms}, neural network inference~\cite{brutzkus:icml19:low, gilad:icml16:cryptonets}, logistic regression~\cite{han:iacr18:efficient}, and genome processing~\cite{blatt:nas20:secure}. These implementations are often coded by hand, but recent work has proposed FHE compilers to automate this translation for particular domains, like deep learning~\cite{dathathri:pldi19:chet,dathathri:pldi20:eva}. 
Finally, note that not all data must be encrypted: BGV provides versions of addition and multiplication where one of the operands is unencrypted. Multiplying by unencrypted data is cheaper, so algorithms can trade privacy for performance. For example, a deep learning inference can use encrypted weights and inputs to keep the model private, or use unencrypted weights, which does not protect the model but keeps inputs and inferences private~\cite{brutzkus:icml19:low}. \subsection{BGV implementation overview} \label{sec:fhe_operation} We now describe how BGV represents and processes encrypted data (ciphertexts). The implementation of each computation on ciphertext data is called a \emph{homomorphic operation}. For example, the \emph{homomorphic multiplication} of two ciphertexts yields another ciphertext that, when decrypted, is the element-wise multiplication of the encrypted plaintexts. \paragraph{Data types:} BGV encodes each plaintext vector as a polynomial with~$N$ coefficients mod~$t$. We denote the plaintext space as~$R_t$, so \[\mathfrak{a} = a_0 + a_1x + ... + a_{N-1}x^{N-1} \in R_t\] is a plaintext. Each plaintext is encrypted into a ciphertext consisting of two polynomials of~$N$ integer coefficients modulo some $Q \gg t$. Each ciphertext polynomial is a member of~$R_Q$. \paragraph{Encryption and decryption:} Though encryption and decryption are performed by the client (so F1\xspace need not accelerate~them), they are useful to understand. In BGV, the \textit{secret key} is a polynomial $\mathfrak{s} \in R_Q$. To encrypt a plaintext $\mathfrak{m} \in R_t$, one samples a uniformly random $\mathfrak{a} \in R_Q$, an \emph{error} (or \emph{noise}) $\mathfrak{e} \in R_Q$ with small entries, and computes the ciphertext $ct$ as \begin{equation*} ct = (\mathfrak{a}, \mathfrak{b} = \mathfrak{a}\mathfrak{s} + t \mathfrak{e} + \mathfrak{m}). 
\end{equation*} Ciphertext $ct = (\mathfrak{a}, \mathfrak{b})$ is decrypted by recovering $\mathfrak{e}' = t\mathfrak{e} + \mathfrak{m} = \mathfrak{b} - \mathfrak{a} \mathfrak{s} \bmod{Q}$, and then recovering $\mathfrak{m} = \mathfrak{e}' \bmod t$. Decryption is correct as long as~$\mathfrak{e}'$ does not ``wrap around'' modulo~$Q$, i.e., its coefficients have magnitude less than~$Q/2$. The security of any encryption scheme relies on the ciphertexts not revealing anything about the value of the plaintext (or the secret key). Without adding the noise term $\mathfrak{e}$, the original message $\mathfrak{m}$ would be recoverable from $ct$ via simple Gaussian elimination. Including the noise term entirely hides the plaintext (under cryptographic assumptions)~\cite{lyubashevsky:tact10:ideal}. As we will see, homomorphic operations on ciphertexts increase their noise, so we can only perform a limited number of operations before the resulting noise becomes too large % and makes decryption fail. We later describe \emph{noise management strategies} (Sec. \ref{noisemgmt}) % to keep this noise bounded and thereby allow unlimited operations. \subsubsection{Homomorphic operations} \paragraph{\\Homomorphic addition} of ciphertexts $ct_0 = (\mathfrak{a}_{0}, \mathfrak{b}_{0})$ and $ct_1 = (\mathfrak{a}_{1}, \mathfrak{b}_{1})$ is done simply by adding their corresponding polynomials: $ct_{\text{add}} = ct_0 + ct_1 = (\mathfrak{a}_0 + \mathfrak{a}_1, \mathfrak{b}_0 + \mathfrak{b}_1)$. \paragraph{Homomorphic multiplication} requires two steps. First, the four input polynomials are multiplied and assembled: \begin{equation*} ct_{\times} = (\mathfrak{l}_2, \mathfrak{l}_1, \mathfrak{l}_0) = (\mathfrak{a}_0\mathfrak{a}_1, \mathfrak{a}_0\mathfrak{b}_1 + \mathfrak{a}_1 \mathfrak{b}_0, \mathfrak{b}_0\mathfrak{b}_1) . \end{equation*} This $ct_{\times}$ can be seen as a special intermediate ciphertext encrypted under a different secret key. 
The second step performs a \emph{key-switch\-ing op\-era\-tion} to produce a ciphertext encrypted under the original secret key~$\mathfrak{s}$. More specifically, $\mathfrak{l}_2$ undergoes this key-switching process to produce two polynomials $(\mathfrak{u}_1, \mathfrak{u}_0) = \textrm{KeySwitch}(\mathfrak{l}_2)$. The final output ciphertext is $ct_{\text{mul}} = (\mathfrak{l}_1 + \mathfrak{u}_1, \mathfrak{l}_0 + \mathfrak{u}_0)$. As we will see later (\autoref{sec:fhe_analysis}), key-switching is an expensive operation that dominates the cost of a multiplication. \paragraph{Homomorphic permutations} permute the~$N$ plaintext values (coefficients) that are encrypted in a ciphertext. Homomorphic permutations are implemented using \emph{automorphisms}, which are special permutations of the coefficients of the ciphertext polynomials. There are~$N$ automorphisms, denoted $\sigma_k(\mathfrak{a})$ and $\sigma_{-k}(\mathfrak{a})$ for all positive odd $k<N$. Specifically, % \begin{equation*} \sigma_k(\mathfrak{a}): a_i \rightarrow (-1)^{s} a_{ik \textrm{ mod } N} \text{ for } i=0,...,N-1, \end{equation*} where $s=0$ if $ik \textrm{ mod } 2N < N$, and $s=1$ otherwise. For example, $\sigma_{5}(\mathfrak{a})$ permutes $\mathfrak{a}$'s coefficients so that $a_0$ stays at position 0, $a_1$ goes from position 1 to position 5, and so on (these wrap around, e.g., with $N=1024$, $a_{205}$ goes to position~1, since $205\cdot5 \textrm{ mod } 1024 = 1$). To perform a homomorphic permutation, we first compute an automorphism on the ciphertext polynomials: $ct_{\sigma} = (\sigma_k(\mathfrak{a}), \sigma_k(\mathfrak{b}))$. Just as in homomorphic multiplication, $ct_{\sigma}$ is encrypted under a different secret key, requiring an expensive key-switch to produce the final output $ct_{\text{perm}} = (\mathfrak{u}_1, \sigma_{k}(\mathfrak{b}) + \mathfrak{u}_0)$, where $(\mathfrak{u}_1, \mathfrak{u}_0) = \text{KeySwitch}(\sigma_k (\mathfrak{a}))$. 
We stress that the permutation applied to the ciphertext \emph{does not} induce the same permutation on the underlying plaintext vector. For example, using a single automorphism and careful indexing, it is possible to homomorphically \emph{rotate} the vector of the $N$ encrypted plaintext values. \subsubsection{Noise growth and management}\label{noisemgmt} \textrm{\\Recall} that ciphertexts have noise, which limits the number of operations that they can undergo before decryption gives an incorrect result. Different operations induce different noise growth: addition and permutations cause little growth, but multiplication incurs much more significant growth. So, to a first order, the amount of noise is determined by \emph{multiplicative depth}, i.e., the longest chain of homomorphic multiplications in the computation. Noise forces the use of a large ciphertext modulus $Q$. For example, an FHE program with multiplicative depth of 16 needs $Q$ to be about 512 bits. The noise budget, and thus the tolerable multiplicative depth, grow linearly with~$\log Q$. FHE uses two noise management techniques in tandem: \emph{bootstrapping} and \emph{modulus switching}. \paragraph {Bootstrapping}~\cite{gentry09} enables FHE computations of \emph{unbounded} depth. Essentially, it removes noise from a ciphertext without access to the secret key. This is accomplished by evaluating the decryption function homomorphically. Bootstrapping is an expensive procedure that consists of many (typically tens to hundreds) ho\-mo\-mor\-phic op\-era\-tions. FHE programs with a large multiplicative depth can be divided into regions of limited depth, separated by bootstrapping operations. 
Even with bootstrapping, FHE schemes need a large noise budget (i.e., a large~$Q$) because \emph{(1)}~bootstrapping is computationally expensive, and a higher noise budget enables less-frequent bootstrapping, and \emph{(2)}~bootstrapping itself consumes a certain noise budget (this is similar to why pipelining circuits hits a performance ceiling: registers themselves add area and latency). \paragraph{Modulus switching} rescales ciphertexts from modulus~$Q$ to a modulus~$Q'$, which reduces the noise proportionately. Modulus switching is usually applied before each homomorphic multiplication, to reduce its noise blowup. For example, to execute an FHE program of multiplicative depth 16, we would start with a 512-bit modulus~$Q$. Right before each multiplication, we would switch to a modulus that is 32 bits shorter. So, for example, operations at depth 8 use a 256-bit modulus. Thus, beyond reducing noise, modulus switching reduces ciphertext sizes, and thus computation cost. \subsubsection{Security and parameters} \textrm{\\The} dimension~$N$ and modulus~$Q$ cannot be chosen independently; $N/\log Q$ must be above a certain level for sufficient security. In practice, this means that using a wide modulus to support deep programs also requires a large $N$. For example, with 512-bit $Q$, $N=16K$ is required to provide an acceptable level of security, resulting in very large ciphertexts. \subsection{Algorithmic insights and optimizations}\label{sec:algoInsights} \label{sec:fhe_optimizations} F1\xspace leverages two optimizations developed in prior work: \paragraph{Fast polynomial multiplication via NTTs:} Multiplying two polynomials requires convolving their coefficients, an expensive (naively $O(N^2)$) operation. Just like convolutions can be made faster with the Fast Fourier Transform, polynomial multiplication can be made faster with the Number-Theoretic Transform (NTT)~\cite{moenck1976practical}, % a variant of the discrete Fourier transform for modular arithmetic. 
The NTT takes an $N$\hyp{}coefficient polynomial as input and returns an $N$\hyp{}element vector representing the input in the \textit{NTT domain}. Polynomial multiplication can be performed as element-wise multiplication in the NTT domain. Specifically, \begin{equation*} NTT(\mathfrak{a}\mathfrak{b}) = NTT(\mathfrak{a}) \odot NTT(\mathfrak{b}), \end{equation*} where $\odot$ denotes component-wise multiplication. (For this relation to hold with $N$\hyp{}point NTTs, a \emph{negacyclic} NTT~\cite{lyubashevsky:tact10:ideal} must be used (\autoref{sec:fourStepNTT}).) Because an NTT requires only $O(N \log N)$ modular operations, multiplication can be performed in $O(N \log N)$ operations by using two forward NTTs, element-wise multiplication, and an inverse NTT. And in fact, optimized FHE implementations often store polynomials in the NTT domain rather than in their coefficient form \emph{across operations}, further reducing the number of NTTs. This is possible because the NTT is a linear transformation, so additions and automorphisms can also be performed in the NTT domain: \vspace{-0.05in} % \begin{align*} NTT(\sigma_k(\mathfrak{a})) &= \sigma_k(NTT(\mathfrak{a})) \\ NTT(\mathfrak{a} + \mathfrak{b}) &= NTT(\mathfrak{a}) + NTT(\mathfrak{b}) \end{align*} \vspace{-0.2in} \paragraph{Avoiding wide arithmetic via Residue Number System (RNS) representation:} FHE requires wide ciphertext coefficients (e.g., 512 bits), but wide arithmetic is expensive: the cost of a modular multiplier (which takes most of the compute) grows quadratically with bit width in our range of interest. Moreover, \mbox{we need to efficiently} % support a broad range of widths (e.g., 64 to 512 bits in 32-bit increments), both because programs need different widths, and because modulus switching progressively reduces coefficient widths. 
RNS representation \cite{garner1959residue} % enables representing a single polynomial with wide coefficients as multiple polynomials with narrower coefficients, called \emph{residue polynomials}. To achieve this, the modulus~$Q$ is chosen to be the product of $L$ smaller distinct primes, $Q = q_1 q_2 \cdots q_L$. Then, a polynomial in $R_Q$ can be represented as $L$ polynomials in $R_{q_1}, \ldots, R_{q_L}$, where the coefficients in the $i$-th polynomial are simply the wide coefficients modulo $q_i$. For example, with $W = 32$-bit words, a ciphertext polynomial with $512$-bit modulus~$Q$ is represented as $L = \log Q/W = 16$ polynomials with $32$-bit coefficients. All FHE operations can be carried out under RNS representation, and have bit-complexity that is better than or equivalent to operating on one wide-coefficient polynomial. % \subsection{Architectural analysis of FHE} \label{sec:fhe_analysis} We now analyze a key FHE kernel in depth to understand how we can (and cannot) accelerate it. Specifically, we consider the key-switching operation, which is expensive and takes the majority of work in all of our benchmarks. \autoref{listing:keyswitch} shows an implementation of key-switching. Key\hyp{}switching takes three inputs: a polynomial \texttt{x}, and two \emph{key-switch hint matrices} \texttt{ksh0} and \texttt{ksh1}. \texttt{x} is stored in RNS form as $L$ residue polynomials (\texttt{RVec}). Each residue polynomial \texttt{x[i]} is a vector of $N$ 32-bit integers modulo $q_i$. Inputs and outputs are in the NTT domain; only the \texttt{y[i]} polynomials (line 3) are in coefficient form. \begin{figure} \begin{center} \begin{lstlisting}[caption={Key-switch implementation. \texttt{RVec} is an $N$-element vector of 32-bit values, storing a single RNS polynomial in either the coefficient or the NTT domain. 
% % }, mathescape=true, style=custompython, label=listing:keyswitch] def keySwitch(x: RVec[L], ksh0: RVec[L][L], ksh1: RVec[L][L]): y = [INTT(x[i],$q_i$) for i in range(L)] u0: RVec[L] = [0, ...] u1: RVec[L] = [0, ...] for i in range(L): for j in range(L): xqj = (i == j) ? x[i] : NTT(y[i], $q_j$) u0[j] += xqj * ksh0[i,j] mod $q_j$ u1[j] += xqj * ksh1[i,j] mod $q_j$ return (u0, u1) \end{lstlisting} \end{center} \vspace{0.25cm} \end{figure} \paragraph{Computation vs.\ data movement:} A single key-switch requires $L^2$ NTTs, $2L^2$ multiplications, and $2L^2$ additions of $N$-element \mbox{vectors}. In RNS form, the rest of a homomorphic multiplication (excluding key-switching) is $4L$ multiplications and $3L$ additions (\autoref{sec:fhe_operation}), so key-switching is dominant. However, the main cost at high values of $L$ and $N$ is data movement. For example, at $L = 16$, $N = 16K$, each RNS polynomial (\texttt{RVec}) is 64\,KB; each ciphertext polynomial is 1\,MB; each ciphertext is 2\,MB; and the key-switch hints dominate, taking up 32\,MB. With F1\xspace's compute throughput, fetching the inputs of each key-switching from off-chip memory would demand about 10\,TB/s of memory bandwidth. Thus, it is crucial to reuse these values as much as possible. Fortunately, key-switch hints can be reused: all homomorphic multiplications use the same key-switch hint matrices, and each automorphism has its own pair of matrices. But values are so large that few of them fit on-chip. Finally, note that there is no effective way to decompose or tile this operation to reduce storage needs while achieving good reuse: tiling the key-switch hint matrices on either dimension produces many long-lived intermediate values; and tiling across \texttt{RVec} elements is even worse because in NTTs every input element affects every output element. 
\paragraph{Performance requirements:} We conclude that, to accommodate these large operands, an FHE accelerator requires a memory system that \emph{(1)} decouples data movement from computation, as demand misses during frequent key-switches would tank performance; and \emph{(2)} implements a large amount of on-chip storage (over 32\,MB in our example) to allow reuse across entire homomorphic operations (e.g., reusing the same key-switch hints across many homomorphic multiplications). Moreover, the FHE accelerator must be designed to use the memory system well. First, scheduling data movement and computation is crucial: data must be fetched far ahead of its use to provide decoupling, and operations must be ordered carefully to maximize reuse. Second, since values are large, excessive parallelism can increase footprint and hinder reuse. Thus, the system should use relatively few high-throughput functional units rather than many low-throughput ones. \paragraph{Functionality requirements:} Programmable FHE accelerators must support a wide range of parameters, both $N$ (polynomial/vector sizes) and $L$ (number of RNS polynomials, i.e., number of 32-bit prime factors of $Q$). While $N$ is generally fixed for a single program, $L$ changes as modulus switching sheds off polynomials. Moreover, FHE accelerators must avoid overspecializing in order to support algorithmic diversity. For instance, we have described \emph{an} implementation of key-switching, but there are others~\cite{kim:jmir18:helr,gentry:crypto2012:homomorphic} with different tradeoffs. % For example, an alternative implementation requires much more compute but has key-switch hints that grow with $L$ instead of $L^2$, so it becomes attractive for very large $L$ ($\sim$20). F1\xspace accelerates \emph{primitive operations on large vectors}: modular arithmetic, NTTs, and automorphisms. It exploits wide vector processing to achieve very high throughput, even though this makes NTTs and automorphisms costlier. 
F1\xspace avoids building functional units for coarser primitives, like key-switching, which would hinder algorithmic diversity. \paragraph{Limitations of prior accelerators:} Prior work has proposed several FHE accelerators for FPGAs~\cite{cousins:hpec14:fpga-he,cousins:tetc17:fpga-he,doroz:tc15:accelerating-fhe,roy:hpca19:fpga-he,migliore:tecs17:he-karatsuba,riazi:asplos20:heax,turan:tc20:heaws,mert:tvlsi20:bfv-accel}. These systems have three important limitations. First, they work by accelerating some primitives but defer others to a general-purpose host processor, and rely on the host processor to sequence operations. This causes excessive data movement that limits speedups. Second, these accelerators build functional units for \emph{fixed parameters} $N$ and $L$ (or $\log Q$ for those not using RNS). Third, many of these systems build overspecialized primitives that limit algorithmic diversity. Most of these systems achieve limited speedups, about 10$\times$ over software baselines. HEAX~\cite{riazi:asplos20:heax} achieves larger speedups (200$\times$ vs.\ a single core). But it does so by overspecializing: it uses relatively low-throughput functional units for primitive operations, so to achieve high performance, it builds a fixed-function pipeline for key-switching. \subsection{FHE schemes other than BGV} \label{sec:fhe_others} We have so far focused on BGV, but other FHE schemes provide different tradeoffs. For instance, whereas BGV requires integer plaintexts, CKKS~\cite{cheon:ictaci17:homomorphic} supports ``approximate'' computation on \mbox{fixed-point} values. B/FV~\cite{brakerski:crypto12:fully,fan:iacr12:somewhat} encodes plaintexts in a way that makes modulus switching before homomorphic multiplication unnecessary, thus easing programming (but forgoing the efficiency gains of modulus switching). 
And GSW~\cite{gentry:crypto13:homomorphic} features reduced, asymmetric noise growth under homomorphic multiplication, but encrypts a small amount of information per ciphertext (not a full $N/2$-element vector). Because F1\xspace accelerates primitive operations rather than full homomorphic operations, it supports BGV, CKKS, and GSW with the same hardware, since they use the same primitives. Accelerating B/FV would require some other primitives, so, though adding support for them would not be too difficult, our current implementation does not target it. \section{F1 Architecture}\label{sec:arch} \autoref{fig:arch} shows an overview of F1\xspace, which we derive from the insights in \autoref{sec:fhe_analysis}. \paragraph{Vector processing with specialized functional units:} F1\xspace features wide-vector execution with functional units (FUs) tailored to primitive FHE operations. Specifically, F1\xspace implements vector FUs for modular addition, modular multiplication, NTTs (forward and inverse in the same unit), and automorphisms. Because we leverage RNS representation, these FUs use a fixed, small arithmetic word size (32 bits in our implementation), avoiding wide arithmetic. \figArch FUs process vectors of configurable \emph{length} $N$ using a fixed number of \emph{vector lanes} $E$. Our implementation uses $E=$128 lanes and supports power-of-two lengths $N$ from 1,024 to 16,384. This covers the common range of FHE polynomial sizes, so an RNS polynomial maps to a single vector. Larger polynomials (e.g., of 32K elements) can use multiple vectors. All FUs are \emph{fully pipelined}, so they achieve the same throughput of $E=$128 elements/cycle. FUs consume their inputs in contiguous chunks of $E$ elements in consecutive cycles. This is easy for element-wise operations, but hard for NTTs and automorphisms. \autoref{sec:FUs} details our novel FU implementations, including the first vector implementation of automorphisms. 
Our evaluation shows that these FUs achieve much higher performance than those of prior work. This is important because, as we saw in \autoref{sec:fhe_analysis}, \emph{having fewer high-throughput FUs reduces parallelism and thus memory footprint}. \paragraph{Compute clusters:} Functional units are grouped in \emph{compute clusters}, as \autoref{fig:arch} shows. Each cluster features several FUs (1 NTT, 1 automorphism, 2 multipliers, and 2 adders in our implementation) and a banked register file that can (cheaply) supply enough operands each cycle to keep all FUs busy. The chip has multiple clusters (16 in our implementation). \paragraph{Memory system:} F1\xspace features an explicitly managed memory hierarchy. As \autoref{fig:arch} shows, F1\xspace features a large, heavily banked scratchpad (64\,MB across {16} banks in our implementation). The scratchpad interfaces with both high-bandwidth off-chip memory (HBM2 in our implementation) and with compute clusters through an on-chip network. F1\xspace uses decoupled data orchestration~\cite{pellauer:asplos19:buffets} to hide main memory latency. Scratchpad banks work autonomously, fetching data from main memory far ahead of its use. Since memory has relatively low bandwidth, off-chip data is always staged in scratchpads, and compute clusters do not access main memory directly. The on-chip network connecting scratchpad banks and compute clusters provides very high bandwidth, which is necessary because register files are small and achieve limited reuse. We implement a single-stage bit-sliced crossbar network~\cite{passas:tocaid12:crossbar} that provides full bisection bandwidth. Banks and the network have wide ports (512 bytes), so that a single scratchpad bank can send a vector to a compute unit at the rate it is consumed (and receive it at the rate it is produced). This avoids long staging of vectors at the register files. 
\paragraph{Static scheduling:} Because FHE programs are completely regular, F1\xspace adopts a \emph{static, exposed microarchitecture}: all components have fixed latencies, which are exposed to the compiler. The compiler is responsible for scheduling operations and data transfers in the appropriate cycles to prevent structural or data hazards. This is in the style of VLIW processors~\cite{fisher:isca83:very}. Static scheduling simplifies logic throughout the chip. For example, FUs need no stalling logic; register files and scratchpad banks need no dynamic arbitration to handle conflicts; and the on-chip network uses simple switches that change their configuration independently over time, without the buffers and arbiters of packet-switched networks. Because memory accesses do have a variable latency, we assume the worst-case latency, and buffer data that arrives earlier % (note that, because we access large chunks of data, e.g., 64\,KB, this worst-case latency is not far from the average). \paragraph{Distributed control:} Though static scheduling is the hallmark of VLIW, F1\xspace's implementation is quite different: rather than having a single stream of instructions with many operations each, in F1\xspace each component has an \emph{independent instruction stream}. % This is possible because F1\xspace does not have any control flow: though FHE programs may have loops, we unroll them to avoid all branches, and compile programs into linear sequences of instructions. This approach may appear costly. But vectors are very long, so each instruction encodes a lot of work and this overhead is minimal. Moreover, this enables a compact instruction format, which encodes a single operation followed by the number of cycles to wait until running the next instruction. This encoding avoids the low utilization of VLIW instructions, which leave many operation slots empty. 
Each FU, register file, network switch, scratchpad bank, and memory controller has its own instruction stream, which a control~unit~fetches in small blocks and distributes to components. Overall, instruction fetches consume less than 0.1\% of memory traffic. \paragraph{Register file (RF) design:} Each cluster in F1\xspace requires 10 read ports and 6 write ports to keep all FUs busy. To enable this cheaply, we use an 8-banked \emph{element-partitioned} register file design~\cite{asanovic:ucb98:vector} that leverages long vectors: each vector is striped across banks, and each FU cycles through all banks over time, using a single bank each cycle. By staggering the start of each vector operation, FUs access different banks each cycle. This avoids multiporting, requires a simple RF-FU interconnect, and performs within 5\% of an ideal infinite-ported RF. \section{Scheduling Data and Computation}\label{sec:scheduler} We now describe F1\xspace's software stack, focusing on the new static scheduling algorithms needed to use hardware well. \figCompilerOverview \autoref{fig:compilerOverview} shows an overview of the F1\xspace compiler. The compiler takes as input an FHE program written in a high-level domain specific language (\autoref{sec:programming}). The compiler is structured in three stages. First, the \emph{homomorphic operation compiler} orders high-level operations to maximize reuse and translates the program into a \emph{computation dataflow graph}, where operations are computation instructions but there are no loads or stores. Second, the \emph{off-chip data movement scheduler} % schedules transfers between main memory and the scratchpad to achieve decoupling and maximize reuse. This phase uses a simplified view of hardware, considering it as a scratchpad directly attached to functional units. % The result is a dataflow graph that includes loads and stores from off-chip memory. Third, the \emph{cycle-level scheduler} refines this dataflow graph. 
It uses a cycle-accurate hardware model to divide instructions across compute clusters and schedule on-chip data transfers. This phase determines the exact cycles of all operations, and produces the instruction streams for all components. This multi-pass scheduling primarily minimizes off-chip data movement, the critical bottleneck. Only in the last phase do we consider on-chip placement and data movement. \paragraph{Comparison with prior work:} We initially tried static sched\-uling algorithms from prior work~\cite{blelloch:acm1999:provably,marchal:jpdc2019:limiting,goodman:ics1988:code,ozer:micro1998:unified,barany:odes2011:register}, which primarily target VLIW architectures. However, we found these approaches ill-suited to F1\xspace for multiple reasons. First, VLIW designs have less-flexible decoupling mechanisms and minimizing data movement is secondary to maximizing compute operations per cycle. Second, prior algorithms often focus on loops, where the key concern is to find a compact repeating schedule, e.g., through software pipelining~\cite{lam1989software}. By contrast, F1\xspace has no flow control and we can schedule each operation independently. Third, though prior work has proposed register-pressure-aware instruction scheduling algorithms, they targeted small register files and basic blocks, whereas we must manage a large scratchpad over a much longer horizon. Thus, the algorithms we tried either worked poorly~\cite{ozer:micro1998:unified, goodman:ics1988:code, marchal:jpdc2019:limiting} or could not scale to the sizes required~\cite{barany:odes2011:register, xu:sigplan2007:tetris, touati:ijpp2005:register, berson:pact1993:ursa}. For example, when considering an algorithm such as Code Scheduling to Minimize Register Usage (CSR)~\cite{goodman:ics1988:code}, we find that the schedules it produces suffer from a large blowup of live intermediate values. This large footprint causes scratchpad thrashing and results in poor performance. 
Furthermore, CSR is also quite computationally expensive, requiring long scheduling times for our larger benchmarks. We evaluate our approach against CSR in \autoref{sec:sensitivity}. We also attempted to frame scheduling as a register allocation problem. Effectively, the key challenge in all of our schedules is \emph{data movement}, not computation. Finding a register allocation which minimizes spilling could provide a good basis for an effective schedule. However, our scratchpad stores at least 1024 residue vectors (1024 at maximum $N = 16K$, more for smaller values of $N$), and many of our benchmarks involve hundreds of thousands of instructions, meaning that register allocation algorithms simply could not scale to our required sizes~\cite{barany:odes2011:register, xu:sigplan2007:tetris, touati:ijpp2005:register, berson:pact1993:ursa}. \subsection{Translating the program to a dataflow graph} \label{sec:programming} We implement a high-level domain-specific language (DSL) for writing F1\xspace programs. To illustrate this DSL and provide a running example, \autoref{listing:mv} shows the code for matrix-vector multiplication. This follows HELib's algorithm~\cite{halevi:crypto14:algorithms}, which \autoref{fig:MultDataflow} shows. This toy $4 \times 16K$ matrix-vector multiply uses input ciphertexts with $N=16K$. Because accessing individual vector elements is not possible, the code uses homomorphic rotations % to produce each output element. 
\begin{figure} \begin{center} \begin{lstlisting}[caption={$(4 \times 16K)$ matrix-vector multiply in F1\xspace's DSL.}, mathescape=true, style=custompython, label=listing:mv] p = Program(N = 16384) M_rows = [ p.Input(L = 16) for i in range(4) ] output = [ None for i in range(4) ] V = p.Input(L = 16) def innerSum(X): for i in range(log2(p.N)): X = Add(X, Rotate(X, 1 << i)) return X for i in range(4): prod = Mul(M_rows[i], V) output[i] = innerSum(prod) \end{lstlisting} \end{center} \vspace{0.15cm} \end{figure} As \autoref{listing:mv} shows, programs in this DSL are at the level of the simple FHE interface presented in \autoref{sec:fhe_mapping}. There is only one aspect of the FHE implementation in the DSL: programs encode the desired noise budget ($L=16$ in our example), as the compiler does not automate noise management. \subsection{Compiling homomorphic operations} The first compiler phase works at the level of the homomorphic operations provided by the DSL. It clusters operations to improve reuse, and translates them down to instructions. \paragraph{Ordering} homomorphic operations seeks to maximize the reuse of key-switch hints, which is crucial to reduce data movement (\autoref{sec:fhe_analysis}). For instance, the program in \autoref{listing:mv} uses 15 different sets of key-switch hint matrices: one for the multiplies (line 12), and a different one for \emph{each} of the rotations (line 8). If this program was run sequentially as written, it would cycle through all 15 key-switching hints (which total 480\,MB, exceeding on-chip storage) four times, achieving no reuse. Clearly, it is better to reorder the computation to perform all four multiplies, and then all four \texttt{Rotate(X, 1)}, and so on. This reuses each key-switch hint four times. To achieve this, this pass first clusters \emph{independent} homomorphic operations that reuse the same hint, then orders all clusters through simple list-scheduling. 
This generates schedules with good key-switch hint reuse. \figMultDataflow \paragraph{Translation:} Each homomorphic operation is then compiled into instructions, using the implementation of each operation in the target FHE scheme (BGV, CKKS, or GSW). Each homomorphic operation may translate to thousands of instructions. These instructions are also ordered to minimize the amount of intermediates. The end result is an instruction-level dataflow graph where every instruction is tagged with a priority that reflects its global order. The compiler exploits algorithmic choice. Specifically, there are multiple implementations of key-switching (\autoref{sec:fhe_analysis}), and the right choice depends on $L$, the amount of key-switch reuse, and load on FUs. The compiler leverages knowledge of operation order to estimate these and choose the right variant. \subsection{Scheduling data transfers} \label{sec:datatransfers} The second compiler phase consumes an instruction-level dataflow graph and produces an approximate schedule that includes data transfers decoupled from computation, minimizes off-chip data transfers, and achieves good parallelism. This requires solving an interdependent problem: when to bring a value into the scratchpad and which one to replace depends on the computation schedule; and to prevent stalls, the computation schedule depends on which values are in the scratchpad. To solve this problem, this scheduler uses a simplified model of the machine: it does not consider on-chip data movement, and simply treats all functional units as being directly connected to the scratchpad. % The scheduler is greedy, scheduling one instruction at a time. It considers instructions ready if their inputs are available in the scratchpad, and follows instruction priority among ready ones. 
To schedule loads, we assign each load a priority \begin{equation*} p(\text{load}) = \max \{ p(u) \mid u \in \mathit{users}(\text{load})\}, \end{equation*} then greedily issue loads as bandwidth becomes available. When issuing an instruction, we must ensure that there is space to store its result. We can often replace a dead value. % When no such value exists, we evict the value with the furthest expected time to reuse. We estimate time to reuse as the maximum priority among unissued users of the value. This approximates Belady's optimal replacement policy~\cite{belady1966study}. Evictions of dirty data add stores to the dataflow graph. When evicting a value, we add spill (either dirty or clean) and fill instructions to our dataflow graph. \subsection{Cycle-level scheduling} Finally, the cycle-level scheduler takes in the data movement schedule produced by the previous phase, and schedules all operations for all components considering all resource constraints and data dependences. This phase distributes computation across clusters and manages their register files and all on-chip transfers. Importantly, this scheduler is fully constrained by its input schedule's off-chip data movement. It does not add loads or stores in this stage, but it does move loads to their earliest possible issue cycle to avoid stalls on missing operands. All resource hazards are resolved by stalling. In practice, we find that this separation of scheduling into data movement and instruction scheduling produces good schedules in reasonable compilation times. This stage works by iterating through all instructions in the order produced by the previous compiler phase (\autoref{sec:datatransfers}) and determining the minimum cycle at which all required on-chip resources are available. We consider the availability of off-chip bandwidth, scratchpad space, register file space, functional units, and ports. 
During this final compiler pass, we finally account for store bandwidth, scheduling stores (which result from spills) as needed. In practice, we find that this does not hurt our performance much, as stores are infrequent across most of our benchmarks due to our global schedule and replacement policy design. After the final schedule is generated, we validate it by simulating it forward to ensure that no clobbers or resource usage violations occur. It is important to note that because our schedules are fully static, our scheduler also doubles as a performance measurement tool. As illustrated in \autoref{fig:compilerOverview}, the compiler takes in an architecture description file detailing a particular configuration of F1\xspace. This flexibility allows us to conduct design space explorations very quickly (\autoref{sec:scalability}). \section{Functional Units} \label{sec:FUs} In this section, we describe F1\xspace's novel functional units. These include the first vectorized automorphism unit (\autoref{sec:automorphism}), the first fully-pipelined flexible NTT unit (\autoref{sec:fourStepNTT}), and a new simplified modular multiplier adapted to FHE (\autoref{sec:modMult}). \subsection{Automorphism unit}\label{sec:automorphism} Because F1\xspace uses $E$ vector lanes, each residue polynomial is stored and processed as $G$ groups, or \emph{chunks}, of $E$ elements each ($N=G\cdot E$). An automorphism $\sigma_k$ maps the element at index $i$ to index $ki \textrm{ mod } N$; there are $N$ automorphisms total, two for each odd $k < N$ (\autoref{sec:fhe_operation}). The key challenge in designing an automorphism unit is that these permutations are hard to vectorize: we would like this unit to consume and produce $E=$128 elements/cycle, but the vectors are much longer, with $N$ up to 16\,K, and elements are permuted across different chunks. Moreover, we must support variable $N$ \emph{and} all automorphisms. 
Standard solutions fail: a 16\,K$\times$16\,K crossbar is much too large; a scalar approach, like reading elements in sequence from an SRAM, is too slow (taking $N$ cycles); and using banks of SRAM to increase throughput runs into frequent bank conflicts: each automorphism ``spreads''~elements with a different stride, so regardless of the \mbox{banking} scheme, some automorphisms will map many consecutive elements to the~same~bank. \figAutomorphism We contribute a new insight that makes vectorizing automorphisms simple: if we interpret a residue polynomial as a $G \times E$ matrix, an automorphism can always be decomposed into two independent \emph{column} and \emph{row permutations}. If we transpose this matrix, both column and row permutations can be applied \emph{in chunks of $E$ elements}. \autoref{fig:automorphism} shows an example of how automorphism $\sigma_3$ is applied to a residue polynomial with $N=16$ and $E=4$ elements/cycle. Note how the permute column and row operations are local to each $4$-element chunk. Other $\sigma_k$ induce different permutations, but with the same row/column structure. \figautfu Our automorphism unit, shown in \autoref{fig:aut_fu}, uses this insight to be both vectorized (consuming $E=128$ elements/cycle) and fully pipelined. Given a residue polynomial of $N=G\cdot E$ elements, the automorphism unit first applies the column permutation to each $E$-element input. Then, it feeds this to a \emph{transpose unit} that reads in the whole residue polynomial interpreting it as a $G\times E$ matrix, and produces its transpose $E\times G$. The transpose unit outputs $E$ elements per cycle (outputting multiple rows per cycle when $G < E$). Row permutations are applied to each $E$-element chunk, and the reverse transpose is applied. 
Further, we decompose both the row and column permutations into a pipeline of sub-permutations that are \textit{fixed in hardware}, with each sub-permutation either applied or bypassed based on simple control logic; this avoids using crossbars for the $E$-element permute row and column operations. \paragraph{Transpose unit:} Our \textit{quadrant-swap transpose} unit transposes an $E \times E$ (e.g., $128\times 128$) matrix by recursively decomposing it into quadrants and exploiting the identity \begin{equation*} \left[ \begin{array}{c|c} \texttt{A} & \texttt{B}\\ \hline \texttt{C} & \texttt{D} \end{array}\right]^{\textrm{T}} = \left[ \begin{array}{c|c} \texttt{A}^{\textrm{T}} & \texttt{C}^{\textrm{T}} \\ \hline \texttt{B}^{\textrm{T}} & \texttt{D}^{\textrm{T}} \end{array}\right]. \end{equation*} The basic building block is a $K \times K$ \textit{quadrant-swap} unit, which swaps quadrants \texttt{B} and \texttt{C}, as shown in \autoref{fig:quadrantSwap}(left). Operationally, the quadrant swap procedure consists of three steps, each taking $K/2$ cycles: \begin{enumerate} \item Cycle \texttt{i} in the first step reads \texttt{A[i]} and \texttt{C[i]} and stores them in \texttt{top[i]} and \texttt{bottom[i]}, respectively. \item Cycle \texttt{i} in the second step reads \texttt{B[i]} and \texttt{D[i]}. The unit activates the first swap MUX and the bypass line, thus storing \texttt{D[i]} in \texttt{top[i]} and outputting \texttt{A[i]} (by reading from \texttt{top[i]}) and \texttt{B[i]} via the bypass line. \item Cycle \texttt{i} in the third step outputs \texttt{D[i]} and \texttt{C[i]} by reading from \texttt{top[i]} and \texttt{bottom[i]}, respectively. The second swap MUX is activated so that \texttt{C[i]} is on top. \end{enumerate} Note that step $3$ for one input can be done in parallel with step $1$ for the next, so the unit is \emph{fully pipelined}. 
\figQuadrantSwap The transpose is implemented by a full $E \times E$ quadrant-swap followed by $\log_2E$ layers of smaller transpose units to recursively transpose \texttt{A}, \texttt{B}, \texttt{C}, and \texttt{D}. \autoref{fig:quadrantSwap} (right) shows an implementation for $E=8$. Finally, by selectively bypassing some of the initial quadrant swaps, this transpose unit also works for all values of $N$ ($N=G\times E$ with power-of-2 $G < E$). Prior work has implemented transpose units for signal-processing applications, either using registers~\cite{wang2018pipelined,zhang2020novel} or with custom SRAM designs~\cite{shang2014single}. Our design has three advantages over prior work: it uses standard SRAM memory, so it is dense without requiring complex custom SRAMs; it is fully pipelined; and it works for a wide range of dimensions. \subsection{Four-step NTT unit}\label{sec:fourStepNTT} There are many ways to implement NTTs in hardware: an NTT is like an FFT~\cite{cooley:moc65:algorithm} but with a butterfly that uses modular multipliers. We implement $N$-element NTTs (from 1K to 16K) as a composition of smaller $E$=128-element NTTs, since implementing a full 16K-element NTT datapath is prohibitive. The challenge is that standard approaches result in memory access patterns that are hard to vectorize. \figFourStepNTT To that end, we use the \textit{four-step variant} of the FFT algorithm~\cite{bailey:supercomputing89:FFTs}, which adds an extra multiplication to produce a vector-friendly decomposition. \autoref{fig:fourStepNTT} illustrates our four-step NTT pipeline for $E=4$; we use the same structure with $E=128$. The unit is fully pipelined and consumes $E$ elements per cycle. To compute an $N=E\times E$ NTT, the unit first computes an $E$-point NTT on each $E$-element group, multiplies each group with twiddles, transposes the $E$ groups, and computes another $E$-element NTT on each transpose. 
The same NTT unit implements the inverse NTT by storing multiplicative factors (\textit{twiddles}) required for both forward and inverse NTTs in a small \textit{twiddle SRAM}. Crucially, we are able to support all values of $N$ using a single four-step NTT pipeline by conditionally bypassing layers in the second NTT butterfly. We use the same transpose unit implementation as with automorphisms. Our four-step pipeline supports negacyclic NTTs (NCNs), which are more efficient than standard non-negacyclic NTTs (that would require padding, \autoref{sec:algoInsights}). Specifically, we extend prior work~\cite{poppelmann2015high,roy2014compact,lyubashevsky:tact10:ideal} in order to support \emph{both} forward and inverse NCNs using the same hardware as for the standard NTT. Namely, prior work shows how to either \emph{(1)} perform a forward NCN via a standard decimation-in-time (DIT) NTT pipeline, or \emph{(2)} perform an inverse NCN via a standard decimation-in-frequency (DIF) NTT pipeline. The DIF and DIT NTT variants use different hardware; therefore, this approach requires separate pipelines for forward and inverse NCNs. Prior work~\cite{lyubashevsky:tact10:ideal} has shown that separate pipelines can be avoided by adding a multiplier either before or after the NTT: doing an \emph{inverse} NCN using a \emph{DIT} NTT requires a multiplier unit \emph{after} the NTT, while doing a \emph{forward} NCN using a \emph{DIF} NTT requires a multiplier unit \emph{before} the NTT. We now show that \emph{both} the forward and inverse NCN can be done in the same standard four-step NTT pipeline, with \emph{no additional hardware}. This is because the four-step NTT already has a multiplier and two NTTs in its pipeline. We set the first NTT to be decimation-in-time and the second to be decimation-in-frequency (\autoref{fig:fourStepNTT}). 
To do a forward NTT, we use the forward NCN implementation via DIT NTT for the first NTT; we modify the contents of the Twiddle SRAM so that the multiplier does the pre-multiplication necessary to implement a forward NCN in the second NTT (which is DIF and thus requires the pre-multiplication). Conversely, to do an inverse NTT, we modify the Twiddle SRAM contents to do the post\hyp{}mul\-ti\-pli\-ca\-tion necessary to implement an inverse NCN in the first NTT (which is DIT); and we use the inverse NCN imple\-men\-ta\-tion via DIF NTT for the second NTT. The NTT unit is large: each of the 128-element NTTs requires $E(\log (E)-1)/2$=384 multipliers, and the full unit uses 896 multipliers. But its high throughput improves performance over many low-throughput NTTs (\autoref{sec:evaluation}). % This is the first implementation of a fully-pipelined four-step NTT unit, improving NTT performance by 1,600$\times$ over the state of the art (\autoref{sec:perf}). \subsection{Optimized modular multiplier}\label{sec:modMult} \tblModMult Modular multiplication computes $a\cdot b \textrm{ mod } q$. This is the most expensive and frequent operation. Therefore, improvements to the modular multiplier have an almost linear impact on the computational capabilities of an FHE accelerator. Prior work~\cite{mert:euromicro19:design} recognized that a Montgomery multiplier~\cite{montgomery:mom85:modular} within NTTs can be improved by leveraging the fact that the possible values of modulus $q$ are restricted by the number of elements the NTT is applied to. We notice that if we only select moduli $q_i$, such that $q_i = -1 \textrm{ mod } 2^{16}$, we can remove a multiplier stage from~\cite{mert:euromicro19:design}; this reduces area by 19\% and power by 30\% (\autoref{tbl:modMult}). The additional restriction on $q$ is acceptable because FHE requires at most 10s of moduli~\cite{gentry:crypto2012:homomorphic}, and our approach allows for 6,186~prime~moduli. 
\section{F1\xspace Implementation} \label{sec:implementation} We have implemented F1\xspace's components in RTL, and synthesize them in a commercial 14/12nm process using state-of-the-art tools. These include a commercial SRAM compiler that we use for scratchpad and register file banks. We use a dual-frequency design: most components run at 1\,GHz, but memories (register files and scratchpads) run double-pumped at 2\,GHz. Memories meet this frequency easily and this enables using single-ported SRAMs while serving up to two accesses per cycle. By keeping most of the logic at 1\,GHz, we achieve higher energy efficiency. We explored several non-blocking on-chip networks (Clos, Benes, and crossbars). We use 3 16$\times$16 bit-sliced crossbars~\cite{passas:tocaid12:crossbar} (scratch\-pad$\rightarrow$cluster, cluster$\rightarrow$scratchpad, and cluster$\rightarrow$cluster). % \autoref{tbl:GF12} shows a breakdown of area by component, as well as the area of our F1\xspace configuration, 151.4\,mm$^2$. FUs take 42\% of the area, with 31.7\% going to memory, 6.6\% to the on-chip network, and 19.7\% to the two HBM2 PHYs. We assume 512\,GB/s bandwidth per PHY; this is similar to the NVIDIA A100 GPU~\cite{choquette2021nvidia}, which has 2.4\,TB/s with 6 HBM2E PHYs~\cite{nvidiadgx}. We use prior work to estimate HBM2 PHY area~\cite{rambuswhite, dasgupta20208} and power~\cite{rambuswhite, ge2011design}. This design is constrained by memory bandwidth: though it has 1\,TB/s of bandwidth, the on-chip network's bandwidth is 24\,TB/s, and the aggregate bandwidth between RFs and FUs is 128\,TB/s. This is why maximizing reuse is crucial. \section{Experimental Methodology} \paragraph{Modeled system:} We evaluate our F1\xspace implementation from \autoref{sec:implementation}. We use a cycle-accurate simulator to execute F1\xspace programs. 
Because the architecture is static, this is very different from conventional simulators, and acts more as a checker: it runs the instruction stream at each component and verifies that latencies are as expected and there are no missed dependences or structural hazards. We use activity-level energies from RTL synthesis to produce energy breakdowns. \paragraph{Benchmarks:} We use several FHE programs to evaluate F1\xspace. % All programs come from state-of-the-art software implementations, which we port to F1\xspace: \subparagraph{Logistic regression} uses the HELR algorithm~\cite{han:aaai19:logistic}, which is based on CKKS. We compute a single batch of logistic regression training with up to $256$ features, and $256$ samples per batch, starting at computational depth $L = 16$; this is equivalent to the first batch of HELR's MNIST workload. This computation features % ciphertexts with large $\log Q$ ($L = 14,15,16$), so it needs careful data orchestration to run efficiently. \subparagraph{Neural network} benchmarks come from Low Latency CryptoNets (LoLa)~\cite{brutzkus:icml19:low}. This work uses B/FV, an FHE scheme that F1\xspace does not support, so we use CKKS instead. We run two neural networks: LoLa-MNIST is a simple, LeNet-style network used on the MNIST dataset~\cite{lecunn:ieee98:gradient-document}, while LoLa-CIFAR is a much larger 6-layer network (similar in computation to MobileNet v3~\cite{howard2019searching}) used on the CIFAR-10 dataset~\cite{cifar10}. LoLa-MNIST includes two variants with unencrypted and encrypted weights; LoLa-CIFAR is available only with unencrypted weights. These three benchmarks use relatively low $L$ values (their starting $L$ values are 4, 6, and 8, respectively), so they are less memory-bound. They also feature frequent automorphisms, showing the need for a fast automorphism~unit. \tblGF % \subparagraph{DB Lookup} is adapted from HELib's \texttt{BGV\_country\_db\_lookup}~\cite{helib:db-lookup}. 
A BGV-encrypted query string is used to traverse an encrypted key-value store and return the corresponding value. The original implementation uses a low security level for speed of demonstration, but in our version, we implement it at $L=17$, $N=16$K for realism. We also parallelize the CPU version so it can effectively use all available cores. DB Lookup is both deep and wide, so running it on F1\xspace incurs substantial off-chip data movement. \addtocounter{table}{1} \tblMicrobenchmark \subparagraph{Bootstrapping:} We evaluate bootstrapping benchmarks for BGV and CKKS. Bootstrapping takes an $L=1$ ciphertext with an exhausted noise budget and refreshes it by bringing it up to a chosen top value of $L=L_{max}$, then performing the bootstrapping computation to eventually obtain a usable ciphertext at a lower depth (e.g., $L_{max} - 15$ for BGV). For BGV, we use Alperin-Sheriff and Peikert's algorithm~\cite{alperin:crypto13:practical} for non-packed BGV boot\-strap\-ping, with $L_{max} = 24$. This is a particularly challenging benchmark because it features computations at large values of $L$. This exercises the scheduler's algorithmic choice component, which selects the right key-switch method to balance computation and data movement. For CKKS, we use non-packed CKKS bootstrapping from HEA\-AN~\cite{cheon:eurocrypt2018:bootstrapping}, also with $L_{max} = 24$. CKKS bootstrapping has many fewer ciphertext multiplications than BGV, greatly reducing reuse opportunities for key-switch hints. \paragraph{Baseline systems:} We compare F1\xspace with a CPU system running the baseline programs (a 4-core, 8-thread, 3.5\,GHz Xeon E3-1240v5). Since prior accelerators do not support full programs, we also include microbenchmarks of single operations and compare against HEAX~\cite{riazi:asplos20:heax}, the fastest prior accelerator. 
\section{Evaluation}\label{sec:evaluation} \subsection{Performance}\label{sec:perf} \addtocounter{table}{-2} \tblBenchmark \addtocounter{table}{1} \paragraph{Benchmarks:} \autoref{tbl:benchmark} compares the performance of F1\xspace and the CPU on full benchmarks. It reports execution time in milliseconds for each program (lower is better), and F1\xspace's speedup over the CPU (higher is better). F1\xspace achieves dramatic speedups, from 1,195$\times$ to 17,412$\times$ (5,432$\times$ gmean). CKKS bootstrapping has the lowest speedups as it's highly memory-bound; other speedups are within a relatively narrow band, as compute and memory traffic are more balanced. These speedups greatly expand the applicability of FHE. Consider deep learning: in software, even the simple LoLa-MNIST network takes seconds per inference, and a single inference on the more realistic LoLa-CIFAR network takes \emph{20 minutes}. F1\xspace brings this down to 241 \emph{milliseconds}, making real-time deep learning inference practical: when offloading inferences to a server, this time is comparable to the roundtrip latency between server and client. \paragraph{Microbenchmarks:} \autoref{tbl:microbenchmark} compares the performance of F1\xspace, the CPU, and HEAX$_\sigma$ on four microbenchmarks: the basic NTT and automorphism operations on a single ciphertext, and homomorphic multiplication and permutation (which uses automorphisms). We report three typical sets of parameters. We use microbenchmarks to compare against prior accelerators, in particular HEAX. But prior accelerators do not implement automorphisms, so we extend each HEAX key-switching pipeline with an SRAM-based, scalar automorphism unit. We call this extension HEAX$_\sigma$. \autoref{tbl:microbenchmark} shows that F1\xspace achieves large speedups over HEAX$_\sigma$, ranging from 172$\times$ to 1,866$\times$. Moreover, F1\xspace's speedups over the CPU are even larger than in full benchmarks. 
This is because microbenchmarks are pure compute, and thus miss the data movement bottlenecks of FHE programs. \subsection{Architectural analysis} To gain more insights into these results, we now analyze F1\xspace's data movement, power consumption, and compute. \paragraph{Data movement:} \autoref{fig:dataMovement} shows a breakdown of off-chip memory traffic across data types: key-switch hints (KSH), inputs/outputs, and intermediate values. KSH and input/output traffic is broken into compulsory and non-compulsory (i.e., caused by limited scratchpad capacity). Intermediates, which are always non-compulsory, are classified as loads or stores. \autoref{fig:dataMovement} shows that key-switch hints dominate in high-depth workloads (LogReg, DB Lookup, and bootstrapping), taking up to 94\% of traffic. Key-switch hints are also significant in the LoLa-MNIST variants. This shows why scheduling should prioritize them. Second, due to our scheduler design, F1\xspace approaches compulsory traffic for most benchmarks, with non\hyp{}compulsory accesses adding only 5--18\% of traffic. The exception is LoLa-CIFAR, where intermediates consume 75\% of traffic. LoLa-CIFAR has very high reuse of key-switch hints, and exploiting it requires spilling intermediate ciphertexts. \figDataMovement \figOpBreakdown \paragraph{Power consumption:} \autoref{fig:power} reports average power for each benchmark, broken down by component. This breakdown also includes off-chip memory power (\autoref{tbl:GF12} only included the on-chip component). Results show reasonable power consumption for an accelerator card. Overall, computation consumes 20--30\% of power, and data movement dominates. \paragraph{Utilization over time:} F1\xspace's average FU utilization is about 30\%. However, this does not mean that fewer FUs could achieve the same performance: benchmarks have memory\hyp{}bound phases that weigh down average FU utilization. 
To see this, \autoref{fig:opBreakdown} shows a breakdown of FU utilization over time for LoLa-MNIST Plaintext Weights. \autoref{fig:opBreakdown} also shows off-chip bandwidth utilization over time (black line). The program is initially memory-bound, and few FUs are active. As the memory-bound phase ends, compute intensity grows, utilizing a balanced mix of the available FUs. Finally, due to decoupled execution, when memory bandwidth utilization peaks again, F1\xspace can maintain high compute intensity. The highest FU utilization happens at the end of the benchmark and is caused by processing the final (fully connected) layer, which is highly parallel and already has all inputs available on-chip. \subsection{Sensitivity studies} \label{sec:sensitivity} \tblSensitivity To understand the impact of our FUs and scheduling algorithms, we evaluate F1\xspace variants without them. \autoref{tbl:sensitivity} reports the \emph{slowdown (higher is worse)} of F1\xspace with: \emph{(1)} low\hyp{}throughput NTT FUs that follow the same design as HEAX (processing one stage of NTT butterflies per cycle); % \emph{(2)} low\hyp{}throughput automorphism FUs using a serial SRAM memory, and \emph{(3)} Goodman's register-pressure-aware scheduler~\cite{goodman:ics1988:code}. For the FU experiments, our goal is to show the importance of having high-throughput units. Therefore, the low-throughput variants use many more (NTT or automorphism) FUs, so that aggregate throughput across all FUs in the system is the same. Also, the scheduler accounts for the characteristics of these FUs. In both cases, performance drops substantially, by gmean 2.6$\times$ and 3.3$\times$. This is because achieving high throughput requires excessive parallelism, which hinders data movement, forcing the scheduler to balance both. Finally, the scheduler experiment uses register-pressure-aware scheduling~\cite{goodman:ics1988:code} as the off-chip data movement scheduler instead, operating on the full dataflow graph. 
This algorithm was proposed for VLIW processors and register files; we apply it to the larger scratchpad. The large slowdowns show that prior capacity-aware schedulers are ineffective on F1\xspace. \figConfigs \subsection{Scalability} \label{sec:scalability} Finally, we study how F1\xspace's performance changes with its area budget: we sweep the number of compute clusters, scratchpad banks, HBM controllers, and network topology to find the most efficient design at each area. \autoref{fig:pareto} shows this Pareto frontier, with area in the $x$-axis and performance in the $y$-axis. This curve shows that, as F1\xspace scales, it uses resources efficiently: performance grows about linearly through a large range of areas. \subsection{Functional Simulation}\label{sec:functional_simulation} Here we describe our software simulation efforts for F1. Currently, we have a functional simulator written in C++ on top of Shoup's Number Theory Library.\footnote{\url{https://libntl.org/}} This simulator measures \emph{input-output correctness} and \emph{calls to functional units} throughout a computation. The underlying algorithms are not the same as F1's functional units, but they match common methods used in software (i.e., HElib's algorithms). This allows one to verify correctness of FHE algorithms and to create a dataflow graph. The simulator has all our functional units implemented in software: modular additions, modular multiplications, automorphisms, and NTTs. We then build ciphertext-level operations by calls to these algorithms: ciphertext addition, ciphertext multiplication, rotations, modulus-switching, and a simplified bootstrapping procedure, for non-packed ciphertexts. Our functional simulator works for the parameter ranges discussed throughout the paper: polynomial/ring dimension $N$ as an arbitrary power of 2 (usually 1024-16384 for security) and RNS moduli where each is an NTT-friendly prime, $q_i \equiv 1 \bmod 2N$, roughly 24 bits long. 
Further, each modulus is sampled randomly, similarly to other FHE RNS implementations. \section{Related Work} \label{sec:related} We now discuss related work not covered so far. \paragraph{FHE accelerators:} Prior work has proposed accelerators for individual FHE operations, but not full FHE computations~\cite{cousins:hpec12:sipher-fpga,cousins:hpec14:fpga-he,cousins:tetc17:fpga-he,doroz:tc15:accelerating-fhe,roy:hpca19:fpga-he,mert:tvlsi20:bfv-accel,migliore:tecs17:he-karatsuba,riazi:asplos20:heax,turan:tc20:heaws}. These designs target FPGAs and rely on a host processor; \autoref{sec:fhe_analysis} discussed their limitations. Early designs accelerated small primitives like NTTs, and were dominated by host-FPGA communication. State-of-the-art accelerators execute a full homomorphic multiplication independently: Roy et al.~\cite{roy:hpca19:fpga-he} accelerate B/FV multiplication by 13$\times$ over a CPU; HEAWS~\cite{turan:tc20:heaws} accelerates B/FV multiplication, and uses it to speed a simple benchmark by 5$\times$; and HEAX~\cite{riazi:asplos20:heax} accelerates CKKS multiplication and key-switching by up to 200$\times$. These designs suffer high data movement (e.g., HEAX does not reuse key-switch hints) and use fixed pipelines with relatively low-throughput FUs. We have shown that accelerating FHE programs requires a different approach: data movement becomes the key constraint, requiring new techniques to extract reuse {across} homomorphic operations; and fixed pipelines cannot support the operations of even a single benchmark. Instead, F1\xspace achieves flexibility and high performance by exploiting wide-vector execution with high-throughput FUs. This lets F1\xspace execute not only full applications, but different FHE schemes. \paragraph{Hybrid HE-MPC accelerators:} Recent work has also proposed ASIC accelerators for some homomorphic encryption primitives in the context of oblivious neural networks~\cite{juvekar2018gazelle,reagen:hpca21:cheetah}. 
These approaches are very different from FHE: they combine homomorphic encryption with multi-party computation (MPC), executing a single layer of the network at a time and sending intermediates to the client, which computes the final activations. Gazelle~\cite{juvekar2018gazelle} is a low-power ASIC for homomorphic evaluations, and Cheetah~\cite{reagen:hpca21:cheetah} introduces algorithmic optimizations and a large ASIC design that achieves very large speedups over Gazelle. These schemes avoid high-depth FHE programs, so server\hyp{}side homomorphic operations are cheaper. But they are limited by client-side computation and client-server communication: Cheetah and Gazelle use cipher\-texts that are up to $\sim40\times$ small\-er than those used by F1\xspace; however, they re\-quire the client to re\--en\-crypt ciphertexts \textit{every} time they are mul\-ti\-pli\-ed on the server to prevent noise blowup. CHOCO~\cite{vanderhagen:arxiv21:choco} shows that client-side computation costs for HE-MPC are substantial, and when they are accelerated, network latency and throughput overheads dominate (several seconds per DNN inference). By contrast, F1\xspace enables offloading the full inference using FHE, avoiding frequent communication. As a result, a direct comparison between these accelerators and F1 is not possible. F1's hardware also differs substantially from Cheetah and Gazelle. First, Cheetah and Gazelle implement fixed-function pipelines (e.g., for output-stationary DNN inference in Cheetah), whereas F1 is programmable. Second, Cheetah, like HEAX, uses many FUs with relatively low throughput, whereas F1 uses few high-throughput units (e.g., 40$\times$ faster NTTs). Cheetah's approach makes sense for their small ciphertexts, but as we have seen (\autoref{sec:sensitivity}), it is impractical for FHE. 
\paragraph{GPU acceleration:} Finally, prior work has also used GPUs to accelerate different FHE schemes, including GH~\cite{wang:hpec12:fhe-gpu,wang:tc13:fhe-gpu}, BGV~\cite{wang:iscas14:leveled-gpu}, and B/FV~\cite{al:emerging19:implementation}. Though GPUs have plentiful compute and bandwidth, they lack modular arithmetic, their pure data-parallel approach makes non-element-wise operations like NTTs expensive, and their small on-chip storage adds data movement. As a result, GPUs achieve only modest performance gains. For instance, Badawi et al.~\cite{al:emerging19:implementation} accelerate B/FV multiplication using GPUs, and achieve speedups of around 10$\times$ to 100$\times$ over single-thread CPU execution (and thus commensurately lower speedups over multicore CPUs, as FHE operations parallelize well). \section{Conclusion} FHE has the potential to enable computation offloading with guaranteed security. But FHE's high computation overheads currently limit its applicability to narrow cases (simple computations where privacy is paramount). F1\xspace tackles this challenge, accelerating full FHE computations by over 3-4 orders of magnitude. This enables new use cases for FHE, like secure real-time deep learning inference. F1\xspace is the first FHE accelerator that is programmable, i.e., capable of executing full FHE programs. In contrast to prior accelerators, which build fixed pipelines tailored to specific FHE schemes and parameters, F1\xspace introduces a more effective design approach: it accelerates the \emph{primitive} computations shared by higher-level operations using novel high\hyp{}throughput functional units, and hardware and compiler are co-designed to minimize data movement, the key bottleneck. % This flexibility makes F1\xspace broadly useful: the same hardware can accelerate all operations within a program, arbitrary FHE programs, and even multiple FHE schemes. 
In short, our key contribution is to show that, for FHE, we can achieve ASIC-level performance without sacrificing programmability. \section*{Acknowledgments} We thank the anonymous reviewers, Maleen Abeydeera, Hyun Ryong Lee, Quan Nguyen, Yifan Yang, Victor Ying, Guowei Zhang, and Joel Emer for feedback on the paper; Tutu Ajayi, Austin Rovinski, and Peter Li for help with the HDL toolchain setup; Shai Halevi, Wei Dai, Olli Saarikivi, and Madan Musuvathi for email correspondence. This research was developed with funding from the Defense Advanced Research Projects Agency (DARPA) under Contract No. HR0011-21-C-0035. The views, opinions and/or findings expressed are those of the author and should not be interpreted as representing the official views or policies of the Department of Defense or the U.S. Government. Nikola Samardzic was supported by the Jae S. and Kyuho Lim Graduate Fellowship at MIT. \bibliographystyle{IEEEtranS}
1,108,101,565,831
arxiv
\section{Introduction} The Galton-Watson (GW) process is a basic stochastic model for the generation size for a population of reproducing particles, see \cite{AN}. Slightly modifying the framework of \cite{Be}, we define a GW process in terms of an infinite system of particles uniquely labeled by pairs $(x,t)\in\mathbb N\times\mathbb Z$, where $t$ refers to the generation number and $x$ is the {\it rank} of the particle within this generation. Given a set of independent and identically distributed random variables \begin{equation}\label{ut} \big\{u_{t}(x)\big\}_{(x,t)\in\mathbb N\times\mathbb Z} \end{equation} taking values in $\mathbb N_0=\{0\}\cup\mathbb N$, a GW process stemming from $Z_a$ particles at time $a\in \mathbb Z$, is the Markov chain $\{Z_t\}_{t\ge a}$ characterized by the branching property \begin{equation}\label{Yt} Z_{t+1}=\sum_{x=1}^{Z_t} u_t(x), \end{equation} with $u_t(x)$ representing the offspring number of the particle $(t,x)$. Relation \eqref{Yt} induces the following rank-inheritance rules: (i) each particle $(x,t+1)$ has a unique parent $(x',t)$, (ii) if $x<y$, then $x'<y'$, where $(x',t)$ and $(y',t)$ are the parents of $(x,t+1)$ and $(y,t+1)$. \noindent For example, if $u_t(1)=k$ is positive, then $k$ children of the rank 1 particle get the ranks $1,\ldots,k$ among the particles born at time $t+1$. The ranks of particles play no role in the standard GW setting, however, they were used in \cite{Be} studying the GW processes with neutral mutations. This paper introduces a new modification of the GW model by allowing the rank of a particle to determine its reproduction law. In a general rank-dependent GW setting, the independent offspring numbers $u_{t}(x)$ have distributions that vary over the birth times $t$ and particle ranks $x$. 
To illustrate, consider a linear-fractional reproduction law \begin{align*} \rE s^{u_t(x)}&=1-q_t(x)+q_t(x){p_ts\over 1-(1-p_t)s}, \quad p_t\in(0,1], \quad q_t(x)=\left\{ \begin{array}{cl} 1,&\text{ if }x=1, 3,\ldots, \\ 0, & \text{ if }x=2,4,\ldots, \end{array} \right. \end{align*} where the dependence on the particle rank takes effect via $q_t(x)$, the probability of having non-zero offspring. Here, the particles with odd ranks always produce $k\ge1$ offspring with probability $(1-p_t)^{k-1}p_t$, while the particles of even ranks have no offspring. Notice that the corresponding rank-dependent GW process cannot be treated as a two-type GW process, since the number of even-ranked children for the rank 3 particle depends on the number of children of the rank 1 particle. The standard GW process has many extensions, usually motivated by biological applications, see \cite{HJV, KA}. Some of these extensions can be viewed as examples of rank-dependent GW processes, see Section \ref{BD}, where the scope of the rank-dependent GW setting is highlighted by referring to bounded GW processes, GW processes with immigration and emigration, duals to birth-death GW processes in varying environment, as well as GW processes embedded in continuous time linear birth-death processes in varying environment. In particular, if the reproduction law $\rE s^{u_t(x)}=f_{t}$ is not influenced by the particle rank, then the rank-dependent GW process is a GW process in varying environment satisfying \begin{equation*} \rE (s^{Z_t}|Z_a=z)= (f_{a}\circ \ldots\circ f_{t-1}(s))^z, \end{equation*} where $f\circ g(s)$ stands for $f(g(s))$, see \cite{Ja}, as well as \cite{BS,BH,Ker} for recent treatments involving this model. In the rank-dependent GW setting, the last relation does not hold in general, making analysis more complicated. The main results of the paper are collected in Section \ref{Sdu}. 
Our Theorem \ref{proM} considers the rank-dependent GW processes along with their pathwise dual processes, whose definition in Section \ref{se} is based on Siegmund's duality, see \cite{JK,Si,StS}. It shows in particular, that the dual to the dual of a rank-dependent GW process is a shifted copy of the original rank-dependent GW process. In the literature on dual processes, the common setting involves time-homogeneous Markov processes. A notable exception is \cite{AS} treating a class of stationary processes. Our approach handles time-inhomogeneous Markov chains, and can even be adapted to the non-Markov setting, when for example, the offspring number $u_t(x)$ depends on the offspring number $u_{t-1}(x')$ of the parent. The infinite particle system framework allows for an illuminating graphical representation of a system of coupled rank-dependent GW processes and their pathwise duals visualizing their trajectories as forest graphs. A process dual to an asexual reproduction model, like GW process or Wright-Fisher model, is usually interpreted in the terms of a coalescent model \cite{GH, Mo}. Somewhat counter-intuitively, our graphical representation says that the dual to a branching process is again a form of the branching process with dependencies, see Figure \ref{Fig1}. The graphical representation works also for the primary reproduction models with fixed population size, like the Wright-Fisher model. One of the examples in Section \ref{BD} shows that, even with a standard GW process, the dual Markov chain is not necessarily a rank-dependent GW process, because the dual offspring numbers become dependent on each other. An interesting open problem is to characterize the class of rank-dependent GW processes, whose dual Markov chain is itself a rank-dependent GW process. A simpler problem is to characterize the class of GW processes, whose dual Markov chain is itself a rank-dependent GW process. We obtain two results addressing the latter question. 
Consider the dual of the GW reproduction law. Proposition \ref{JJ} says that the marginal dual offspring distribution is always linear-fractional. Theorem \ref{th} demonstrates that the dual process is GW with an eternal particle if and only if that the primary reproduction law is itself linear-fractional. Yet another example in Section \ref{BD} demonstrates that the dual to a GW process might be a rank-dependent GW process which is not a GW with an eternal particle. Section \ref{Spro} contains the proofs of the results stated in Section \ref{Sdu}. \begin{figure*}[t] \centering \begin{subfigure}[t]{0.32\textwidth} \begin{tikzpicture}[scale=0.6] \draw[step=1cm,dotted, thin] (-0,0) grid (7,7); \node[black] () at (-0.5,0){$-3$}; \node[black] () at (-0.5,1){$-2$}; \node[black] () at (-0.5,2){$-1$}; \node[black] () at (-0.3,3){$0$}; \node[black] () at (-0.3,4){$1$}; \node[black] () at (-0.3,5){$2$}; \node[black] () at (-0.3,6){$3$}; \node[black] () at (-0.3,7){$4$}; \node[black] () at (0,-0.4){$0$}; \node[black] () at (1,-0.4){$1$}; \node[black] () at (2,-0.4){$2$}; \node[black] () at (3,-0.4){$3$}; \node[black] () at (4,-0.4){$4$}; \node[black] () at (5,-0.4){$5$}; \node[black] () at (6,-0.4){$6$}; \node[black] () at (7,-0.4){$7$}; \draw [line width=0.2mm,red] (0,0) -- (0,7); \draw [line width=0.2mm,red] (1,7.03) -- (1,6.97); \draw [line width=0.2mm,red] (2,7.03) -- (2,6.97); \draw [line width=0.2mm,red] (3,7.03) -- (3,6.97); \draw [line width=0.2mm,red] (6,7.03) -- (6,6.97); \draw [line width=0.2mm,red] (7,7.03) -- (7,6.97); \draw [line width=0.2mm,red] (5,7)-- (6,6)-- (6,5); \draw [line width=0.2mm,red] (4,7)-- (5,6); \draw [line width=0.2mm,red] (2,7) --(4,6)--(4,5); \draw [line width=0.2mm,red] (2,7)-- (3,6)-- (3,5); \draw [line width=0.2mm,red] (2,7)-- (2,6); \draw [line width=0.2mm,red] (0,7)-- (1,6); \draw [line width=0.2mm,red] (7,6)-- (7,5); \draw [line width=0.2mm,red] (6,6)-- (5,5); \draw [line width=0.2mm,red] (3,6)-- (2,5); \draw [line width=0.2mm,red] 
(3,6)-- (1,5)-- (3,4); \draw [line width=0.2mm,red] (6,5)-- (7,4)-- (7,3)-- (7,2); \draw [line width=0.2mm,red] (4,5)-- (6,4)-- (5,3); \draw [line width=0.2mm,red] (4,5)-- (5,4); \draw [line width=0.2mm,red] (3,5)--(4,4)--(4,3); \draw [line width=0.2mm,red] (1,5)-- (2,4)-- (2,3); \draw [line width=0.2mm,red] (0,5)-- (1,4); \draw [line width=0.2mm,red] (7,4)-- (6,3)-- (6,2); \draw [line width=0.2mm,red] (4,4)-- (3,3)-- (3,2); \draw [line width=0.2mm,red] (0,4)--(1,3); \draw [line width=0.2mm,red] (5,3)-- (5,2); \draw [line width=0.2mm,red] (5,3)-- (4,2); \draw [line width=0.2mm,red] (3,3)-- (2,2)-- (3,1); \draw [line width=0.2mm,red] (2,3)-- (1,2); \draw [line width=0.2mm,red] (2,2)-- (4,1); \draw [line width=0.2mm,red] (4,2)-- (5,1)-- (4,0); \draw [line width=0.2mm,red] (4,2)-- (6,1); \draw [line width=0.2mm,red] (2,2)--(2,1); \draw [line width=0.2mm,red] (0,2)-- (1,1)--(1,0); \draw [line width=0.2mm,red] (7,0.5) -- (6,0); \draw [line width=0.2mm,red] (7,1)-- (5,0); \draw [line width=0.2mm,red] (3,1)-- (3,0); \draw [line width=0.2mm,red] (3,1)-- (2,0); \end{tikzpicture} \caption{} \end{subfigure}% ~ \begin{subfigure}[t]{0.32\textwidth} \begin{tikzpicture}[scale=0.6] \draw[step=1cm,dotted, thin] (0,0) grid (7,7); \draw [line width=0.2mm,black] (3,-0.03) -- (3,0.03); \draw [line width=0.2mm,black] (7,-0.03) -- (7,0.03); \draw [line width=0.2mm,black] (0,0) -- (0,7); \draw [line width=0.2mm,black] (1,0) -- (1,1); \draw [line width=0.2mm,black] (2,0) -- (2,1) -- (1,2); \draw [line width=0.2mm,black] (2,0) -- (3,1); \draw [line width=0.2mm,black] (4,0) -- (4,1); \draw [line width=0.2mm,black] (4,0) -- (5,1) -- (3,2); \draw [line width=0.2mm,black] (5,0) -- (6,1); \draw [line width=0.2mm,black] (5,0) -- (7,1) -- (5,2); \draw [line width=0.2mm,black] (6,0) -- (7,0.5); \draw [line width=0.2mm,black] (2,1) -- (2,2) -- (3,3); \draw [line width=0.2mm,black] (5,1) -- (4,2) -- (4,3); \draw [line width=0.2mm,black] (7,1) -- (6,2) -- (6,3); \draw [line width=0.2mm,black] (7,1) -- 
(7,2) -- (7,3); \draw [line width=0.2mm,black] (1,2) -- (1,3); \draw [line width=0.2mm,black] (1,2) -- (2,3) -- (1,4); \draw [line width=0.2mm,black] (4,2) -- (5,3) -- (5,4); \draw [line width=0.2mm,black] (2,3) -- (2,4) -- (1,5); \draw [line width=0.2mm,black] (3,3) -- (3,4); \draw [line width=0.2mm,black] (3,3) -- (4,4) -- (2,5); \draw [line width=0.2mm,black] (5,3) -- (6,4); \draw [line width=0.2mm,black] (6,3) -- (7,4) -- (5,5); \draw [line width=0.2mm,black] (4,4) -- (3,5); \draw [line width=0.2mm,black] (5,4) -- (4,5) -- (4,6); \draw [line width=0.2mm,black] (7,4) -- (6,5); \draw [line width=0.2mm,black] (1,5) -- (1,6); \draw [line width=0.2mm,black] (1,5) -- (2,6) -- (1,7); \draw [line width=0.2mm,black] (1,5) -- (3,6); \draw [line width=0.2mm,black] (5,5) -- (5,6) -- (3,7); \draw [line width=0.2mm,black] (5,5) -- (6,6) -- (5,7); \draw [line width=0.2mm,black] (7,5) -- (7,6) -- (6,7); \draw [line width=0.2mm,black] (2,6) -- (2,7); \draw [line width=0.2mm,black] (5,6) -- (4,7); \draw [line width=0.2mm,black] (7,6) -- (7,7); \node[black] () at (-0.5,0){$-3$}; \node[black] () at (-0.5,1){$-2$}; \node[black] () at (-0.5,2){$-1$}; \node[black] () at (-0.3,3){$0$}; \node[black] () at (-0.3,4){$1$}; \node[black] () at (-0.3,5){$2$}; \node[black] () at (-0.3,6){$3$}; \node[black] () at (-0.3,7){$4$}; \node[black] () at (0,-0.4){$0$}; \node[black] () at (1,-0.4){$1$}; \node[black] () at (2,-0.4){$2$}; \node[black] () at (3,-0.4){$3$}; \node[black] () at (4,-0.4){$4$}; \node[black] () at (5,-0.4){$5$}; \node[black] () at (6,-0.4){$6$}; \node[black] () at (7,-0.4){$7$}; \end{tikzpicture} \caption{} \end{subfigure} \begin{subfigure}[t]{0.32\textwidth} \begin{tikzpicture}[scale=0.6] \draw[step=1cm,dotted, thin] (0,0) grid (7,7); \draw [line width=0.2mm,black] (3,-0.03) -- (3,0.03); \draw [line width=0.2mm,black] (7,-0.03) -- (7,0.03); \draw [line width=0.2mm,black] (0,0) -- (0,7); \draw [line width=0.2mm,black] (1,0) -- (1,1); \draw [line width=0.2mm,black] (2,0) -- 
(2,1) -- (1,2); \draw [line width=0.2mm,black] (2,0) -- (3,1); \draw [line width=0.2mm,black] (4,0) -- (4,1); \draw [line width=0.2mm,black] (4,0) -- (5,1) -- (3,2); \draw [line width=0.2mm,black] (5,0) -- (6,1); \draw [line width=0.2mm,black] (5,0) -- (7,1) -- (5,2); \draw [line width=0.2mm,black] (6,0) -- (7,0.5); \draw [line width=0.2mm,black] (2,1) -- (2,2) -- (3,3); \draw [line width=0.2mm,black] (5,1) -- (4,2) -- (4,3); \draw [line width=0.2mm,black] (7,1) -- (6,2) -- (6,3); \draw [line width=0.2mm,black] (7,1) -- (7,2) -- (7,3); \draw [line width=0.2mm,black] (1,2) -- (1,3); \draw [line width=0.2mm,black] (1,2) -- (2,3) -- (1,4); \draw [line width=0.2mm,black] (4,2) -- (5,3) -- (5,4); \draw [line width=0.2mm,black] (2,3) -- (2,4) -- (1,5); \draw [line width=0.2mm,black] (3,3) -- (3,4); \draw [line width=0.2mm,black] (3,3) -- (4,4) -- (2,5); \draw [line width=0.2mm,black] (5,3) -- (6,4); \draw [line width=0.2mm,black] (6,3) -- (7,4) -- (5,5); \draw [line width=0.2mm,black] (4,4) -- (3,5); \draw [line width=0.2mm,black] (5,4) -- (4,5) -- (4,6); \draw [line width=0.2mm,black] (7,4) -- (6,5); \draw [line width=0.2mm,black] (1,5) -- (1,6); \draw [line width=0.2mm,black] (1,5) -- (2,6) -- (1,7); \draw [line width=0.2mm,black] (1,5) -- (3,6); \draw [line width=0.2mm,black] (5,5) -- (5,6) -- (3,7); \draw [line width=0.2mm,black] (5,5) -- (6,6) -- (5,7); \draw [line width=0.2mm,black] (7,5) -- (7,6) -- (6,7); \draw [line width=0.2mm,black] (2,6) -- (2,7); \draw [line width=0.2mm,black] (5,6) -- (4,7); \draw [line width=0.2mm,black] (7,6) -- (7,7); \node[black] () at (-0.5,0){$-3$}; \node[black] () at (-0.5,1){$-2$}; \node[black] () at (-0.5,2){$-1$}; \node[black] () at (-0.3,3){$0$}; \node[black] () at (-0.3,4){$1$}; \node[black] () at (-0.3,5){$2$}; \node[black] () at (-0.3,6){$3$}; \node[black] () at (-0.3,7){$4$}; \node[black] () at (0,-0.4){$0$}; \node[black] () at (1,-0.4){$1$}; \node[black] () at (2,-0.4){$2$}; \node[black] () at (3,-0.4){$3$}; \node[black] () 
at (4,-0.4){$4$}; \node[black] () at (5,-0.4){$5$}; \node[black] () at (6,-0.4){$6$}; \node[black] () at (7,-0.4){$7$}; \draw [line width=0.2mm,red] (0.5,0) -- (0.5,7); \draw [line width=0.2mm,red] (1.5,7.03) -- (1.5,6.97); \draw [line width=0.2mm,red] (3.5,7.03) -- (3.5,6.97); \draw [line width=0.2mm,red] (6.5,7.03) -- (6.5,6.97); \draw [line width=0.2mm,red] (5.5,7)-- (6.5,6)-- (6.5,5); \draw [line width=0.2mm,red] (4.5,7)-- (5.5,6); \draw [line width=0.2mm,red] (2.5,7) --(4.5,6)--(4.5,5); \draw [line width=0.2mm,red] (2.5,7)-- (3.5,6)-- (3.5,5); \draw [line width=0.2mm,red] (2.5,7)-- (2.5,6); \draw [line width=0.2mm,red] (0.5,7)-- (1.5,6); \draw [line width=0.2mm,red] (6.5,6)-- (5.5,5); \draw [line width=0.2mm,red] (3.5,6)-- (2.5,5); \draw [line width=0.2mm,red] (3.5,6)-- (1.5,5)-- (3.5,4); \draw [line width=0.2mm,red] (6.5,5)-- (7,4.5); \draw [line width=0.2mm,red] (4.5,5)-- (6.5,4)-- (5.5,3); \draw [line width=0.2mm,red] (4.5,5)-- (5.5,4); \draw [line width=0.2mm,red] (3.5,5)--(4.5,4)--(4.5,3); \draw [line width=0.2mm,red] (1.5,5)-- (2.5,4)-- (2.5,3); \draw [line width=0.2mm,red] (0.5,5)-- (1.5,4); \draw [line width=0.2mm,red] (7,3.5)-- (6.5,3)-- (6.5,2); \draw [line width=0.2mm,red] (4.5,4)-- (3.5,3)-- (3.5,2); \draw [line width=0.2mm,red] (0.5,4)--(1.5,3); \draw [line width=0.2mm,red] (5.5,3)-- (5.5,2); \draw [line width=0.2mm,red] (5.5,3)-- (4.5,2); \draw [line width=0.2mm,red] (3.5,3)-- (2.5,2)-- (3.5,1); \draw [line width=0.2mm,red] (2.5,3)-- (1.5,2); \draw [line width=0.2mm,red] (2.5,2)-- (4.5,1); \draw [line width=0.2mm,red] (4.5,2)-- (5.5,1)-- (4.5,0); \draw [line width=0.2mm,red] (4.5,2)-- (6.5,1); \draw [line width=0.2mm,red] (2.5,2)--(2.5,1); \draw [line width=0.2mm,red] (0.5,2)-- (1.5,1)--(1.5,0); \draw [line width=0.2mm,red] (7,0.25) -- (6.5,0); \draw [line width=0.2mm,red] (7,0.75)-- (5.5,0); \draw [line width=0.2mm,red] (3.5,1)-- (3.5,0); \draw [line width=0.2mm,red] (3.5,1)-- (2.5,0); \end{tikzpicture} \caption{} \end{subfigure} 
\caption{Graphical representation of the iterated reproduction mappings. } \label{Fig1}\end{figure*} \section{Coupled rank-dependent GW processes and their duals}\label{se} Let $ \Phi_0$ be the class of monotone functions $U:\mathbb N_0\to \mathbb N_0$ such that $U(0)=0$. If $U\in \Phi_0$ and $u(x)=U(x)-U(x-1)$, then $U$ will be called a {\it reproduction mapping} with the offspring numbers $u(x)$, $x\in\mathbb N$. Given a set of independent random variables \eqref{ut}, define a sequence of random reproduction mappings $U_t(x)=\sum_{y=1}^x u_t(y)$, and consider the family of stochastic iterations \begin{equation}\label{zt} U_{a,b}=U_{b-1}\circ U_{b-2}\circ\cdots\circ U_{a}, \quad a<b,\quad U_{a,a}(x)\equiv x. \end{equation} Putting $Z_t=U_{a,t}(z)$, we obtain a time-inhomogeneous Markov chain $\{Z_t\}_{t\ge a}$ satisfying \eqref{Yt}, which will be called a rank-dependent GW process. Moreover, using the system of stochastic iterations \begin{equation}\label{bU} \boldsymbol U=\{U_{a,b}\}_{-\infty<a\le b<\infty}, \end{equation} we can define coupled Markov chains $\{U_{a,t}(x)\}_{t\ge a}$ starting at $U_{a,a}(x)=x$ for all possible $a\in\mathbb Z$ and $x\in\mathbb N$. We call \eqref{bU} a rank-dependent GW system with the reproduction law $$f_{t,x}(s)=\rE s^{u_{t}(x)},\quad s\in[0,1],\quad x\in\mathbb N,\quad t\in\mathbb Z.$$ \begin{definition}\label{deg} If $U\in \Phi_0$ and $V=U^-$, where \[ U^-(x)=\min\{y:U(y)\ge x\} \] then $V\in \Phi_0$ will be called the {\it pathwise dual} of the reproduction mapping $U$. \end{definition} As shown in Section \ref{Spro}, Definition \ref{deg} is equivalent to the equality \begin{align} \{(x,y)\in\mathbb N_0^2:V(x)\le y\}= \{(x,y)\in\mathbb N_0^2:U(y)\ge x\},\label{VxU} \end{align} and therefore can be referred to as the pathwise Siegmund duality, see \cite{JK,Si,StS}. 
\begin{definition}\label{dog} Given a rank-dependent GW system \eqref{bU}, define its time-reverse by \begin{equation*} \boldsymbol V=\{V_{b,a}\}_{-\infty<a\le b<\infty},\quad V_{b,a}=V_{a}\circ\cdots\circ V_{b-1}, \quad a\le b, \end{equation*} where $V_t=U_t^-$ are the dual reproduction mappings. Putting $\hat U_t=V_{-t-1}$, define the pathwise dual of $\boldsymbol U$ by \begin{equation*} \hat{\boldsymbol U}=\{\hat U_{a,b}\}_{-\infty<a\le b<\infty},\quad \hat U_{a,b}=\hat U_{b-1}\circ\cdots\circ \hat U_{a}, \quad a\le b. \end{equation*} \end{definition} The trajectories of a rank-dependent GW system and its time-reverse can be represented by forest graphs on the grid of nodes $\mathbb N_0\times \mathbb Z$. As seen in Figure \ref{Fig1}a, the bottom-up lineages $\{(U_{a,t}(x),t), t\ge a\}$ starting from different levels $a\in\mathbb Z$ and different positions $x\in\mathbb N_0$, merge together into coalescent trees. The resulting graph will be called a {\it dual forest}. On the other hand, as shown in Figure \ref{Fig1}b, the top-down lineages $\{(V_{b,t}(x),t), t\le b\}$ starting from different levels $b\in\mathbb Z$ and different positions $x\in\mathbb N_0$, build up a graph that we call a {\it primary forest}. Figure \ref{Fig1}c demonstrates that the two forests can be conveniently depicted together after the dual forest is shifted to the right by $\sfrac{1}{2}$. Drawn in this way, the lineages of the primary and dual forests do not cross. The primary forest describes the genealogical trees of the primary rank-dependent GW system. A lineage in the dual tree followed up from the vertex $(z,t)$, delineates a trajectory of the Markov chain \eqref{Yt}. Figure \ref{Fig2} illustrates how the trajectories of $\hat{\boldsymbol U}$ are obtained from those of $\boldsymbol V$ by a vertical flipping. \section{Main results}\label{Sdu} Let reproduction mappings $U,V,\tilde U\in\Phi_0$ be connected as $V=U^-$, $\tilde U=V^-$. 
The same notation will also be used with the time index $t$. \begin{theorem}\label{proM} Given \eqref{bU}, put $$V_t=U_t^-,\quad \hat U_t=V_{-t-1},\quad \hat V_t=\hat U_t^-,\quad \tilde U_t=\hat V_{-t-1},$$ and alongside the rank-dependent GW system $\boldsymbol U$, consider its time-reverse $\boldsymbol V$ and pathwise dual $\hat {\boldsymbol U}$, see Definition \ref{dog}. Whenever $a\le b$ and $x,y\in\mathbb N_0$, the two events coincide \[\{\hat U_{-b,-a}(x)\le y\}=\{x\le U_{a,b}(y)\}.\] The reproduction mappings $\tilde U_t$ define a rank-dependent GW system $\tilde {\boldsymbol U}$ in a similar way as \eqref{bU} defines the primary rank-dependent GW system $\boldsymbol U$. The $\tilde {\boldsymbol U}$ is the dual of the dual $\hat {\boldsymbol U}$, and is obtained as a simple shifting transform of ${\boldsymbol U}$: \[\tilde U_{a,b}(x)= U_{a,b}(x-1)+1,\quad x\ge1,\quad a\le b.\] \end{theorem} Figure \ref{Fig2} is an illustration of the ``picture proof'' of Theorem \ref{proM} using the graphical representation. Figure \ref{Fig2}b shows an intermediate step in the transformation of the primary forest on Figure \ref{Fig2}a into the primary forest on Figure \ref{Fig2}c representing the twofold dual rank-dependent GW system $\tilde{\boldsymbol U}$. Since $\hat U_{a,b}=V_{-b,-a}$, we find that the primary forest on Figure \ref{Fig2}b representing the genealogical trees of the dual rank-dependent GW system $\hat{\boldsymbol U}$, is the dual forest from Figure \ref{Fig2}a flipped around the axis $t=0$ and shifted to the right by 1 (visually it is shifted just by $\sfrac{1}{2}$). Observe that the rank 1 particle in the dual reproduction flow is necessarily ``eternal'', giving birth to at least one offspring. Thus the black forest from (a) flips into the red forest in (b), then the black forest in (b) generates the red forest in (b), which in turn gives the black forest in (c). 
We see that the primary forest on Figure \ref{Fig2}c is a shifted copy of the primary forest from Figure \ref{Fig2}a, as predicted by Theorem \ref{proM}. \begin{figure*}[t] \centering \begin{subfigure}[t]{0.32\textwidth} \begin{tikzpicture}[scale=0.6] \draw[step=1cm,dotted, thin] (0,0) grid (7,7); \draw [line width=0.2mm,black] (0,0) -- (0,7); \draw [line width=0.2mm,black] (3,-0.03) -- (3,0.03); \draw [line width=0.2mm,black] (7,-0.03) -- (7,0.03); \draw [line width=0.2mm,black] (1,0) -- (1,1); \draw [line width=0.2mm,black] (2,0) -- (2,1) -- (1,2); \draw [line width=0.2mm,black] (2,0) -- (3,1); \draw [line width=0.2mm,black] (4,0) -- (4,1); \draw [line width=0.2mm,black] (4,0) -- (5,1) -- (3,2); \draw [line width=0.2mm,black] (5,0) -- (6,1); \draw [line width=0.2mm,black] (5,0) -- (7,1) -- (5,2); \draw [line width=0.2mm,black] (6,0) -- (7,0.5); \draw [line width=0.2mm,black] (2,1) -- (2,2) -- (3,3); \draw [line width=0.2mm,black] (5,1) -- (4,2) -- (4,3); \draw [line width=0.2mm,black] (7,1) -- (6,2) -- (6,3); \draw [line width=0.2mm,black] (7,1) -- (7,2) -- (7,3); \draw [line width=0.2mm,black] (1,2) -- (1,3); \draw [line width=0.2mm,black] (1,2) -- (2,3) -- (1,4); \draw [line width=0.2mm,black] (4,2) -- (5,3) -- (5,4); \draw [line width=0.2mm,black] (2,3) -- (2,4) -- (1,5); \draw [line width=0.2mm,black] (3,3) -- (3,4); \draw [line width=0.2mm,black] (3,3) -- (4,4) -- (2,5); \draw [line width=0.2mm,black] (5,3) -- (6,4); \draw [line width=0.2mm,black] (6,3) -- (7,4) -- (5,5); \draw [line width=0.2mm,black] (4,4) -- (3,5); \draw [line width=0.2mm,black] (5,4) -- (4,5) -- (4,6); \draw [line width=0.2mm,black] (7,4) -- (6,5); \draw [line width=0.2mm,black] (1,5) -- (1,6); \draw [line width=0.2mm,black] (1,5) -- (2,6) -- (1,7); \draw [line width=0.2mm,black] (1,5) -- (3,6); \draw [line width=0.2mm,black] (5,5) -- (5,6) -- (3,7); \draw [line width=0.2mm,black] (5,5) -- (6,6) -- (5,7); \draw [line width=0.2mm,black] (7,5) -- (7,6) -- (6,7); \draw [line 
width=0.2mm,black] (2,6) -- (2,7); \draw [line width=0.2mm,black] (5,6) -- (4,7); \draw [line width=0.2mm,black] (7,6) -- (7,7); \node[black] () at (-0.5,0){$-3$}; \node[black] () at (-0.5,1){$-2$}; \node[black] () at (-0.5,2){$-1$}; \node[black] () at (-0.3,3){$0$}; \node[black] () at (-0.3,4){$1$}; \node[black] () at (-0.3,5){$2$}; \node[black] () at (-0.3,6){$3$}; \node[black] () at (-0.3,7){$4$}; \node[black] () at (1,-0.4){$1$}; \node[black] () at (2,-0.4){$2$}; \node[black] () at (3,-0.4){$3$}; \node[black] () at (4,-0.4){$4$}; \node[black] () at (5,-0.4){$5$}; \node[black] () at (6,-0.4){$6$}; \node[black] () at (7,-0.4){$7$}; \draw [line width=0.2mm,red] (0.5,0) -- (0.5,7); \draw [line width=0.2mm,red] (1.5,7.03) -- (1.5,6.97); \draw [line width=0.2mm,red] (2.5,7.03) -- (2.5,6.97); \draw [line width=0.2mm,red] (3.5,7.03) -- (3.5,6.97); \draw [line width=0.2mm,red] (6.5,7.03) -- (6.5,6.97); \draw [line width=0.2mm,red] (5.5,7)-- (6.5,6)-- (6.5,5); \draw [line width=0.2mm,red] (4.5,7)-- (5.5,6); \draw [line width=0.2mm,red] (2.5,7) --(4.5,6)--(4.5,5); \draw [line width=0.2mm,red] (2.5,7)-- (3.5,6)-- (3.5,5); \draw [line width=0.2mm,red] (2.5,7)-- (2.5,6); \draw [line width=0.2mm,red] (0.5,7)-- (1.5,6); \draw [line width=0.2mm,red] (6.5,6)-- (5.5,5); \draw [line width=0.2mm,red] (3.5,6)-- (2.5,5); \draw [line width=0.2mm,red] (3.5,6)-- (1.5,5)-- (3.5,4); \draw [line width=0.2mm,red] (6.5,5)-- (7,4.5); \draw [line width=0.2mm,red] (4.5,5)-- (6.5,4)-- (5.5,3); \draw [line width=0.2mm,red] (4.5,5)-- (5.5,4); \draw [line width=0.2mm,red] (3.5,5)--(4.5,4)--(4.5,3); \draw [line width=0.2mm,red] (1.5,5)-- (2.5,4)-- (2.5,3); \draw [line width=0.2mm,red] (0.5,5)-- (1.5,4); \draw [line width=0.2mm,red] (7,3.5)-- (6.5,3)-- (6.5,2); \draw [line width=0.2mm,red] (4.5,4)-- (3.5,3)-- (3.5,2); \draw [line width=0.2mm,red] (0.5,4)--(1.5,3); \draw [line width=0.2mm,red] (5.5,3)-- (5.5,2); \draw [line width=0.2mm,red] (5.5,3)-- (4.5,2); \draw [line width=0.2mm,red] (3.5,3)-- 
(2.5,2)-- (3.5,1); \draw [line width=0.2mm,red] (2.5,3)-- (1.5,2); \draw [line width=0.2mm,red] (2.5,2)-- (4.5,1); \draw [line width=0.2mm,red] (4.5,2)-- (5.5,1)-- (4.5,0); \draw [line width=0.2mm,red] (4.5,2)-- (6.5,1); \draw [line width=0.2mm,red] (2.5,2)--(2.5,1); \draw [line width=0.2mm,red] (0.5,2)-- (1.5,1)--(1.5,0); \draw [line width=0.2mm,red] (7,0.25) -- (6.5,0); \draw [line width=0.2mm,red] (7,0.75)-- (5.5,0); \draw [line width=0.2mm,red] (3.5,1)-- (3.5,0); \draw [line width=0.2mm,red] (3.5,1)-- (2.5,0); \end{tikzpicture} \caption{} \end{subfigure}% ~ \begin{subfigure}[t]{0.32\textwidth} \begin{tikzpicture}[scale=0.6] \draw[step=1cm,dotted, thin] (0,0) grid (7,7); \draw [line width=0.2mm,black] (0,0) -- (0,7); \draw [line width=0.2mm,black] (2,-0.03) -- (2,0.03); \draw [line width=0.2mm,black] (7,-0.03) -- (7,0.03); \draw [line width=0.2mm,black] (1,0) -- (1,7); \draw [line width=0.2mm,black] (1,0) -- (2,1); \draw [line width=0.2mm,black] (3,0) -- (3,1); \draw [line width=0.2mm,black] (3,0) -- (4,1)--(2,2)--(3,3)--(3,4)--(2,5); \draw [line width=0.2mm,black] (3,0) -- (5,1) -- (5,2)-- (6,3); \draw [line width=0.2mm,black] (5,0) -- (6,1); \draw [line width=0.2mm,black] (6,0) -- (7,1)--(6,2); \draw [line width=0.2mm,black] (4,1) -- (3,2); \draw [line width=0.2mm,black] (4,1) -- (4,2) -- (5,3)--(4,4)--(3,5)--(3,6); \draw [line width=0.2mm,black] (7,1) -- (7,2); \draw [line width=0.2mm,black] (1,2) -- (2,3); \draw [line width=0.2mm,black] (2,2) -- (4,3); \draw [line width=0.2mm,black] (5,2) -- (7,3) -- (6,4) -- (5,5) -- (6,6) -- (5,7); \draw [line width=0.2mm,black] (1,3) -- (2,4); \draw [line width=0.2mm,black] (5,3) -- (5,4); \draw [line width=0.2mm,black] (4,4) -- (4,5); \draw [line width=0.2mm,black] (6,4) -- (6,5); \draw [line width=0.2mm,black] (7,4) -- (7,5); \draw [line width=0.2mm,black] (1,5) -- (2,6)-- (2,7); \draw [line width=0.2mm,black] (3,5) -- (4,6) -- (3,7); \draw [line width=0.2mm,black] (3,5) -- (5,6); \draw [line width=0.2mm,black] (5,5) -- 
(7,6); \draw [line width=0.2mm,black] (4,6) -- (4,7); \draw [line width=0.2mm,black] (7,6.5) -- (6,7); \node[black] () at (-0.5,0){$-4$}; \node[black] () at (-0.5,1){$-3$}; \node[black] () at (-0.5,2){$-2$}; \node[black] () at (-0.5,3){$-1$}; \node[black] () at (-0.3,4){$0$}; \node[black] () at (-0.3,5){$1$}; \node[black] () at (-0.3,6){$2$}; \node[black] () at (-0.3,7){$3$}; \node[black] () at (1,-0.4){$1$}; \node[black] () at (2,-0.4){$2$}; \node[black] () at (3,-0.4){$3$}; \node[black] () at (4,-0.4){$4$}; \node[black] () at (5,-0.4){$5$}; \node[black] () at (6,-0.4){$6$}; \node[black] () at (7,-0.4){$7$}; \draw [line width=0.2mm,red] (0.5,0) -- (0.5,7); \draw [line width=0.2mm,red] (3.5,7.03) -- (3.5,6.97); \draw [line width=0.2mm,red] (4.5,7)-- (5.5,6)-- (3.5,5); \draw [line width=0.2mm,red] (4.5,7)-- (4.5,6); \draw [line width=0.2mm,red] (5.5,7) --(6.5,6); \draw [line width=0.2mm,red] (5.5,7) --(7,6.25); \draw [line width=0.2mm,red] (2.5,7)-- (2.5,6)-- (1.5,5)-- (1.5,4); \draw [line width=0.2mm,red] (2.5,7)-- (3.5,6); \draw [line width=0.2mm,red] (1.5,7) -- (1.5,6); \draw [line width=0.2mm,red] (6.5,7)-- (7,6.75); \draw [line width=0.2mm,red] (2.5,6)-- (2.5,5)-- (3.5,4)-- (3.5,3); \draw [line width=0.2mm,red] (7,5.5)-- (6.5,5); \draw [line width=0.2mm,red] (7,5.75)-- (5.5,5); \draw [line width=0.2mm,red] (5.5,6)-- (4.5,5)-- (4.5,4); \draw [line width=0.2mm,red] (1.5,5)--(2.5,4)--(1.5,3); \draw [line width=0.2mm,red] (4.5,5)-- (5.5,4)-- (5.5,3)-- (4.5,2)-- (4.5,1); \draw [line width=0.2mm,red] (6.5,5)--(6.5,4)--(7,3.5); \draw [line width=0.2mm,red] (4.5,3)-- (3.5,2); \draw [line width=0.2mm,red] (7,2.75)-- (5.5,2); \draw [line width=0.2mm,red] (7,2.5)-- (6.5,2); \draw [line width=0.2mm,red] (2.5,4)-- (2.5,3)-- (1.5,2)-- (1.5,1); \draw [line width=0.2mm,red] (3.5,4)-- (4.5,3)-- (2.5,2); \draw [line width=0.2mm,red] (5.5,4)-- (6.5,3); \draw [line width=0.2mm,red] (1.5,2)-- (3.5,1); \draw [line width=0.2mm,red] (1.5,2)-- (2.5,1)-- (1.5,0); \draw [line 
width=0.2mm,red] (5.5,2)-- (5.5,1)-- (3.5,0); \draw [line width=0.2mm,red] (5.5,2)-- (6.5,1)-- (5.5,0); \draw [line width=0.2mm,red] (2.5,1)--(2.5,0); \draw [line width=0.2mm,red] (5.5,1)--(4.5,0); \draw [line width=0.2mm,red] (7,0.5) -- (6.5,0); \end{tikzpicture} \caption{} \end{subfigure} \begin{subfigure}[t]{0.32\textwidth} \begin{tikzpicture}[scale=0.6] \draw[step=1cm,dotted, thin] (0,0) grid (7,7); \draw [line width=0.2mm,black] (1,0) -- (1,7); \draw [line width=0.2mm,black] (0,0) -- (0,7); \draw [line width=0.2mm,black] (3,-0.03) -- (3,0.03); \draw [line width=0.2mm,black] (2,0) -- (2,1); \draw [line width=0.2mm,black] (3,0) -- (3,1) -- (2,2); \draw [line width=0.2mm,black] (3,0) -- (4,1); \draw [line width=0.2mm,black] (5,0) -- (5,1); \draw [line width=0.2mm,black] (5,0) -- (6,1) -- (4,2); \draw [line width=0.2mm,black] (6,0) -- (7,1); \draw [line width=0.2mm,black] (6,0) -- (7,0.5); \draw [line width=0.2mm,black] (3,1) -- (3,2) -- (4,3); \draw [line width=0.2mm,black] (6,1) -- (5,2) -- (5,3); \draw [line width=0.2mm,black] (7,2) -- (7,3); \draw [line width=0.2mm,black] (7,1.5)--(6,2); \draw [line width=0.2mm,black] (2,2) -- (2,3); \draw [line width=0.2mm,black] (2,2) -- (3,3) -- (2,4); \draw [line width=0.2mm,black] (5,2) -- (6,3) -- (6,4); \draw [line width=0.2mm,black] (3,3) -- (3,4) -- (2,5); \draw [line width=0.2mm,black] (4,3) -- (4,4); \draw [line width=0.2mm,black] (4,3) -- (5,4) -- (3,5); \draw [line width=0.2mm,black] (6,3) -- (7,4); \draw [line width=0.2mm,black] (7,4.5) -- (6,5) -- (6,6); \draw [line width=0.2mm,black] (5,4) -- (4,5); \draw [line width=0.2mm,black] (6,4) -- (5,5)--(5,6); \draw [line width=0.2mm,black] (2,5) -- (2,6); \draw [line width=0.2mm,black] (2,5) -- (3,6) -- (2,7); \draw [line width=0.2mm,black] (2,5) -- (4,6); \draw [line width=0.2mm,black] (6,5) -- (6,6) -- (4,7); \draw [line width=0.2mm,black] (6,5) -- (7,6) -- (6,7); \draw [line width=0.2mm,black] (3,6) -- (3,7); \draw [line width=0.2mm,black] (6,6) -- (5,7); 
\node[black] () at (-0.5,0){$-3$}; \node[black] () at (-0.5,1){$-2$}; \node[black] () at (-0.5,2){$-1$}; \node[black] () at (-0.3,3){$0$}; \node[black] () at (-0.3,4){$1$}; \node[black] () at (-0.3,5){$2$}; \node[black] () at (-0.3,6){$3$}; \node[black] () at (-0.3,7){$4$}; \node[black] () at (1,-0.4){$1$}; \node[black] () at (2,-0.4){$2$}; \node[black] () at (3,-0.4){$3$}; \node[black] () at (4,-0.4){$4$}; \node[black] () at (5,-0.4){$5$}; \node[black] () at (6,-0.4){$6$}; \node[black] () at (7,-0.4){$7$}; \draw [line width=0.2mm,red] (0.5,0) -- (0.5,7); \draw [line width=0.2mm,red] (1.5,0) -- (1.5,7); \draw [line width=0.2mm,red] (2.5,7.03) -- (2.5,6.97); \draw [line width=0.2mm,red] (3.5,7.03) -- (3.5,6.97); \draw [line width=0.2mm,red] (4.5,7.03) -- (4.5,6.97); \draw [line width=0.2mm,red] (6.5,7)-- (7,6.5); \draw [line width=0.2mm,red] (5.5,7)-- (6.5,6); \draw [line width=0.2mm,red] (3.5,7) --(5.5,6)--(5.5,5); \draw [line width=0.2mm,red] (3.5,7)-- (4.5,6)-- (4.5,5); \draw [line width=0.2mm,red] (3.5,7)-- (3.5,6); \draw [line width=0.2mm,red] (1.5,7)-- (2.5,6); \draw [line width=0.2mm,red] (7,5.5)-- (6.5,5); \draw [line width=0.2mm,red] (4.5,6)-- (3.5,5); \draw [line width=0.2mm,red] (4.5,6)-- (2.5,5)-- (4.5,4); \draw [line width=0.2mm,red] (5.5,5)-- (7,4.25); \draw [line width=0.2mm,red] (7,3.5)-- (6.5,3); \draw [line width=0.2mm,red] (5.5,5)-- (6.5,4); \draw [line width=0.2mm,red] (4.5,5)--(5.5,4)--(5.5,3); \draw [line width=0.2mm,red] (2.5,5)-- (3.5,4)-- (3.5,3); \draw [line width=0.2mm,red] (1.5,5)-- (2.5,4); \draw [line width=0.2mm,red] (5.5,4)-- (4.5,3)-- (4.5,2); \draw [line width=0.2mm,red] (1.5,4)--(2.5,3); \draw [line width=0.2mm,red] (6.5,3)-- (6.5,2); \draw [line width=0.2mm,red] (6.5,3)-- (5.5,2); \draw [line width=0.2mm,red] (4.5,3)-- (3.5,2)-- (4.5,1); \draw [line width=0.2mm,red] (3.5,3)-- (2.5,2); \draw [line width=0.2mm,red] (3.5,2)-- (5.5,1); \draw [line width=0.2mm,red] (5.5,2)-- (6.5,1)-- (5.5,0); \draw [line width=0.2mm,red] (5.5,2)-- 
(7,1.25); \draw [line width=0.2mm,red] (3.5,2)--(3.5,1); \draw [line width=0.2mm,red] (1.5,2)-- (2.5,1)--(2.5,0); \draw [line width=0.2mm,red] (7,0.25)-- (6.5,0); \draw [line width=0.2mm,red] (4.5,1)-- (4.5,0); \draw [line width=0.2mm,red] (4.5,1)-- (3.5,0); \end{tikzpicture} \caption{} \end{subfigure} \caption{Graphical illustration of Theorem \ref{proM}. The twofold dual on panel (c) is a shifted copy of the primary forest given in (a).} \label{Fig2}\end{figure*} The next result describes the case of a GW reproduction mapping $U$, that is when the corresponding offspring numbers $\{u(x),x\in\mathbb N\}$ are independent and identically distributed. \begin{proposition}\label{JJ} Referring to Definition \ref{deg} consider a reproduction mapping $U$ together with its dual mapping $V$. The mapping $U$ generates a GW reproduction law $$\rP(u(x)=k)=P_{k},\quad k\ge0,\quad x\ge1,$$ if and only if the dual offspring numbers have a representation \begin{align} (v(1),v(2),\ldots)&=(\xi_{1}+1,\underbrace{0,\ldots,0}_{\eta_1},\xi_{2}+1,\underbrace{0,\ldots,0}_{\eta_2},\ldots), \label{hata} \end{align} where $\xi_1,\eta_1,\xi_2,\eta_2,\ldots$ are mutually independent $\mathbb N_0$-valued random variables with the marginal distributions \begin{align*} \rP( \xi_i=k)=P_{0}^k(1-P_{0}),\quad \rP(\eta_i=k)={P_{k+1}\over 1-P_{0}},\quad k\ge0,\quad i\ge1. \end{align*} In this case, the marginal dual reproduction law has a linear-fractional distribution \begin{align} \rE s^{v(x)}&=1-\hat q(x)+\hat q(x){P_{0}s\over 1-(1-P_{0})s}, \label{dlf} \end{align} where \begin{align} \hat q(1)=1,\quad \hat q(x)=(1-P_{0})^{-1}\sum _{k=1}^{x-1}P_{k}\hat q(x-k),\quad x\ge2.\label{p0x} \end{align} \end{proposition} A natural question arising in connection to Proposition \ref{JJ} is whether it is possible that the primary and its dual reproduction mappings are both GW? The answer is no, since the dual law always assigns at least one offspring to the particle of rank 1. 
The closest one can get to a GW dual is a {\it GW reproduction with an eternal particle}, which by definition is a rank-dependent GW reproduction mapping $V$ whose offspring numbers are such that $v(1)\ge1$, and $v(2),v(3),\ldots$ have a common distribution. The following result significantly extends Proposition 3.6 in \cite{KRS}. \begin{theorem} \label{th} Consider a GW reproduction mapping $U$. Its pathwise dual $V$ is a GW reproduction with an eternal particle if and only if \begin{align} \rE s^{u(x)}&=1-q+q{ps\over 1-(1-p)s},\quad p,q\in(0,1].\label{lft}\end{align} In this case, \begin{align} \rE s^{v(1)}&={qs\over 1-(1-q)s}, \qquad \rE s^{v(x)}=1-p+p{qs\over 1-(1-q)s},\quad x\ge 2. \label{lfg} \end{align} \end{theorem} \section{Examples }\label{BD} \noindent {\bf Pure death rank-dependent GW.} A distinct forest structure appears in the case when the offspring numbers take values 0 or 1: $$f_{t,x}(s)=p_{t,x}+(1-p_{t,x})s,\quad x\in\mathbb N,\quad t\in\mathbb Z.$$ Each dual lineage, followed upwards, eventually vanishes without branching. Given $p_{t,x}\equiv p_x$, the dual reproduction is not rank-dependent GW because of the dependence in the joint distribution \[\rP(v(1)=k_1,\ldots,v(m)=k_m)=p_1\cdots p_{k_1-1}(1-p_{k_1})\prod_{l=1}^{m-1}p_{k_1+\ldots+k_l+1}\cdots p_{k_1+\ldots+k_{l+1}-1}(1-p_{k_1+\ldots+k_{l+1}}).\] \noindent {\bf Birth-death GW reproduction.} Consider a GW reproduction law $\rP(u=k)=p_k$ assuming $p_0+p_1+p_2=1$. If $p_2=0$, then the dual reproduction is GW with a shifted geometric distribution \[\rP(v(x)=k)=p_0^{k-1}(1-p_0),\quad k\ge1.\] If $p_1=0$, then the dual reproduction is rank-dependent GW described by the example given in the Introduction, with $p=p_0$. On the other hand, if $p_0=0$, then the dual reproduction law is not rank-dependent GW because of the following dependence: \begin{align*} & \rP(v(1)=1)=1,\quad \rP(v(2)=0,v(3)=1)=p_2,\\ & \rP(v(2)=1, v(3)=0)=p_1p_2,\quad \rP(v(1)=0,v(3)=1)=p_1^2. 
\end{align*} \noindent {\bf Bounded GW processes.} Consider a specific example of the rank-dependent GW process given by \[f_{t,x}(s)= \left\{ \begin{array}{rl} f(s), & x\in[1,B_t], \\ 1, & x>B_t, \end{array} \right. \quad s\in[0,1], \quad t\ge0. \] What we get is a version of a truncated GW process with a stationary reproduction $f$, where the number of particles, allowed to reproduce at time $t$, is bounded by $B_t$. An interesting result for such processes dealing with the supercritical case was obtained in \cite{Z}. \vspace{0.4cm} \noindent {\bf GW processes with immigration.} Consider a rank-dependent GW process with $f_{t,1}(s)= sg_t(s)$ and $f_{t,x}\equiv f_t$, $x\ge2$. This is a GW process with an eternal particle in a varying environment. Removing the eternal particle of rank 1 and keeping its offspring as immigrants, we arrive at a GW with immigration. GW processes with immigration are well-studied in the case of a stationary reproduction $f_{t}=f$ and varying immigration $\{g_t\}_{t\ge0}$, see \cite{Ra}. The case of varying $\{g_t,f_t\}_{t\ge0}$ has received less attention in the literature; see, however, \cite{MO}. \vspace{0.4cm}\noindent {\bf GW processes with emigration.} Consider a time-homogeneous GW process with an eternal particle, such that \eqref{lft} holds for $x\ge2$, and $u(1)\ge1$ has an arbitrary distribution. Its dual Markov chain can be interpreted in terms of a GW process with emigration (catastrophes, disasters), with a random number $\eta_t\stackrel{d}{=}u(1)-1$ of particles being removed from generation $t$. If the current size $Y_t$ does not exceed $\eta_t$, the population dies out. One of the first papers addressing this model was \cite{V}, where the critical case was studied under the assumption $\eta_t\equiv1$. 
It was shown in \cite{Gr} that if the GW reproduction is supercritical and the numbers of emigrants $\{\eta_t\}_{t\ge0}$ are independent copies of $\eta$, then the GW process with emigration goes extinct with probability 1 iff $\rE \log (\eta+1)=\infty $. On the other hand, a well-known result by \cite{AH} says that a subcritical GW process with immigration has a stationary distribution iff $\rE \log (\eta+1)<\infty $ for the number of immigrants $u(1)-1\stackrel{d}{=}\eta$. \vspace{0.4cm}\noindent {\bf Rank-dependent GW process with a carrying capacity.} Consider the time-homogeneous case, $f_{t,x}=f_x$, when the reproduction law is variable along the spatial position. Our setting is suitable for modeling population size dependent reproduction in a way which is different from that of \cite{K,KS}. Let $m_x=f'_x(1)$ be the mean offspring number for the particle of rank $x$. Suppose $m_1>1$ and $m_x$ monotonely decreases with $x$ so that for some $K\in\mathbb N$, \[m_1+\ldots+m_x\ge x,\quad x\le K,\quad m_1+\ldots+m_x<x,\quad x> K.\] Such a $K$ can be viewed as the carrying capacity of a population of individuals which produce less than 1 child per individual when the size of the population exceeds $K$. \vspace{0.4cm}\noindent {\bf Embeddable rank-dependent GW-processes.} Embeddability into continuous time Markov branching processes is not a fully resolved issue for basic GW processes \cite[Ch III.12]{AN}. Several explicit examples of embeddable GW processes can be found in \cite{SL}. One known class of embeddable GW processes in varying environments is the case of linear-fractional reproduction addressed in Theorem \ref{th}. Consider a continuous time linear birth-death process $\{Z(t), t\ge t_0\}$ with the variable birth and death rates $\{\lambda(t),\mu(t)\}_{t\in\mathbb R}$ per individual. It is well-known that such a process has linear-fractional distributions. 
By \cite{Ke}, $$\rE(s^{Z(t)}|Z(t_0 )=1)=1-q(t_0,t)+q(t_0,t){p(t_0,t)s\over 1-(1-p(t_0,t))s},$$ where \begin{align*} q(t_0,t)&=\Big(1+\int_{t_0 }^te^{\rho(t_0,u)}\mu(u)du\Big)^{-1},\quad p(t_0,t)=e^{\rho(t_0,t)}q(t_0,t),\quad \rho(t_0,t)=\int_{t_0 }^t(\mu(u)-\lambda(u))du. \end{align*} A linear-fractional GW process with varying parameters $(q_t,p_t)$ in the expression \eqref{lft} given by \begin{align*} q_{t}&=\Big(1+\int_{t-1}^te^{\rho_u}\mu(u)du\Big)^{-1},\quad p_t=e^{\rho_t}q_t,\quad \rho_u:=\rho(t-1,u)=\int_{t-1}^u(\mu(v)-\lambda(v))dv. \end{align*} can be embedded in a birth-death process. Figure \ref{Fig3} illustrates the graphical representation for such an embedding. See also a recent result \cite{FL} presenting a different approach towards dual random forests in a continuous time setting. \begin{figure*}[t!] \centering \begin{subfigure}[t]{0.32\textwidth} \begin{tikzpicture}[scale=0.7] \draw[step=1cm,dotted, thin] (0,0) grid (6,6); \draw [line width=0.2mm,black] (1,0) -- (1,6); \draw [line width=0.2mm,black] (2,0) -- (2,6); \draw [line width=0.2mm,black] (3,0) -- (3,6); \draw [line width=0.2mm,black] (4,0) -- (4,6); \draw [line width=0.2mm,black] (5,0) -- (5,6); \draw [line width=0.2mm,black] (6,0) -- (6,6); \draw [->] (2,0.7) -- (3,0.7);\draw [->] (3,0.7) -- (4,0.7);\draw [->] (4,0.7) -- (5,0.7);\draw [->] (5,0.7) -- (6,0.7); \draw [<-] (3,0.3) -- (4,0.3);\draw [<-] (4,0.3) -- (5,0.3);\draw [<-] (5,0.3) -- (6,0.3); \draw [->] (2,1.2) -- (3,1.2);\draw [->] (3,1.2) -- (4,1.2);\draw [->] (4,1.2) -- (5,1.2);\draw [->] (5,1.2) -- (6,1.2); \draw [<-] (1,1.5) -- (2,1.5);\draw [<-] (2,1.5) -- (3,1.5);\draw [<-] (3,1.5) -- (4,1.5);\draw [<-] (4,1.5) -- (5,1.5);\draw [<-] (5,1.5) -- (6,1.5); \draw [<-] (4,2.9) -- (5,2.9);\draw [<-] (5,2.9) -- (6,2.9); \draw [->] (1,3.1) -- (2,3.1);\draw [->] (2,3.1) -- (3,3.1);\draw [->] (3,3.1) -- (4,3.1);\draw [->] (4,3.1) -- (5,3.1);\draw [->] (5,3.1) -- (6,3.1); \draw [->] (1,3.6) -- (2,3.6);\draw [->] (2,3.6) -- (3,3.6);\draw 
[->] (3,3.6) -- (4,3.6);\draw [->] (4,3.6) -- (5,3.6);\draw [->] (5,3.6) -- (6,3.6); \draw [<-] (3,4.5) -- (4,4.5);\draw [<-] (4,4.5) -- (5,4.5);\draw [<-] (5,4.5) -- (6,4.5); \draw [<-] (2,4.1) -- (3,4.1);\draw [<-] (3,4.1) -- (4,4.1);\draw [<-] (4,4.1) -- (5,4.1);\draw [<-] (5,4.1) -- (6,4.1); \draw [->] (4,4.8) -- (5,4.8);\draw [->] (5,4.8)-- (6,4.8); \draw [<-] (1,5.9) -- (2,5.9);\draw [<-] (2,5.9) -- (3,5.9);\draw [<-] (3,5.9) -- (4,5.9);\draw [<-] (4,5.9) -- (5,5.9);\draw [<-] (5,5.9) -- (6,5.9); \draw [<-] (1,5.3) -- (2,5.3);\draw [<-] (2,5.3) -- (3,5.3);\draw [<-] (3,5.3) -- (4,5.3);\draw [<-] (4,5.3) -- (5,5.3);\draw [<-] (5,5.3) -- (6,5.3); \draw [->] (2,5.5) -- (3,5.5);\draw [->] (3,5.5) -- (4,5.5);\draw [->] (4,5.5) -- (5,5.5);\draw [->] (5,5.5)-- (6,5.5); \node[black] () at (-0.5,0){$-3$}; \node[black] () at (-0.5,1){$-2$}; \node[black] () at (-0.5,2){$-1$}; \node[black] () at (-0.3,3){$0$}; \node[black] () at (-0.3,4){$1$}; \node[black] () at (-0.3,5){$2$}; \node[black] () at (-0.3,6){$3$}; \node[black] () at (1,-0.4){$1$}; \node[black] () at (2,-0.4){$2$}; \node[black] () at (3,-0.4){$3$}; \node[black] () at (4,-0.4){$4$}; \node[black] () at (5,-0.4){$5$}; \node[black] () at (6,-0.4){$6$}; \end{tikzpicture} \caption{} \end{subfigure} ~ \begin{subfigure}[t]{0.32\textwidth} \begin{tikzpicture}[scale=0.7] \draw[step=1cm,dotted, thin] (0,0) grid (6,6); \node[black] () at (-0.5,0){$-3$}; \node[black] () at (-0.5,1){$-2$}; \node[black] () at (-0.5,2){$-1$}; \node[black] () at (-0.3,3){$0$}; \node[black] () at (-0.3,4){$1$}; \node[black] () at (-0.3,5){$2$}; \node[black] () at (-0.3,6){$3$}; \node[black] () at (0,-0.4){$0$}; \node[black] () at (1,-0.4){$1$}; \node[black] () at (2,-0.4){$2$}; \node[black] () at (3,-0.4){$3$}; \node[black] () at (4,-0.4){$4$}; \node[black] () at (5,-0.4){$5$}; \node[black] () at (6,-0.4){$6$}; \draw [line width=0.2mm,red] (0,0) -- (0,6); \draw [line width=0.2mm,red] (1,0) -- (1,6); \draw [line width=0.2mm,red] (2,0) -- (2,6); 
\draw [line width=0.2mm,red] (3,0) -- (3,6); \draw [line width=0.2mm,red] (4,0) -- (4,6); \draw [line width=0.2mm,red] (5,0) -- (5,6); \draw [line width=0.2mm,red] (6,0) -- (6,6); \draw [<-,red] (2,0.7) -- (3,0.7);\draw [<-,red] (3,0.7) -- (4,0.7);\draw [<-,red] (4,0.7) -- (5,0.7);\draw [<-,red] (5,0.7) -- (6,0.7); \draw [->,red] (2,0.3) -- (3,0.3); \draw [->,red] (3,0.3) -- (4,0.3);\draw [->,red] (4,0.3) -- (5,0.3);\draw [->,red] (5,0.3) -- (6,0.3); \draw [<-,red] (2,1.2) -- (3,1.2);\draw [<-,red] (3,1.2) -- (4,1.2);\draw [<-,red] (4,1.2) -- (5,1.2);\draw [<-,red] (5,1.2) -- (6,1.2); \draw [->,red] (0,1.5) -- (1,1.5);\draw [->,red] (1,1.5) -- (2,1.5);\draw [->,red] (2,1.5) -- (3,1.5);\draw [->,red] (3,1.5) -- (4,1.5);\draw [->,red] (4,1.5) -- (5,1.5);\draw [->,red] (5,1.5) -- (6,1.5); \draw [->,red] (3,2.9) -- (4,2.9);\draw [->,red] (4,2.9) -- (5,2.9);\draw [->,red] (5,2.9) -- (6,2.9); \draw [<-,red] (1,3.1) -- (2,3.1);\draw [<-,red] (2,3.1) -- (3,3.1);\draw [<-,red] (3,3.1) -- (4,3.1);\draw [<-,red] (4,3.1) -- (5,3.1);\draw [<-,red] (5,3.1) -- (6,3.1); \draw [<-,red] (1,3.6) -- (2,3.6);\draw [<-,red] (2,3.6) -- (3,3.6);\draw [<-,red] (3,3.6) -- (4,3.6);\draw [<-,red] (4,3.6) -- (5,3.6);\draw [<-,red] (5,3.6) -- (6,3.6); \draw [->,red] (2,4.5) -- (3,4.5);\draw [->,red] (3,4.5) -- (4,4.5);\draw [->,red] (4,4.5) -- (5,4.5);\draw [->,red] (5,4.5) -- (6,4.5); \draw [->,red] (1,4.1) -- (2,4.1);\draw [->,red] (2,4.1) -- (3,4.1);\draw [->,red] (3,4.1) -- (4,4.1);\draw [->,red] (4,4.1) -- (5,4.1);\draw [->,red] (5,4.1) -- (6,4.1); \draw [<-,red] (4,4.8) -- (5,4.8);\draw [<-,red] (5,4.8)-- (6,4.8); \draw [->,red] (0,5.9) -- (1,5.9);\draw [->,red] (1,5.9) -- (2,5.9);\draw [->,red] (2,5.9) -- (3,5.9);\draw [->,red] (3,5.9) -- (4,5.9);\draw [->,red] (4,5.9) -- (5,5.9);\draw [->,red] (5,5.9) -- (6,5.9); \draw [->,red] (0,5.3) -- (1,5.3);\draw [->,red] (1,5.3) -- (2,5.3);\draw [->,red] (2,5.3) -- (3,5.3);\draw [->,red] (3,5.3) -- (4,5.3);\draw [->,red] (4,5.3) -- (5,5.3);\draw 
[->,red] (5,5.3) -- (6,5.3); \draw [<-,red] (2,5.5) -- (3,5.5);\draw [<-,red] (3,5.5) -- (4,5.5);\draw [<-,red] (4,5.5) -- (5,5.5);\draw [<-,red] (5,5.5)-- (6,5.5); \end{tikzpicture} \caption{} \end{subfigure}% % \begin{subfigure}[t]{0.32\textwidth} \begin{tikzpicture}[scale=0.7] \draw[step=1cm,dotted, thin] (0,0) grid (6,6); \draw [line width=0.2mm,black] (3,-0.03) -- (3,0.03); \draw [line width=0.2mm,black] (1,0) -- (1,1); \draw [line width=0.2mm,black] (2,0) -- (2,1) -- (1,2) -- (1,3) -- (1,4) -- (1,5); \draw [line width=0.2mm,black] (1,3) -- (2,4); \draw [line width=0.2mm,black] (1,3) -- (3,4)-- (2,5); \draw [line width=0.2mm,black] (2,1) -- (2,2) -- (2,3)-- (4,4); \draw [line width=0.2mm,black] (2,0) -- (3,1) -- (3,2) -- (3,3) -- (5,4)-- (3,5)-- (1,6); \draw [line width=0.2mm,black] (3,5)-- (2,6); \draw [line width=0.2mm,black] (4,0) -- (4,1) -- (4,2); \draw [line width=0.2mm,black] (5,0) -- (5,1) -- (5,2) -- (4,3) -- (6,4)-- (4,5)-- (3,6); \draw [line width=0.2mm,black] (6,4)-- (5,5)-- (4,6); \draw [line width=0.2mm,black] (6,0) -- (6,1) -- (6,2) -- (5,3); \draw [line width=0.2mm,black] (6,5)-- (5,6); \node[black] () at (-0.5,0){$-3$}; \node[black] () at (-0.5,1){$-2$}; \node[black] () at (-0.5,2){$-1$}; \node[black] () at (-0.3,3){$0$}; \node[black] () at (-0.3,4){$1$}; \node[black] () at (-0.3,5){$2$}; \node[black] () at (-0.3,6){$3$}; \node[black] () at (1,-0.4){$1$}; \node[black] () at (2,-0.4){$2$}; \node[black] () at (3,-0.4){$3$}; \node[black] () at (4,-0.4){$4$}; \node[black] () at (5,-0.4){$5$}; \node[black] () at (6,-0.4){$6$}; \draw [line width=0.2mm,red] (1.5,6.03) -- (1.5,5.97); \draw [line width=0.2mm,red] (0.5,0)-- (0.5,6); \draw [line width=0.2mm,red] (1.5,0)-- (1.5,1)-- (0.5,2); \draw [line width=0.2mm,red] (1.5,2)-- (1.5,3)-- (3.5,4)-- (2.5,5)-- (0.5,6); \draw [line width=0.2mm,red] (2.5,4)-- (1.5,5)-- (0.5,6); \draw [line width=0.2mm,red] (1.5,4)-- (1.5,5); \draw [line width=0.2mm,red] (2.5,1)-- (2.5,3)-- (4.5,4); \draw [line 
width=0.2mm,red] (2.5,0)-- (3.5,1)-- (3.5,3); \draw [line width=0.2mm,red] (3.5,0)-- (3.5,2); \draw [line width=0.2mm,red] (4.5,4)-- (2.5,5); \draw [line width=0.2mm,red] (4.5,5)-- (3.5,6); \draw [line width=0.2mm,red] (4.5,0)-- (4.5,2)-- (3.5,3)-- (5.5,4)-- (3.5,5)-- (2.5,6); \draw [line width=0.2mm,red] (5.5,0)-- (5.5,2)-- (4.5,3)-- (6,3.75); \draw [line width=0.2mm,red] (6,2.5)-- (5.5,3)-- (6,3.5); \draw [line width=0.2mm,red] (6,4.5)-- (4.5,6); \draw [line width=0.2mm,red] (6,5.5)-- (5.5,6); \end{tikzpicture} \caption{} \end{subfigure} \caption{Graphical representation of the birth-death processes. Arrows to the left mean the death at the end position of the line of arrows. Arrows to the right mean splitting at the origin of the line of arrows.} \label{Fig3}\end{figure*} \vspace{0.4cm}\noindent {\bf Defective rank-dependent GW.} For any $V\in\Phi_0$, there is either finite or infinite limit $\bar V=\lim_{x\to\infty} V(x)$. We will call {\it defective} a random reproduction mapping $U\in\Phi_0$ such that its dual $V$ satisfies $\rP(\bar V<\infty)>0$. In the defective case, a particle is able to produce infinitely many offspring. GW processes with a defective reproduction law were studied in a recent paper \cite{SC}. Turning to the non-linear birth-death processes, see for example \cite{SS}, we observe that in general, the embedding, discussed in the previous example, does not yield a rank-dependent GW process, as the number of offspring may depend on each other. An interesting exception is the pure death processes producing embedded pure death rank-dependent GW processes mentioned in the first example of this section. Given the time-homogeneous death rate $\mu_x$ for an individual of rank $x\ge1$, such that \[\sum_{x=1}^\infty {1\over \mu_1+\ldots+\mu_x}<\infty,\] we get a pure death process coming down from infinity, see for example \cite{SF}. 
Observe that in this case, the dual Markov chain gives a defective reproduction model which is not a rank-dependent GW process. \section{Proofs}\label{Spro} Let reproduction mappings $U,V,\tilde U\in\Phi_0$, be connected as $V=U^-$, $\tilde U=V^-$. The corresponding offspring numbers are denoted $u(x), v(x)$ and $\tilde u(x)$. \begin{lemma}\label{L} If $(\xi_i,\eta_i)$ are defined by \begin{align} (u(1),u(2),\ldots)&=(\underbrace{0,\ldots,0}_{\xi_1},\eta_{1}+1,\underbrace{0,\ldots,0}_{\xi_2},\eta_{2}+1,\ldots),\quad \xi_i,\eta_i\ge0,\quad i\in\mathbb N, \label{hata1} \end{align} then we have \eqref{hata} and \begin{align} (\tilde u(1),\tilde u(2),\ldots)&=(1,\underbrace{0,\ldots,0}_{\xi_1},\eta_{1}+1,\underbrace{0,\ldots,0}_{\xi_2},\eta_{2}+1,\ldots) .\label{hatb} \end{align} \end{lemma} \begin{proof} From $V(x)=\min\{y:U(y)\ge x\}$, we get $V(0)=0$ and \begin{align} \{x: V(x)=y\}= \{x: U(y-1)<x\le U(y)\},\quad y\ge1,\label{vx} \end{align} which implies \eqref{hata}. In a similar way, relation \eqref{hatb} follows from \eqref{hata}. Observe also that \eqref{vx} entails \eqref{VxU}. \end{proof} \vspace{0.4cm}\noindent {\bf Proof of Theorem \ref{proM}.} Using \eqref{VxU}, we obtain consecutively \begin{align*} \{\hat U_{-b,-a}(x)\le y\}&=\{\hat U_{-a-1}\circ\cdots\circ \hat U_{-b}(x)\le y\}=\{V_{a}\circ\cdots\circ V_{b-1}(x)\le y\}\\ &=\{V_{a+1}\circ\cdots\circ V_{b-1}(x)\le U_a(y)\}=\ldots=\{x\le U_{b-1}\circ\cdots\circ U_a(y)\}=\{x\le U_{a,b}(y)\}. \end{align*} Observe that $\tilde U_t=V_t^-$, and by Lemma \ref{L}, we have $\tilde U_t(x)=U_t(x-1)+1$, which yields \begin{align*} \tilde U_{a,b}(x)&=\tilde U_{b-1}\circ\cdots\circ \tilde U_{a}(x)=\tilde U_{b-1}\circ\cdots\circ \tilde U_{a+1}(U_a(x-1)+1)\\ &= \tilde U_{b-1}\circ\cdots\circ \tilde U_{a+2}( U_{a+1}\circ U_a(x-1)+1)= U_{a,b}(x-1)+1. 
\end{align*} \vspace{0.4cm}\noindent {\bf Proof of Proposition \ref{JJ}.} The random variables $u(1),u(2),\ldots$ are independent with a common distribution $\{P_k\}_{k=0}^\infty$ if and only if relation \eqref{hata1} holds with mutually independent $\xi_1,\eta_1,\xi_2,\eta_2,\ldots$, such that \begin{align*} \rP( \xi_i=k)=P_{0}^k(1-P_{0}),\quad \rP(\eta_i=k)=\rP(u(1)=k+1|u(1)\ge1),\quad k\ge0,\quad i\ge1. \end{align*} By Lemma \ref{L}, this proves the first statement of the proposition. Turning to the second statement concerning the distribution of $v(x)$, denote $\hat q(x)=\rP(v(x)>0)$. The first statement implies that \eqref{dlf} holds with \begin{align*} \hat q(x)&= \rP\Big(\bigcup_{n=0}^\infty \{1+n+\eta_{1}+\ldots+\eta_{n}=x\}\Big)=\sum_{n=0}^{x-1} \rP(\eta_{1}+\ldots+\eta_{n}=x-n-1),\quad x\ge1. \end{align*} This entails that $\hat q(1)=1$, and recursion \eqref{p0x} for $x\ge2$ is obtained via conditioning on $\eta_1$: \begin{align*} \hat q(x)&=\sum_{n=1}^{x-1} \rP(\eta_{1}+\ldots+\eta_{n}=x-n-1)=\sum_{n=1}^{x-1}\sum_{k=1}^{x-n} \rP(\eta_{1}=k-1)\rP(\eta_{2}+\ldots+\eta_{n}=x-n-k)\\ &=(1-P_0)^{-1}\sum_{k=1}^{x-1} P_k\sum_{n=1}^{x-k}\rP(\eta_{2}+\ldots+\eta_{n}=x-k-n)=(1-P_0)^{-1}\sum_{k=1}^{x-1} P_k\hat q(x-k). \end{align*} \vspace{0.4cm}\noindent {\bf Proof of Theorem \ref{th}.} Suppose that the conditions of Proposition \ref{JJ} are valid. If, as stated by \eqref{lft}, $\hat q(x)=q$ for all $x\ge2$, then Proposition \ref{JJ} implies \eqref{lfg}, and it is easy to verify independence of $v(1),v(2),\ldots$. Thus we find that the dual reproduction is that of a GW process with an eternal particle. To prove the reverse statement, assume that $v(1),v(2),\ldots$ are independent and $v(2),v(3),\ldots$ have a common distribution. 
Using \eqref{p0x}, we find that for some $q\in(0,1]$, \[ (1-P_{0})q=p(x-1)+q \sum _{k=1}^{x-2}P_{k},\quad x\ge2.\] Therefore, for all $n\ge1$, we obtain \[ p(n)=q \sum _{k=n}^\infty P_{k},\] which leads to \eqref{lfg}, which in turn yields \eqref{lft}. \vspace{0.4cm}\noindent{\bf Acknowledgements.} The authors thank Uwe R\"{o}sler for a discussion of an issue concerning duality. The research by Jonas Jagers was supported by the Royal Swedish Academy of Sciences through the Elis Sidenbladh foundation grant.
%% 1,108,101,565,832
%% arxiv
\section{Introduction} The representation theory of the de Sitter group, and also all the questions concerning this group and the de Sitter spacetime, have come to the forefront due to the recent discoveries in modern cosmology. One of the most important problems in this area is the construction of quantum field theory in the de Sitter spacetime (see, for example, \cite{All85,BM96,MRT05,Var05a}). As is known, in the standard quantum field theory in Minkowski spacetime solutions (wave functions) of relativistic wave equations are expressed via an expansion in relativistic spherical functions (matrix elements of the Lorentz group representations) \cite{AAV69,Var03,Var05,Var06}. The analogous problem in five dimensions (solutions of wave equations in de Sitter space) requires the most exact definition for the matrix elements and spherical functions of irreducible representations of the de Sitter group. In the present work spherical functions are studied on the various homogeneous spaces of the de Sitter group $\SO_0(1,4)$. A starting point of this research is an analogy between universal coverings of the Lorentz and de Sitter groups, which was first established by Takahashi \cite{Tak63} (see also the work of Str\"{o}m \cite{Str69}). Namely, the universal covering of $\SO_0(1,4)$ is $\spin_+(1,4)\simeq\Sp(1,1)$ and the spinor group $\spin_+(1,4)$ is described in terms of $2\times 2$ quaternionic matrices. On the other hand, the universal covering of the Lorentz group $\SO_0(1,3)$ is $\spin_+(1,3)\simeq\SL(2,\hbox{\bb C})$, where the spinor group $\spin_+(1,3)$ is described in terms of $2\times 2$ complex matrices. This analogy allows us to apply (with some restrictions) Gel'fand-Naimark representation theory of the Lorentz group \cite{GMS,Nai58} to $\SO_0(1,4)$. Section 2 contains a further development of the Takahashi-Str\"{o}m analogy (quaternionic description of $\SO_0(1,4)$). 
It is shown that for the group $\spin_+(1,4)\simeq\Sp(1,1)$ there are quaternion Euler angles which contain complex Euler angles of $\spin_+(1,3)\simeq\SL(2,\hbox{\bb C})$ as a particular case. Differential operators (Laplace-Beltrami and Casimir operators) are defined on $\Sp(1,1)$ in terms of the quaternion Euler angles. Spherical functions on the group $\SO_0(1,4)$ are understood as functions of representations of the class 1 realized on the homogeneous spaces of $\SO_0(1,4)$. A list of homogeneous spaces of $\SO_0(1,4)$, including symmetric Riemannian and non-Riemannian spaces, is given at the end of section 2. Spherical functions on the group $\SO(4)$ (maximal compact subgroup of $\SO_0(1,4)$) are studied in the section 3. It is shown that for a universal covering $\spin(4)\simeq\SU(2)\otimes\SU(2)$ of $\SO(4)$ there are double Euler angles. It should be noted that all the hypercomplex extensions (complex, double, quaternion) of usual Euler angles of the group $\SU(2)$ follow directly from the algebraic structure underlying the groups $\spin_+(p,q)$ and describing within the framework of Clifford algebras $C\kern -0.2em \ell_{p,q}$ \cite{Var04}. Matrix elements and spherical functions of $\SO(4)$ are expressed via the product of two hypergeometric functions. Further, spherical functions of finite-dimensional representations of $\SO_0(1,4)$ are studied in the section 4 on the various homogeneous spaces of $\SO_0(1,4)$. It is shown that matrix elements of $\SO_0(1,4)$ admit factorizations with respect to the matrix elements of subgroups $\SO(4)$ and $\SO_0(1,3)$, since double and complex angles are particular cases of the quaternion angles. In turn, matrix elements and spherical functions of $\SO_0(1,4)$ are expressed via multiple hypergeometric series (the product of three hypergeometric functions). 
At the end of the section 4 we consider applications of the spherical functions, defined on the four-dimensional hyperboloid, to hydrogen and antihydrogen atom problems. Spherical functions of the principal series representations of $\SO_0(1,4)$ are considered in the section 5 within the Dixmier-Str\"{o}m representation basis of the de Sitter group $\SO_0(1,4)$ \cite{Dix61,Str69}. \section{The de Sitter group $\SO_0(1,4)$} The homogeneous de Sitter group $\SO_0(1,4)$ consists of all real matrices of fifth order with the unit determinant which leave invariant the quadratic form \[ Q(x)=x^2_0-x^2_1-x^2_2-x^2_3-x^2_4. \] The Lie algebra $\mathfrak{so}(1,4)$ of $\SO_0(1,4)$ consists of all real matrices \begin{equation}\label{Sit1} \begin{bmatrix} 0 & a_{01} & a_{02} & a_{03} & a_{04}\\ a_{01} & 0 &-a_{12}&-a_{13} &-a_{14}\\ a_{02} & a_{12} & 0 & -a_{23} & -a_{24}\\ a_{03} & a_{13} & a_{23} & 0 & -a_{34}\\ a_{04} & a_{14} & a_{24} & a_{34} & 0 \end{bmatrix}. \end{equation} Thus, the algebra $\mathfrak{so}(1,4)$ has basis elements of the form \begin{equation}\label{Sit2} L_{rs}=-e_{rs}+e_{sr},\quad s,r=1,2,3,4,\;\; s<r, \end{equation} \begin{equation}\label{Sit3} L_{0r}=e_{0r}+e_{r0},\quad r=1,2,3,4, \end{equation} where $e_{rs}$ is a matrix with elements $(e_{rs})_{pq}=\delta_{rp}\delta_{sq}$. The basis elements (\ref{Sit2}) and (\ref{Sit3}) satisfy the following commutation relations: \begin{equation} \left[ L_{\mu\nu},L_{\rho\sigma}\right]=g_{\nu\rho}L_{\mu\sigma}+g_{\mu\sigma}L_{\nu\rho}- g_{\mu\rho}L_{\nu\sigma}-g_{\nu\sigma}L_{\mu\rho},\label{Sit4} \end{equation} \[ \rho,\,\mu,\,\nu,\,\sigma=0,1,2,3,4, \] where $g_{k0}=g_{0k}=\delta_{0k}$, $g_{ks}=-\delta_{ks}$; $k,s=1,2,3,4$. $\SO_0(1,4)$ is a 10-parametric group. The maximal compact subgroup $K$ of $\SO_0(1,4)$ is isomorphic to the group $\SO(4)$ and consists of the matrices \[ \begin{pmatrix} 1 & 0\\ 0 & \SO(4) \end{pmatrix}. 
\] Further, Cartan decomposition of the algebra $\mathfrak{so}(1,4)$ and Iwasawa decomposition of the group $\SO_0(1,4)$ are of great importance in the construction of representations of the de Sitter group $\SO_0(1,4)$. So, in the Cartan decomposition $\mathfrak{so}(1,4)=\mathfrak{so}(4)+\mathfrak{p}$ the subspace $\mathfrak{p}$ consists of the basis elements (\ref{Sit3}). The group $\SO_0(1,4)$ has real rank 1. For that reason the commutative subalgebra $\mathfrak{a}$ of $\mathfrak{so}(1,4)$ is one dimensional. We can take the matrix $L_{04}$ as a basis element of $\mathfrak{a}$. Therefore, the commutative subgroup $A$ consists of the matrices \begin{equation}\label{Sit5} \begin{bmatrix} \cosh\alpha & 0 & 0 & 0 & \sinh\alpha\\ 0 & 1 & 0 & 0 & 0\\ 0 & 0 & 1 & 0 & 0\\ 0 & 0 & 0 & 1 & 0\\ \sinh\alpha & 0 & 0 & 0 & \cosh\alpha \end{bmatrix},\quad 0\leq\alpha\leq\infty. \end{equation} Using the relations (\ref{Sit4}), we verify that a nilpotent subalgebra $\mathfrak{n}$ of $\mathfrak{so}(1,4)$ is defined by the matrices $L_{02}+L_{24}$, $L_{03}+L_{34}$ and $L_{01}+L_{14}$. Making an exponential mapping of the subalgebra $\mathfrak{n}$ into the subgroup $N$, we find that the nilpotent subgroup $N$ consists of the matrices \begin{equation}\label{Sit6} \begin{bmatrix} 1+(r^2+s^2+t^2)/2 & t & r & s & -(r^2+s^2+t^2)/2\\ t & 1 & 0 & 0 & -t\\ r & 0 & 1 & 0 & -r\\ s & 0 & 0 & 1 & -s\\ (r^2+s^2+t^2)/2 & t & r & s & 1-(r^2+s^2+t^2)/2 \end{bmatrix}. \end{equation} The subgroups $K$, $A$ and $N$ define the Iwasawa decomposition $\SO_0(1,4)=\SO(4)\cdot NA$. In accordance with the definition of the subgroup $M$ of $\SO_0(1,4)$ (see, for example, \cite{KK85}), the subgroup $M$ is isomorphic to $\SO(3)$. Thus, a minimal parabolic subgroup $P$ has a decomposition $P=\SO(3)\cdot NA$. Since the rank of $\SO_0(1,4)$ is equal to 1, there exist no other parabolic subgroups containing $P$. 
In the group $\SO_0(1,4)$ there are two independent Casimir operators \begin{equation}\label{Sit7} F=L^2_{12}+L^2_{13}+L^2_{14}+L^2_{23}+L^2_{24}+L^2_{34}-L^2_{01}-L^2_{02}-L^2_{03} -L^2_{04}, \end{equation} \begin{multline} W=(L_{12}L_{34}-L_{13}L_{24}+L_{14}L_{23})^2-(L_{02}L_{34}-L_{03}L_{24}+L_{04}L_{23})^2-\\ -(L_{01}L_{34}-L_{03}L_{14}+L_{04}L_{13})^2-(L_{01}L_{24}-L_{02}L_{14}+L_{04}L_{12})^2 -(L_{01}L_{23}-L_{02}L_{13}+L_{03}L_{12})^2.\label{Sit8} \end{multline} It is known that the Casimir operator $W$ is equal to zero on the representations $T^\sigma$ of the class 1 \cite{Boy71}. The Casimir operator $F$ takes the values $\sigma(\sigma+3)$ on the representations $T^\sigma$. With the aim of obtaining self-adjoint operators, we will consider generators $J_{\mu\nu}={\bf i} L_{\mu\nu}$ instead of the elements $L_{\mu\nu}$ of the algebra $\mathfrak{so}(1,4)$. In unitary representations we have $J^\ast_{\mu\nu}=J_{\mu\nu}$. Let us introduce the following designations for the ten generators $J_{\mu\nu}$ of $\SO_0(1,4)$: \begin{eqnarray} \textbf{\emph{M}}&=&(M_1\equiv J_{23},\;M_2\equiv J_{31},\;M_3\equiv J_{12}),\nonumber\\ \textbf{\emph{P}}&=&(P_1\equiv J_{14},\;P_2\equiv J_{24},\;P_3\equiv J_{34}),\nonumber\\ \textbf{\emph{N}}&=&(N_1\equiv J_{01},\;N_2\equiv J_{02},\;N_3\equiv J_{03}),\label{Sit12}\\ P_0&=&J_{04}.\nonumber \end{eqnarray} Casimir operators of the group $\SO_0(1,4)$ in this designation have the form \begin{eqnarray} F&=&(P^2_0+\textbf{\emph{N}}^2)-(\textbf{\emph{P}}^2+\textbf{\emph{M}}^2),\nonumber\\ W&=&(\textbf{\emph{M}}\cdot\textbf{\emph{P}})^2-(P_0\textbf{\emph{M}}-\textbf{\emph{P}}\times\textbf{\emph{N}})^2-(\textbf{\emph{M}}\cdot\textbf{\emph{N}})^2.\nonumber \end{eqnarray} The generators (\ref{Sit12}) satisfy the following commutation relations: \begin{eqnarray} &&\left[ M_k,M_l\right]={\bf i}\varepsilon_{klm}M_m,\quad\left[ N_k,N_l\right]=-{\bf i}\varepsilon_{klm}M_m,\nonumber\\ &&\left[ P_k,P_l\right]={\bf i}\varepsilon_{klm}M_m,\nonumber\\ 
&&\left[ M_k,N_l\right]={\bf i}\varepsilon_{klm}N_m,\quad\left[ M_k,P_l\right]={\bf i}\varepsilon_{klm}P_m,\nonumber\\ &&\left[ M_k,N_k\right]=\left[ M_k,P_k\right]=\left[ M_k, P_0\right]=0,\label{Sit13}\\ &&\left[ P_0,N_k\right]={\bf i} P_k,\;\left[ P_0,P_k\right]={\bf i} N_k,\;\left[ P_k,N_l\right]={\bf i}\delta_{kl}P_0,\nonumber \end{eqnarray} where $\varepsilon_{klm}$ is an antisymmetric tensor of third rank, which takes the values 0 or $\pm 1$ ($k,l,m=1,2,3$). \subsection{Quaternionic description of $\SO_0(1,4)$} Universal covering of the de Sitter group $\SO_0(1,4)$ is a spinor group $\spin_+(1,4)\simeq\Sp(1,1)$ \cite{Hel78,Var04}. In its turn, $\spin_+(1,4)\inC\kern -0.2em \ell^+_{1,4}$, where $C\kern -0.2em \ell^+_{1,4}$ is an even subalgebra of the Clifford algebra $C\kern -0.2em \ell_{1,4}$ associated with the de Sitter space $\R^{1,4}$. Further, there is an isomorphism $C\kern -0.2em \ell^+_{1,4}\simeqC\kern -0.2em \ell_{1,3}$, where $C\kern -0.2em \ell_{1,3}$ is a space-time algebra associated with the Minkowski space $\R^{1,3}$. In virtue of the Karoubi theorem \cite{Kar78}, the space-time algebra $C\kern -0.2em \ell_{1,3}$ admits the following decomposition\footnote{This decomposition is a particular case of the most general formula $C\kern -0.2em \ell(V\oplus V^\prime,Q\oplus Q^\prime)\simeqC\kern -0.2em \ell(V,Q)\otimesC\kern -0.2em \ell(V^\prime,-Q^\prime)$, where $V$ and $V^\prime$ are vector spaces endowed with quadratic forms $Q$ and $Q^\prime$ over the field $\F$, $\dim V$ is even \cite[prop. 3.16]{Kar78}.}: \[ C\kern -0.2em \ell_{1,3}\simeqC\kern -0.2em \ell_{1,1}\otimesC\kern -0.2em \ell_{0,2}. 
\] The decomposition $C\kern -0.2em \ell_{1,3}\simeqC\kern -0.2em \ell_{1,1}\otimesC\kern -0.2em \ell_{0,2}$ means that for the algebra $C\kern -0.2em \ell_{1,3}$ there exists a transition from the real coordinates to quaternion coordinates of the form $a+b\zeta_1+c\zeta_2+d\zeta_1\zeta_2$, where $\zeta_1=\mbox{\bf e}_{123}$, $\zeta_2=\mbox{\bf e}_{124}$. At this point, $\zeta^2_1=\zeta^2_2=(\zeta_1\zeta_2)^2=-1$, $\mbox{\bf e}^2_1=1$, $\mbox{\bf e}^2_2=\mbox{\bf e}^2_3=\mbox{\bf e}^2_4=-1$. It is easy to see that the units $\zeta_1$ and $\zeta_2$ form a basis of the quaternion algebra, since $\zeta_1\sim{\bf i}$, $\zeta_2\sim{\bf j}$, $\zeta_1\zeta_2\sim{\bf k}$. Therefore, a general element \[ \mathcal{A}_{C\kern -0.2em \ell_{1,3}}=a^0\mbox{\bf e}_0+\sum^4_{i=1}a^i\mbox{\bf e}_i+\sum^4_{i=1}\sum^4_{j=1}a^{ij}\mbox{\bf e}_i\mbox{\bf e}_j +\sum^4_{i=1}\sum^4_{j=1}\sum^4_{k=1}a^{ijk}\mbox{\bf e}_i\mbox{\bf e}_j\mbox{\bf e}_k+ a^{1234}\mbox{\bf e}_1\mbox{\bf e}_2\mbox{\bf e}_3\mbox{\bf e}_4 \] of the space-time algebra $C\kern -0.2em \ell_{1,3}$ can be written in the form \[ \mathcal{A}_{C\kern -0.2em \ell_{1,3}}=C\kern -0.2em \ell^0_{1,1}+C\kern -0.2em \ell^1_{1,1}\zeta_1+C\kern -0.2em \ell^2_{1,1}\zeta_2+ C\kern -0.2em \ell^3_{1,1}\zeta_1\zeta_2, \] where each coefficient $C\kern -0.2em \ell^i_{1,1}$ $(i=0,1,2,3)$ is isomorphic to the anti-quaternion algebra $C\kern -0.2em \ell_{1,1}$\footnote{$C\kern -0.2em \ell_{1,1}$ is a real Clifford algebra of the type $p-q\equiv 0\pmod{8}$ with a division ring $\K\simeq\R$. 
This algebra is called the anti-quaternion algebra by Rozenfel'd \cite{Roz55}.}: \begin{eqnarray} C\kern -0.2em \ell^0_{1,1}&=&a^0+a^1\mbox{\bf e}_1+a^2\mbox{\bf e}_2+a^{12}\mbox{\bf e}_{12},\nonumber\\ C\kern -0.2em \ell^1_{1,1}&=&a^{123}-a^{23}\mbox{\bf e}_1-a^{13}\mbox{\bf e}_2-a^3\mbox{\bf e}_{12},\nonumber\\ C\kern -0.2em \ell^2_{1,1}&=&a^{124}-a^{24}\mbox{\bf e}_1+a^{14}\mbox{\bf e}_2+a^4\mbox{\bf e}_{12},\nonumber\\ C\kern -0.2em \ell^3_{1,1}&=&-a^{34}-a^{134}\mbox{\bf e}_1-a^{234}\mbox{\bf e}_2+a^{1234}\mbox{\bf e}_{12}.\nonumber \end{eqnarray} It is easy to verify that the units $\zeta_1$ and $\zeta_2$ commute with all the basis elements of $C\kern -0.2em \ell_{1,1}$. Further, let us define matrix representations of the quaternion units $\zeta_1$ and $\zeta_2$ as follows: \[ \zeta_1\longmapsto\begin{pmatrix} 0 & -1\\ 1 & 0\end{pmatrix},\quad\zeta_2\longmapsto\begin{pmatrix} 0 & {\bf i}\\ {\bf i} & 0\end{pmatrix}. \] Thus, in virtue of the Karoubi theorem we have \[ C\kern -0.2em \ell_{1,3}\simeq\Mat_2(C\kern -0.2em \ell_{1,1})=\begin{bmatrix}C\kern -0.2em \ell^0_{1,1}-{\bf i}C\kern -0.2em \ell^3_{1,1} & -C\kern -0.2em \ell^1_{1,1}+{\bf i}C\kern -0.2em \ell^2_{1,1}\\ C\kern -0.2em \ell^1_{1,1}+{\bf i}C\kern -0.2em \ell^2_{1,1} & C\kern -0.2em \ell^0_{1,1}+{\bf i}C\kern -0.2em \ell^3_{1,1} \end{bmatrix}. 
\] Or, \begin{gather} C\kern -0.2em \ell_{1,3}\simeq\Mat_2(C\kern -0.2em \ell_{1,1})=\begin{bmatrix} a & b\\ c & d\end{bmatrix}=\nonumber\\ \begin{bmatrix}\scriptstyle a^0-a^{134}+(a^1+a^{34}){\bf i}-(a^{13}+a^4){\bf j}+(a^{14}-a^3){\bf k} &\scriptstyle a^{24}-a^{123}+(a^{23}+a^{124}){\bf i}+(a^{13}-a^4){\bf j}+(a^{14}+a^3){\bf k}\\ \scriptstyle a^{24}+a^{123}+(a^{124}-a^{23}){\bf i}-(a^{13}+a^4){\bf j}+(a^{14}-a^3){\bf k} & \scriptstyle a^0+a^{134}+(a^1-a^{34}){\bf i}+(a^2-a^{1234}){\bf j}+(a^{12}+a^{234}){\bf k} \end{bmatrix},\nonumber \end{gather} where ${\bf i}=\mbox{\bf e}_1$, ${\bf j}=\mbox{\bf e}_2$, ${\bf k}=\mbox{\bf e}_{12}$ are anti-quaternion units, which satisfy the relations \begin{gather} {\bf i}^2=-1,\quad{\bf j}^2=1,\quad{\bf k}^2=1,\nonumber\\ {\bf i}{\bf j}=-{\bf j}{\bf i}={\bf k},\quad{\bf k}{\bf i}=-{\bf i}{\bf k}={\bf j},\quad{\bf k}{\bf j}=-{\bf j}{\bf k}={\bf i}. \nonumber \end{gather} In this way, the universal covering of the de Sitter group $\SO_0(1,4)$ is \[\ar \spin_+(1,4)\simeq\left\{\begin{bmatrix} a & b \\ c & d \end{bmatrix}\in\BH(2):\;\;\det\begin{bmatrix}a & b \\ c & d \end{bmatrix}=1\right\}=\Sp(1,1), \] where $\det\begin{bmatrix}a & b \\ c & d \end{bmatrix}=1$ means that \[ \overline{a}b=\overline{c}d,\quad |a|^2-|c|^2=1,\quad |d|^2-|b|^2=1, \] or, \[ a\overline{c}=b\overline{d},\quad |a|^2-|b|^2=1,\quad |d|^2-|c|^2=1, \] where $\overline{a}$ denotes quaternion conjugation. 
The ten-parameter group $\spin_+(1,4)\simeq\Sp(1,1)$ has the following one-parameter subgroups: \[ m_{12}(\psi)=\begin{pmatrix} e^{{\bf i}\frac{\psi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi}{2}}\end{pmatrix},\quad m_{13}(\varphi)=\begin{pmatrix} \cos\frac{\varphi}{2} & -\sin\frac{\varphi}{2}\\ \sin\frac{\varphi}{2} & \cos\frac{\varphi}{2}\end{pmatrix},\quad m_{23}(\theta)=\begin{pmatrix} \cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2}\end{pmatrix}, \] \[ p_{14}(\phi)=\begin{pmatrix}\cos\frac{\phi}{2} & {\bf i}\sin\frac{\phi}{2}\\ {\bf i}\sin\frac{\phi}{2} & \cos\frac{\phi}{2}\end{pmatrix},\quad p_{24}(\varsigma)=\begin{pmatrix}\cos\frac{\varsigma}{2} & -{\bf j}\sin\frac{\varsigma}{2}\\ {\bf j}\sin\frac{\varsigma}{2} & \cos\frac{\varsigma}{2}\end{pmatrix},\quad p_{34}(\chi)=\begin{pmatrix}e^{{\bf k}\frac{\chi}{2}} & 0\\ 0 & e^{-{\bf k}\frac{\chi}{2}}\end{pmatrix}, \] \[ n_{01}(\tau)=\begin{pmatrix}\cosh\frac{\tau}{2} & \sinh\frac{\tau}{2}\\ \sinh\frac{\tau}{2} & \cosh\frac{\tau}{2}\end{pmatrix},\quad n_{02}(\epsilon)=\begin{pmatrix}\cosh\frac{\epsilon}{2} & {\bf i}\sinh\frac{\epsilon}{2}\\ -{\bf i}\sinh\frac{\epsilon}{2} & \cosh\frac{\epsilon}{2}\end{pmatrix},\quad n_{03}(\varepsilon)=\begin{pmatrix}e^{\frac{\varepsilon}{2}} & 0\\ 0 & e^{-\frac{\varepsilon}{2}}\end{pmatrix}, \] \[ p_{04}(\omega)=\begin{pmatrix}e^{\frac{\omega}{2}} & 0\\ 0 & e^{-\frac{\omega}{2}}\end{pmatrix}, \] where the ranges of parameters (Euler angles) are \begin{equation}\label{QEA} {\renewcommand{\arraystretch}{1.05} \begin{array}{ccccc} 0 &\leq&\theta& \leq& \pi,\\ 0 &\leq&\varphi& <&2\pi,\\ -2\pi&\leq&\psi&<&2\pi, \end{array}\quad\quad \begin{array}{ccccc} 0 &\leq&\phi& \leq& \pi,\\ 0 &\leq&\varsigma& <&2\pi,\\ -2\pi&\leq&\chi&<&2\pi, \end{array}} \end{equation} \begin{equation}\label{QEA2} {\renewcommand{\arraystretch}{1.05} \begin{array}{ccccc} -\infty &<&\tau&<&+\infty,\\ -\infty&<&\epsilon&<&+\infty,\\ 
-\infty&<&\varepsilon&<&+\infty,\\ -\infty&<&\omega&<&+\infty. \end{array}} \end{equation} Let us find a general transformation $\mathfrak{q}$ of $\spin_+(1,4)$ in the space of representation with the smallest weight (a so-called fundamental representation). In general, this form of the element $g\in G$ is related closely with the Cartan decomposition $G=KAK$, where $G$ is a connected Lie group, $K$ is a maximal compact subgroup of $G$ and $A$ is a maximal commutative subgroup of $G$. For example, the 3-parameter group $\SU(2)$ (a universal covering of $\SO(3)$) has the following subgroups: \begin{equation}\label{Sub1} K=\left\{\begin{pmatrix}\cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2}\end{pmatrix}\right\},\quad A=\left\{\begin{pmatrix}e^{{\bf i}\frac{t}{2}} & 0\\ 0 & e^{-{\bf i}\frac{t}{2}}\end{pmatrix}\right\}, \end{equation} where $t=\{\varphi,\psi\}$. Therefore, the Cartan decomposition $\SU(2)=KAK$ of the element $u\in\SU(2)$ is (see, for example, \cite{VK90}) \begin{equation}\label{Elem1} g\equiv u(\varphi,\theta,\psi)=\begin{pmatrix} e^{{\bf i}\frac{\varphi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi}{2}}\end{pmatrix}\!\!\begin{pmatrix}\cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2}\end{pmatrix}\!\!\begin{pmatrix}e^{{\bf i}\frac{\psi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi}{2}}\end{pmatrix}, \end{equation} where $\varphi$, $\theta$, $\psi$ are Euler angles. 
In its turn, the 6-parameter group $\spin_+(1,3)\simeq\SL(2,\hbox{\bb C})$ (a universal covering of the Lorentz group $\SO_0(1,3)$) is a complex extension of the group $\SU(2)$, that is, $\SL(2,\hbox{\bb C})=[\SU(2)]^c=K^cA^cK^c$, where $K^c$ and $A^c$ are complex extensions of the groups (\ref{Sub1}): \begin{eqnarray} K^c&=&\left\{\begin{pmatrix}\cos\frac{\theta^c}{2} & {\bf i}\sin\frac{\theta^c}{2}\\ {\bf i}\sin\frac{\theta^c}{2} & \cos\frac{\theta^c}{2}\end{pmatrix}=\begin{pmatrix}\cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2}\end{pmatrix} \begin{pmatrix} \cosh\frac{\tau}{2} & \sinh\frac{\tau}{2}\\ \sinh\frac{\tau}{2} & \cosh\frac{\tau}{2}\end{pmatrix}\right\},\nonumber\\ A^c&=&\left\{\begin{pmatrix}e^{{\bf i}\frac{t^c}{2}} & 0\\ 0 & e^{-{\bf i}\frac{t^c}{2}}\end{pmatrix}=\begin{pmatrix}e^{{\bf i}\frac{p}{2}} & 0\\ 0 & e^{-{\bf i}\frac{p}{2}}\end{pmatrix}\begin{pmatrix} e^{\frac{q}{2}} & 0\\ 0 & e^{-\frac{q}{2}}\end{pmatrix}\right\},\nonumber \end{eqnarray} where $p=\{\varphi,\psi\}$, $q=\{\epsilon,\varepsilon\}$. Thus, the Cartan decomposition $\SL(2,\hbox{\bb C})=K^cA^cK^c$ of the element $\mathfrak{g}\in\spin_+(1,3)\simeq\SL(2,\hbox{\bb C})$ is \begin{gather}g\equiv\mathfrak{g}(\varphi^c,\theta^c,\psi^c)= \mathfrak{g}(\varphi,\,\epsilon,\,\theta,\,\tau,\,\psi,\,\varepsilon)= \nonumber\\[0.2cm] \begin{pmatrix} e^{{\bf i}\frac{\varphi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi}{2}} \end{pmatrix}{\renewcommand{\arraystretch}{1.1}\!\!\!\begin{pmatrix} e^{\frac{\epsilon}{2}} & 0\\ 0 & e^{-\frac{\epsilon}{2}} \end{pmatrix}}\!\!\!{\renewcommand{\arraystretch}{1.3}\begin{pmatrix} \cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2} \end{pmatrix}\!\!\!\! 
\begin{pmatrix} \cosh\frac{\tau}{2} & \sinh\frac{\tau}{2}\\ \sinh\frac{\tau}{2} & \cosh\frac{\tau}{2} \end{pmatrix}}\!\!\!{\renewcommand{\arraystretch}{1.1}\begin{pmatrix} e^{{\bf i}\frac{\psi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi}{2}} \end{pmatrix}}\!\!\! \begin{pmatrix} e^{\frac{\varepsilon}{2}} & 0\\ 0 & e^{-\frac{\varepsilon}{2}} \end{pmatrix}=\nonumber\\[0.2cm] =\begin{pmatrix} e^{{\bf i}\frac{\varphi^c}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi^c}{2}}\end{pmatrix}\!\!\!\begin{pmatrix}\cos\frac{\theta^c}{2} & {\bf i}\sin\frac{\theta^c}{2}\\ {\bf i}\sin\frac{\theta^c}{2} & \cos\frac{\theta^c}{2}\end{pmatrix}\!\!\!\begin{pmatrix}e^{{\bf i}\frac{\psi^c}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi^c}{2}}\end{pmatrix},\label{Elem2} \end{gather} where \begin{eqnarray} \varphi^c&=&\varphi-{\bf i}\epsilon,\nonumber\\ \theta^c&=&\theta-{\bf i}\tau,\nonumber\\ \psi^c&=&\psi-{\bf i}\varepsilon\nonumber \end{eqnarray} are {\it complex Euler angles}. Hence it follows that the element (\ref{Elem2}) is a complex extension of (\ref{Elem1}). 
Further, the 6-parameter spinor group $\spin(4)$ (a universal covering of $\SO(4)$) due to an isomorphism $\spin(4)\simeq\SU(2)\otimes\SU(2)$ admits the decomposition $\spin(4)=K^eA^eK^e$, where $K^e$ and $A^e$ are double extensions of the subgroups (\ref{Sub1}): \begin{eqnarray} K^e&=&\left\{\begin{pmatrix}\cos\frac{\theta^e}{2} & {\bf i}\sin\frac{\theta^e}{2}\\ {\bf i}\sin\frac{\theta^e}{2} & \cos\frac{\theta^e}{2}\end{pmatrix}=\begin{pmatrix}\cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2}\end{pmatrix} \begin{pmatrix} \cos\frac{\phi}{2} & {\bf i}\sin\frac{\phi}{2}\\ {\bf i}\sin\frac{\phi}{2} & \cos\frac{\phi}{2}\end{pmatrix}\right\},\nonumber\\ A^e&=&\left\{\begin{pmatrix}e^{{\bf i}\frac{t^e}{2}} & 0\\ 0 & e^{-{\bf i}\frac{t^e}{2}}\end{pmatrix}=\begin{pmatrix}e^{{\bf i}\frac{p}{2}} & 0\\ 0 & e^{-{\bf i}\frac{p}{2}}\end{pmatrix}\begin{pmatrix} e^{{\bf i}\frac{q}{2}} & 0\\ 0 & e^{-{\bf i}\frac{q}{2}}\end{pmatrix}\right\},\nonumber \end{eqnarray} where $p=\{\varphi,\psi\}$, $q=\{\varsigma,\chi\}$. In this case, the Cartan decomposition $\spin(4)=K^eA^eK^e$ of the element $g\in\SU(2)\otimes\SU(2)$ is \begin{gather}g\equiv g(\varphi^e,\theta^e,\psi^e)= g(\varphi,\,\varsigma,\,\theta,\,\phi,\,\psi,\,\chi)=\nonumber\\[0.2cm] \begin{pmatrix} e^{{\bf i}\frac{\varphi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi}{2}} \end{pmatrix}{\renewcommand{\arraystretch}{1.1}\!\!\!\begin{pmatrix} e^{{\bf i}\frac{\varsigma}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varsigma}{2}} \end{pmatrix}}\!\!\!{\renewcommand{\arraystretch}{1.3}\begin{pmatrix} \cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2} \end{pmatrix}\!\!\!\! \begin{pmatrix} \cos\frac{\phi}{2} & {\bf i}\sin\frac{\phi}{2}\\ {\bf i}\sin\frac{\phi}{2} & \cos\frac{\phi}{2} \end{pmatrix}}\!\!\!{\renewcommand{\arraystretch}{1.1}\begin{pmatrix} e^{{\bf i}\frac{\psi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi}{2}} \end{pmatrix}}\!\!\!\! 
\begin{pmatrix} e^{{\bf i}\frac{\chi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\chi}{2}} \end{pmatrix}=\nonumber\\[0.2cm] =\begin{pmatrix} e^{{\bf i}\frac{\varphi^e}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi^e}{2}}\end{pmatrix}\!\!\!\begin{pmatrix}\cos\frac{\theta^e}{2} & {\bf i}\sin\frac{\theta^e}{2}\\ {\bf i}\sin\frac{\theta^e}{2} & \cos\frac{\theta^e}{2}\end{pmatrix}\!\!\!\begin{pmatrix}e^{{\bf i}\frac{\psi^e}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi^e}{2}}\end{pmatrix},\label{Elem3} \end{gather} where \begin{equation}\label{DEA} {\renewcommand{\arraystretch}{1.3} \left.\begin{array}{ccc} \theta^e&=&\theta+\phi,\\ \varphi^e&=&\varphi+\varsigma,\\ \psi^e&=&\psi+\chi \end{array}\right\}} \end{equation} are {\it double Euler angles}. It is easy to see that the element (\ref{Elem3}) is a double extension of (\ref{Elem1}). Finally, the 10-parameter spinor group $\spin_+(1,4)\simeq\Sp(1,1)$ (a universal covering of the de Sitter group $\SO_0(1,4)$) is defined in terms of $2\times 2$ quaternionic matrices. This fact allows us to introduce a decomposition $\Sp(1,1)=K^qA^qK^q$, where $K^q$ and $A^q$ are quaternionic extensions of the groups (\ref{Sub1}): \begin{eqnarray} K^q&=&\left\{\begin{pmatrix}\cos\frac{\theta^q}{2} & {\bf i}\sin\frac{\theta^q}{2}\\ {\bf i}\sin\frac{\theta^q}{2} & \cos\frac{\theta^q}{2}\end{pmatrix}=\begin{pmatrix}\cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2}\end{pmatrix} \begin{pmatrix} \cosh\frac{\tau}{2} & \sinh\frac{\tau}{2}\\ \sinh\frac{\tau}{2} & \cosh\frac{\tau}{2}\end{pmatrix}\begin{pmatrix} \cos\frac{\phi}{2} & {\bf i}\sin\frac{\phi}{2}\\ {\bf i}\sin\frac{\phi}{2} & \cos\frac{\phi}{2}\end{pmatrix}\right\},\nonumber\\ A^q&=&\left\{\begin{pmatrix}e^{{\bf i}\frac{\varphi^q}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi^q}{2}}\end{pmatrix}=\begin{pmatrix}e^{{\bf i}\frac{\varphi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi}{2}}\end{pmatrix}\begin{pmatrix} e^{\frac{\epsilon}{2}} & 0\\ 0 & 
e^{-\frac{\epsilon}{2}}\end{pmatrix}\begin{pmatrix} e^{{\bf k}\frac{\varsigma}{2}} & 0 \\ 0 & e^{-{\bf k}\frac{\varsigma}{2}}\end{pmatrix},\right.\nonumber\\ &&\left.\begin{pmatrix}e^{{\bf i}\frac{\psi^q}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi^q}{2}}\end{pmatrix}=\begin{pmatrix}e^{{\bf i}\frac{\psi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi}{2}}\end{pmatrix}\begin{pmatrix} e^{\frac{\varepsilon}{2}} & 0\\ 0 & e^{-\frac{\varepsilon}{2}}\end{pmatrix}\begin{pmatrix} e^{\frac{\omega}{2}} & 0 \\ 0 & e^{-\frac{\omega}{2}}\end{pmatrix}\begin{pmatrix}e^{{\bf j}\frac{\chi}{2}} & 0\\ 0 & e^{-{\bf j}\frac{\chi}{2}}\end{pmatrix}\right\}.\nonumber \end{eqnarray} Therefore, the Cartan decomposition $\Sp(1,1)=K^qA^qK^q$ of the element $\mathfrak{q}\in\Sp(1,1)$ is \begin{gather} g\equiv\mathfrak{q}(\varphi^q,\theta^q,\psi^q)=\mathfrak{q}(\varphi,\epsilon,\varsigma,\theta, \tau,\phi,\psi,\varepsilon,\omega,\chi)=\nonumber\\[0.2cm] =\begin{pmatrix}e^{{\bf i}\frac{\varphi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi}{2}}\end{pmatrix}\!\!\!\begin{pmatrix} e^{\frac{\epsilon}{2}} & 0\\ 0 & e^{-\frac{\epsilon}{2}}\end{pmatrix}\!\!\!\begin{pmatrix} e^{{\bf k}\frac{\varsigma}{2}} & 0 \\ 0 & e^{-{\bf k}\frac{\varsigma}{2}}\end{pmatrix}\!\!\! \begin{pmatrix}\cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2}\end{pmatrix}\!\!\! 
\begin{pmatrix} \cosh\frac{\tau}{2} & \sinh\frac{\tau}{2}\\ \sinh\frac{\tau}{2} & \cosh\frac{\tau}{2}\end{pmatrix}\!\!\!\begin{pmatrix} \cos\frac{\phi}{2} & {\bf i}\sin\frac{\phi}{2}\\ {\bf i}\sin\frac{\phi}{2} & \cos\frac{\phi}{2}\end{pmatrix}\times\nonumber\\[0.2cm] \times\begin{pmatrix}e^{{\bf i}\frac{\psi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi}{2}}\end{pmatrix}\!\!\!\begin{pmatrix} e^{\frac{\varepsilon}{2}} & 0\\ 0 & e^{-\frac{\varepsilon}{2}}\end{pmatrix}\!\!\!\begin{pmatrix} e^{\frac{\omega}{2}} & 0 \\ 0 & e^{-\frac{\omega}{2}}\end{pmatrix}\!\!\!\begin{pmatrix}e^{{\bf j}\frac{\chi}{2}} & 0\\ 0 & e^{-{\bf j}\frac{\chi}{2}}\end{pmatrix}=\nonumber\\[0.2cm] =\begin{pmatrix} e^{{\bf i}\frac{\varphi^q}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi^q}{2}}\end{pmatrix}\!\!\!\begin{pmatrix}\cos\frac{\theta^q}{2} & {\bf i}\sin\frac{\theta^q}{2}\\ {\bf i}\sin\frac{\theta^q}{2} & \cos\frac{\theta^q}{2}\end{pmatrix}\!\!\!\begin{pmatrix}e^{{\bf i}\frac{\psi^q}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\psi^q}{2}}\end{pmatrix},\label{Elem4} \end{gather} where \begin{equation}\label{QEuler} {\renewcommand{\arraystretch}{1.3} \left.\begin{array}{ccc} \theta^q&=&\theta+\phi-{\bf i}\tau,\\ \varphi^q&=&\varphi-{\bf i}\epsilon+{\bf j}\varsigma,\\ \psi^q&=&\psi-{\bf i}\varepsilon-{\bf i}\omega+{\bf k}\chi \end{array}\right\}} \end{equation} are {\em quaternion Euler angles}\footnote{Quaternion Euler angles of $\spin_+(1,4)\simeq\Sp(1,1)$ contain complex Euler angles $\theta^c=\theta-{\bf i}\tau$, $\varphi^c=\varphi-{\bf i}\epsilon$, $\psi^c=\psi-{\bf i}\varepsilon$ of the group $\spin_+(1,3)\simeq\SL(2,\hbox{\bb C})$ as a particular case (for more details see \cite{Var06}).}. Hence it immediately follows that the element (\ref{Elem4}) is a quaternionic extension of (\ref{Elem1}). \subsection{Differential operators on the group $\Sp(1,1)$} \begin{sloppypar}\noindent Let $\Omega(t)$ be the one-parameter subgroup of $\Sp(1,1)$ and let $\omega(t)$ be a matrix from the group $\Omega(t)$. 
The operators of the right regular representation of $\Sp(1,1)$, corresponding to the elements of the subgroup $\Omega(t)$, transfer quaternion functions $f(\mathfrak{q})$ into $R(\omega(t))f(\mathfrak{q})=f(\mathfrak{q}\omega(t))$. For that reason the infinitesimal operator of the right regular representation\index{representation!right regular} $R(\mathfrak{q})$, associated with one--parameter subgroup $\Omega(t)$, transfers the function $f(\mathfrak{q})$ into $\frac{df(\mathfrak{q}\omega(t))}{dt}$ at $t=0$.\end{sloppypar} Let us denote quaternion Euler angles of the element $\mathfrak{q}\omega(t)$ via $\varphi^q(t),\theta^q(t),\psi^q(t)$. Then there is an equality \[ \left.\frac{df(\mathfrak{q}\omega(t))}{dt}\right|_{t=0}= \frac{\partial f}{\partial\varphi^q}\left(\varphi^q(0)\right)^\prime+ \frac{\partial f}{\partial\theta^q}\left(\theta^q(0)\right)^\prime+ \frac{\partial f}{\partial\psi^q}\left(\psi^q(0)\right)^\prime. \] The infinitesimal operator\index{operator!infinitesimal} $J_\omega$, corresponding to the subgroup $\Omega(t)$, has a form \[ J_\omega= \left(\varphi^q(0)\right)^\prime\frac{\partial}{\partial\varphi^q}+ \left(\theta^q(0)\right)^\prime\frac{\partial}{\partial\theta^q}+ \left(\psi^q(0)\right)^\prime\frac{\partial}{\partial\psi^q}. \] Let us calculate infinitesimal operators $J^q_{\omega_1}$, $J^q_{\omega_2}$, $J^q_{\omega_3}$ corresponding to the quaternion subgroups $\Omega^q_1$, $\Omega^q_2$, $\Omega^q_3$. The quaternion subgroups $\Omega^q_i$ ($i=1,2,3$) arise from the fact that all the ten parameters of $\Sp(1,1)$ can be divided in three groups according the Cartan decomposition (\ref{Elem4}) for the element $\mathfrak{q}\in\Sp(1,1)$. The subgroup $\Omega^q_3$ consists of the matrices \[ \omega_3(t^q)= \begin{pmatrix} e^{{\bf i}\frac{t^q}{2}} & 0\\ 0 & e^{-{\bf i}\frac{t^q}{2}} \end{pmatrix}, \] where the variable $t^q$ has the form of quaternionic angles. 
Let $\mathfrak{q}=\mathfrak{q}(\varphi^q,\theta^q,\psi^q)$ be a matrix with quaternion Euler angles (the matrix (\ref{Elem4})) $\varphi^q=\varphi-{\bf i}\epsilon+{\bf j}\varsigma$, $\theta^q=\theta+\phi-{\bf i}\tau$, $\psi^q=\psi-{\bf i}\varepsilon-{\bf i}\omega+{\bf k}\chi$. Therefore, the Euler angles of the matrix $\mathfrak{q}\omega_3(t^q)$ are equal to $\varphi^q$, $\theta^q$, $\psi^q=t-{\bf i} t-{\bf i} t+{\bf k} t$. Hence it follows that \begin{gather} \varphi^\prime(0)=0,\;\; \epsilon^\prime(0)=0,\;\;\omega^\prime(0)=-{\bf i},\;\; \theta^\prime(0)=0,\;\;\phi^\prime(0)=0,\;\; \tau^\prime(0)=0,\;\; \psi^\prime(0)=1,\nonumber\\ \varepsilon^\prime(0)=-{\bf i},\;\;\varsigma^\prime(0)={\bf j},\;\;\chi^\prime(0)={\bf k}. \nonumber \end{gather} So, the operator $J^q_{\omega_3}$, corresponding to the subgroup $\Omega^q_3$, has the form \begin{equation}\label{J3} J^q_{\omega_3}=\frac{\partial}{\partial\psi}- {\bf i}\frac{\partial}{\partial\varepsilon}-{\bf i}\frac{\partial}{\partial\omega} +{\bf k}\frac{\partial}{\partial\chi}. \end{equation} Whence \begin{equation}\label{IO3} M_3=\frac{\partial}{\partial\psi},\quad N_3=\frac{\partial}{\partial\varepsilon},\quad P_3=\frac{\partial}{\partial\chi},\quad P_0=\frac{\partial}{\partial\omega}. \end{equation} Let us calculate the infinitesimal operator $J^q_{\omega_1}$ corresponding to the quaternion subgroup $\Omega^q_1$. The subgroup $\Omega^q_1$ consists of the matrices \[ \omega_1(t^q)= {\renewcommand{\arraystretch}{1.3} \begin{pmatrix} \cos\frac{t^q}{2} & {\bf i}\sin\frac{t^q}{2}\\ {\bf i}\sin\frac{t^q}{2} & \cos\frac{t^q}{2} \end{pmatrix}}. \] The Euler angles of these matrices are equal to $0,\,t^q=t+et-{\bf i} t,\,0$, where $e$ is the double unit.
Let us represent the matrix $\mathfrak{q}\omega_1(t^q)$ by the following product: \[ {\renewcommand{\arraystretch}{1.3} \mathfrak{q}\omega_1(t^q)= \begin{pmatrix} \cos\frac{\theta^q}{2}e^{{\bf i}\frac{(\varphi^q+\psi^q)}{2}} & {\bf i}\sin\frac{\theta^q}{2}e^{{\bf i}\frac{(\varphi^q-\psi^q)}{2}}\\ {\bf i}\sin\frac{\theta^q}{2}e^{{\bf i}\frac{(\psi^q-\varphi^q)}{2}} & \cos\frac{\theta^q}{2}e^{-{\bf i}\frac{(\varphi^q+\psi^q)}{2}} \end{pmatrix} \begin{pmatrix} \cos\frac{t^q}{2} & {\bf i}\sin\frac{t^q}{2}\\ {\bf i}\sin\frac{t^q}{2} & \cos\frac{t^q}{2} \end{pmatrix}. } \] Multiplying the matrices on the right-side of the latter expression, we obtain \begin{eqnarray} \cos\theta^q(t)&=&\cos\theta^q\cos t^q-\sin\theta^q\sin t^q\cos\psi^q, \label{SL9}\\[0.2cm] e^{{\bf i}\varphi^q(t)}&=&e^{{\bf i}\varphi^q}\frac{\sin\theta^q\cos t^q+ \cos\theta^q\sin t^q\cos\psi^q+{\bf i}\sin t^q\sin\psi^q} {\sin\theta^q(t)},\label{SL10}\\[0.2cm] e^{{\bf i}\frac{[\varphi^q(t)+\psi^q(t)]}{2}}&=&e^{{\bf i}\frac{\varphi^q}{2}} \frac{\cos\frac{\theta^q}{2}\cos\frac{t^q}{2}e^{\frac{{\bf i}\psi^q}{2}}- \sin\frac{\theta^q}{2}\sin\frac{t^q}{2}e^{-{\bf i}\frac{\psi^q}{2}}} {\cos\frac{\theta^q(t)}{2}}.\label{SL11} \end{eqnarray} For calculation of derivatives $\varphi^\prime(t)$, $\epsilon^\prime(t)$, $\omega^\prime(t)$, $\theta^\prime(t)$, $\phi^\prime(t)$, $\tau^\prime(t)$, $\psi^\prime(t)$, $\varepsilon^\prime(t)$, $\varsigma^\prime(t)$, $\chi^\prime(t)$ at $t=0$ we must differentiate on $t$ the both parts of the each equality from (\ref{SL9})--(\ref{SL11}). At this point, we have $\varphi(0)=\varphi$, $\epsilon(0)=\epsilon$, $\ldots$, $\chi(0)=\chi$. So, let us differentiate the both parts of (\ref{SL9}). As a result, we obtain \[ -\sin\theta^q(t)\left[\theta^\prime(t)+e\phi^\prime(t)-{\bf i}\tau^\prime(t) \right]=-\cos\theta^q\sin t^q(1+e-{\bf i})-\sin\theta^q\cos t^q\cos\psi^q(1+e-{\bf i}). 
\] Taking $t=0$, we find that \[ \theta^\prime(0)+e\phi^\prime(0)-{\bf i}\tau^\prime(0)=\cos\psi^q(1+e-{\bf i}). \] Whence \[ \theta^\prime(0)=\cos\psi^q,\quad\phi^\prime(0)=\cos\psi^q,\quad \tau^\prime(0)=\cos\psi^q. \] Differentiating now both parts of (\ref{SL10}) and taking $t=0$, we obtain \[ \varphi^\prime(0)-{\bf i}\epsilon^\prime(0)+{\bf j}\varsigma^\prime(0)= \frac{\sin\psi^q(1+e-{\bf i})}{\sin\theta^q}. \] Therefore, \[ \varphi^\prime(0)=\frac{\sin\psi^q}{\sin\theta^q},\quad \epsilon^\prime(0)=\frac{\sin\psi^q}{\sin\theta^q},\quad \varsigma^\prime(0)=\frac{\sin\psi^q}{\sin\theta^q}. \] Further, differentiating both parts of (\ref{SL11}) and taking $t=0$, we find that \[ \psi^\prime(0)-{\bf i}\varepsilon^\prime(0)-{\bf i}\omega^\prime(0)+ {\bf k}\chi^\prime(0)=(-1-e+{\bf i})\cot\theta^q\sin\psi^q \] and \[ \psi^\prime(0)=\varepsilon^\prime(0)=\chi^\prime(0)= -\cot\theta^q\sin\psi^q,\quad\omega^\prime(0)=0. \] Thus, we have \begin{equation}\label{J1} J^q_{\omega_1}=M_1+P_1-{\bf i} N_1, \end{equation} where \begin{eqnarray} M_1&=&\cos\psi^q\frac{\partial}{\partial\theta}+ \frac{\sin\psi^q}{\sin\theta^q}\frac{\partial}{\partial\varphi}- \cot\theta^q\sin\psi^q\frac{\partial}{\partial\psi},\label{SL12}\\ N_1&=&\cos\psi^q\frac{\partial}{\partial\tau}+ \frac{\sin\psi^q}{\sin\theta^q}\frac{\partial}{\partial\epsilon}- \cot\theta^q\sin\psi^q\frac{\partial}{\partial\varepsilon},\label{SL13}\\ P_1&=&\cos\psi^q\frac{\partial}{\partial\phi}+ \frac{\sin\psi^q}{\sin\theta^q}\frac{\partial}{\partial\varsigma}- \cot\theta^q\sin\psi^q\frac{\partial}{\partial\chi}.\label{SL14} \end{eqnarray} Let us calculate now an infinitesimal operator $J^q_{\omega_2}$ corresponding to the quaternion subgroup $\Omega^q_2$.
The subgroup $\Omega^q_2$ consists of the matrices \[ \omega_2(t^q)= {\renewcommand{\arraystretch}{1.3} \begin{pmatrix} \cos\frac{t^q}{2} & -\sin\frac{t^q}{2}\\ \sin\frac{t^q}{2} & \cos\frac{t^q}{2} \end{pmatrix}}, \] where the Euler angles are correspondingly equal to $0,\,t^q=t-{\bf i} t+{\bf j} t,\,0$. It is obvious that the matrix $\mathfrak{q}\omega_2(t^q)$ can be represented by the product \[ {\renewcommand{\arraystretch}{1.3} \mathfrak{q}\omega_2(t^q)= \begin{pmatrix} \cos\frac{\theta^q}{2}e^{{\bf i}\frac{(\varphi^q+\psi^q)}{2}} & {\bf i}\sin\frac{\theta^q}{2}e^{{\bf i}\frac{(\varphi^q-\psi^q)}{2}}\\ {\bf i}\sin\frac{\theta^q}{2}e^{{\bf i}\frac{(\psi^q-\varphi^q)}{2}} & \cos\frac{\theta^q}{2}e^{-{\bf i}\frac{(\varphi^q+\psi^q)}{2}} \end{pmatrix} \begin{pmatrix} \cos\frac{t^q}{2} & -\sin\frac{t^q}{2}\\ \sin\frac{t^q}{2} & \cos\frac{t^q}{2} \end{pmatrix}. } \] Multiplying the matrices on the right-hand side of this equality, we see that Euler angles of the product $\mathfrak{q}\omega_2(t^q)$ are related by the formulae \begin{eqnarray} \cos\theta^q(t)&=&\cos\theta^q\cos t^q+\sin\theta^q\sin t^q\sin\psi^q, \label{SL15}\\[0.2cm] e^{{\bf i}\varphi^q(t)}&=&e^{{\bf i}\varphi^q}\frac{\sin\theta^q\cos t^q- \cos\theta^q\sin t^q\sin\psi^q+{\bf i}\sin t^q\cos\psi^q} {\sin\theta^q(t)},\label{SL16}\\[0.2cm] e^{{\bf i}\frac{[\varphi^q(t)+\psi^q(t)]}{2}}&=&e^{{\bf i}\frac{\varphi^q}{2}} \frac{\cos\frac{\theta^q}{2}\cos\frac{t^q}{2}e^{\frac{{\bf i}\psi^q}{2}}+ \sin\frac{\theta^q}{2}\sin\frac{t^q}{2}e^{-{\bf i}\frac{\psi^q}{2}}} {\cos\frac{\theta^q(t)}{2}}.\label{SL17} \end{eqnarray} Differentiating with respect to $t$ both parts of each of the equalities (\ref{SL15})--(\ref{SL17}) and taking $t=0$, we obtain \begin{eqnarray} &&\theta^\prime(0)=\tau^\prime(0)=\phi^\prime(0)=-\sin\psi^q,\nonumber\\ &&\varphi^\prime(0)=\epsilon^\prime(0)=\varsigma^\prime(0)= \frac{\cos\psi^q}{\sin\theta^q},\nonumber\\ &&\psi^\prime(0)=\varepsilon^\prime(0)=\chi^\prime(0)=
-\cot\theta^q\cos\psi^q,\quad\omega^\prime(0)=0.\nonumber \end{eqnarray} Therefore, for the subgroup $\Omega^q_2$ we have \begin{equation}\label{J2} J^q_{\omega_2}=M_2-{\bf i} N_2+{\bf j} P_2, \end{equation} where \begin{eqnarray} M_2&=&-\sin\psi^q\frac{\partial}{\partial\theta}+ \frac{\cos\psi^q}{\sin\theta^q}\frac{\partial}{\partial\varphi}- \cot\theta^q\cos\psi^q\frac{\partial}{\partial\psi},\label{SL18}\\ N_2&=&-\sin\psi^q\frac{\partial}{\partial\tau}+ \frac{\cos\psi^q}{\sin\theta^q}\frac{\partial}{\partial\epsilon}- \cot\theta^q\cos\psi^q\frac{\partial}{\partial\varepsilon},\label{SL19}\\ P_2&=&-\sin\psi^q\frac{\partial}{\partial\phi}+ \frac{\cos\psi^q}{\sin\theta^q}\frac{\partial}{\partial\varsigma}- \cot\theta^q\cos\psi^q\frac{\partial}{\partial\chi}.\label{SL20} \end{eqnarray} Let us introduce an auxiliary quaternion angle $\psi^q_1=\psi-{\bf i}\varepsilon+{\bf k}\chi$. It is easy to see that $\psi^q=\psi^q_1-{\bf i}\omega$; therefore, $\psi^q_1$ is a part of $\psi^q$. Further, taking into account expressions (\ref{IO3}), (\ref{SL12})--(\ref{SL14}) and (\ref{SL18})--(\ref{SL20}), we can rewrite the operators (\ref{J3}), (\ref{J1}), (\ref{J2}) in the form\footnote{These operators look like $\SU(2)$ type (or $\SU(2)\otimes\SU(2)$ type) infinitesimal operators.
However, it is easy to verify that they do not form a group, since $\psi^q\ne\psi^q_1$.} \begin{eqnarray} J^q_{\omega_1}&=&\cos\psi^q\frac{\partial}{\partial\theta^q}+ \frac{\sin\psi^q}{\sin\theta^q}\frac{\partial}{\partial\varphi^q}- \cot\theta^q\sin\psi^q\frac{\partial}{\partial\psi^q_1},\label{qJ1}\\ J^q_{\omega_2}&=&-\sin\psi^q\frac{\partial}{\partial\theta^q}+ \frac{\cos\psi^q}{\sin\theta^q}\frac{\partial}{\partial\varphi^q}- \cot\theta^q\cos\psi^q\frac{\partial}{\partial\psi^q_1},\label{qJ2}\\ J^q_{\omega_3}&=&\frac{\partial}{\partial\psi^q},\label{qJ3}\\ \dot{J}^q_{\omega_1}&=&\cos\dot{\psi}^q\frac{\partial}{\partial\dot{\theta}^q}+ \frac{\sin\dot{\psi}^q}{\sin\dot{\theta}^q} \frac{\partial}{\partial\dot{\varphi}^q}- \cot\dot{\theta}^q\sin\dot{\psi}^q\frac{\partial}{\partial\dot{\psi}^q_1}, \label{dqJ1}\\ \dot{J}^q_{\omega_2}&=&-\sin\dot{\psi}^q\frac{\partial}{\partial\dot{\theta}^q}+ \frac{\cos\dot{\psi}^q}{\sin\dot{\theta}^q} \frac{\partial}{\partial\dot{\varphi}^q}- \cot\dot{\theta}^q\cos\dot{\psi}^q\frac{\partial}{\partial\dot{\psi}^q_1}, \label{dqJ2}\\ \dot{J}^q_{\omega_3}&=&\frac{\partial}{\partial\dot{\psi}^q},\label{dqJ3} \end{eqnarray} where \[ {\renewcommand{\arraystretch}{1.55} \begin{array}{ccl} \dfrac{\partial}{\partial\theta^q}&=&\dfrac{\partial}{\partial\theta}+ \dfrac{\partial}{\partial\phi}+{\bf i}\dfrac{\partial}{\partial\tau},\\ \dfrac{\partial}{\partial\varphi^q}&=&\dfrac{\partial}{\partial\varphi}+ {\bf i}\dfrac{\partial}{\partial\epsilon}+{\bf j}\dfrac{\partial}{\partial\varsigma},\\ \dfrac{\partial}{\partial\psi^q}&=&\dfrac{\partial}{\partial\psi}+ {\bf i}\dfrac{\partial}{\partial\varepsilon}+{\bf i}\dfrac{\partial}{\partial\omega}+ {\bf k}\dfrac{\partial}{\partial\chi},\\ \dfrac{\partial}{\partial\psi^q_1}&=&\dfrac{\partial}{\partial\psi}+ {\bf i}\dfrac{\partial}{\partial\varepsilon}+ {\bf k}\dfrac{\partial}{\partial\chi}. 
\end{array} \quad \begin{array}{ccl} \dfrac{\partial}{\partial\dot{\theta}^q}&=&\dfrac{\partial}{\partial\theta}- \dfrac{\partial}{\partial\phi}-{\bf i}\dfrac{\partial}{\partial\tau},\\ \dfrac{\partial}{\partial\dot{\varphi}^q}&=&\dfrac{\partial}{\partial\varphi}- {\bf i}\dfrac{\partial}{\partial\epsilon}-{\bf j}\dfrac{\partial}{\partial\varsigma},\\ \dfrac{\partial}{\partial\dot{\psi}^q}&=&\dfrac{\partial}{\partial\psi}- {\bf i}\dfrac{\partial}{\partial\varepsilon}-{\bf i}\dfrac{\partial}{\partial\omega}- {\bf k}\dfrac{\partial}{\partial\chi},\\ \dfrac{\partial}{\partial\dot{\psi}^q_1}&=&\dfrac{\partial}{\partial\psi}- {\bf i}\dfrac{\partial}{\partial\varepsilon}- {\bf k}\dfrac{\partial}{\partial\chi}. \end{array}} \] Using the expressions (\ref{qJ1})--(\ref{qJ3}), we see that for the first Casimir operator $F$ of the group $\SO_0(1,4)$ there exists the following equality: \[ -F=-P^2_0-\textbf{\emph{N}}^2+\textbf{\emph{P}}^2+\textbf{\emph{M}}^2=\left(J^q_{\omega_1}\right)^2+ \left(J^q_{\omega_2}\right)^2+\left(J^q_{\omega_3}\right)^2. \] Or, \begin{equation}\label{FKO} -F=\frac{\partial^2}{\partial{\theta^q}^2}+ \cot\theta^q\frac{\partial}{\partial\theta^q}+ \frac{1}{\sin^2\theta^q}\frac{\partial^2}{\partial{\varphi^q}^2}- \frac{2\cos\theta^q}{\sin^2\theta^q} \frac{\partial^2}{\partial\varphi^q\partial\psi^q_1}+ \cot^2\theta^q\frac{\partial^2}{\partial{\psi^q_1}^2}+ \frac{\partial^2}{\partial{\psi^q}^2}. \end{equation} Matrix elements $t^{\sigma}_{mn}(\mathfrak{q})= \mathfrak{M}^{\sigma}_{mn}(\varphi^q,\theta^q,\psi^q)$ of irreducible representations of the group $\SO_0(1,4)$ are eigenfunctions of the operator (\ref{FKO}): \begin{equation}\label{FKO2} \left[-F+\sigma(\sigma+3)\right]\mathfrak{M}^{\sigma}_{mn}(\mathfrak{q})=0, \end{equation} where \begin{equation}\label{MF} \mathfrak{M}^{\sigma}_{mn}(\mathfrak{q})=e^{-{\bf i}(m\varphi^q+n(\psi^q_1-{\bf i}\omega))}\mathfrak{Z}^{\sigma}_{mn} (\cos\theta^q), \end{equation} since $\psi^q=\psi^q_1-{\bf i}\omega$. 
Here, $\mathfrak{M}^\sigma_{mn}(\mathfrak{q})$ are general matrix elements of the representations of $\SO_0(1,4)$, and $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$ are {\it hyperspherical functions}. Substituting the functions (\ref{MF}) into (\ref{FKO2}) and taking into account the operator (\ref{FKO}), we arrive at the following differential equation: \begin{multline} \frac{d^2\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)}{d{\theta^q}^2}+\cot\theta^q \frac{d \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)}{d\theta^q}-\frac{m^2}{\sin^2\theta^q} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)+\frac{2mn\cos\theta^q}{\sin^2\theta^q} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)-\\ -n^2\cot^2\theta^q\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)- n^2\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)+\sigma(\sigma+3)\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=0, \nonumber \end{multline} or \[ \left[\frac{d^2}{d{\theta^q}^2}+\cot\theta^q\frac{d}{d\theta^q}- \frac{m^2+n^2-2mn\cos\theta^q}{\sin^2\theta^q}+\sigma(\sigma+3)\right] \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=0. \] After substitution $z=\cos\theta^q$ this equation can be rewritten as \begin{equation}\label{FKO3} \left[(1-z^2)\frac{d^2}{dz^2}-2z\frac{d}{dz}- \frac{m^2+n^2-2mnz}{1-z^2}+\sigma(\sigma+3)\right] \mathfrak{Z}^{\sigma}_{mn}(z)=0. \end{equation} The latter equation has three singular points $-1$, $+1$, $\infty$. It is a Fuchsian equation. Indeed, denoting $w(z)=\mathfrak{Z}^\sigma_{mn}(z)$, we write the equation (\ref{FKO3}) in the form \begin{equation}\label{Fux1} \frac{d^2w(z)}{dz^2}-p(z)\frac{dw(z)}{dz}+q(z)w(z)=0, \end{equation} where \[ p(z)=\frac{2z}{(1-z)(1+z)},\quad q(z)=\frac{\sigma(\sigma+3)(1-z^2)-m^2-n^2+2mnz}{(1-z)^2(1+z)^2}. \] Let us find solutions of (\ref{Fux1}). 
Applying the substitution \[ t=\frac{1-z}{2},\quad w(z)=t^{\frac{|m-n|}{2}}(1-t)^{\frac{|m+n|}{2}}v(t),\nonumber \] we arrive at hypergeometric equation \begin{equation}\label{Hyper} t(1-t)\frac{d^2v}{dt^2}+[c-(a+b+1)t]\frac{dv}{dt}-abv(t)=0, \end{equation} where \begin{eqnarray} a&=&\sigma+3+\frac{1}{2}(|m-n|+|m+n|),\nonumber\\ b&=&-\sigma+\frac{1}{2}(|m-n|+|m+n|),\nonumber\\ c&=&|m-n|+1.\nonumber \end{eqnarray} Therefore, a solution of (\ref{Hyper}) is \[ v(t)=C_1\hypergeom{2}{1}{a,b}{c}{t}+C_2t^{1-c} \hypergeom{2}{1}{b-c+1,a-c+1}{2-c}{t}. \] Coming back to initial variable, we obtain \begin{multline} w(z)=C_1\left(\frac{1-z}{2}\right)^{\frac{|m-n|}{2}} \left(\frac{1+z}{2}\right)^{\frac{|m+n|}{2}}\times\\ \times\hypergeom{2}{1}{\sigma+3+\frac{1}{2}(|m-n|+|m+n|),-\sigma+\frac{1}{2}(|m-n|+|m+n|)} {|m-n|+1}{\frac{1-z}{2}}+\\ +C_2\left(\frac{1-z}{2}\right)^{-\frac{|m-n|}{2}} \left(\frac{1+z}{2}\right)^{\frac{|m+n|}{2}}\times\\ \times\hypergeom{2}{1}{-\sigma+\frac{1}{2}(|m+n|-|m-n|),\sigma+3+\frac{1}{2}(|m+n|-|m-n|)} {1-|m-n|}{\frac{1-z}{2}}. \label{Sol1'} \end{multline} Thus, from (\ref{Sol1'}) it follows that the function $\mathfrak{Z}^\sigma_{mn}$ can be represented by the following particular solution: \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=C_1\sin^{|m-n|}\frac{\theta^q}{2} \cos^{|m+n|}\frac{\theta^q}{2}\times\\ \times\hypergeom{2}{1}{\sigma+3+\frac{1}{2}(|m-n|+|m+n|),-\sigma+\frac{1}{2}(|m-n|+|m+n|)} {|m-n|+1}{\sin^2\frac{\theta^q}{2}}.\label{Hyper2} \end{multline} In section 4 and 5 we will give more explicit expressions for the functions $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$ via the multiple hypergeometric series. Finally, using the formulae (\ref{dqJ1})--(\ref{dqJ3}), we can obtain the same differential equation for the function $\mathfrak{Z}^{\dot{\sigma}}_{\dot{m}\dot{n}}(\cos\dot{\theta}^q)$. All the calculations in this case are analogous to the previous calculations for $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$. 
\subsection{Homogeneous spaces of $\SO_0(1,4)$} Before introducing the spherical functions on the group $\SO_0(1,4)$ it is useful to give a general definition for spherical functions on the group $G$. Let $T(g)$ be an irreducible representation of the group $G$ in the space $L$ and let $H$ be a subgroup of $G$. The vector $\boldsymbol{\xi}$ in the space $L$ is called {\it an invariant with respect to the subgroup} $H$ if for all $h\in H$ the equality $T(h)\boldsymbol{\xi}=\boldsymbol{\xi}$ holds. The representation $T(g)$ is called {\it a representation of the class one with respect to the subgroup} $H$ if in its space there are non-null vectors which are invariant with respect to $H$. At this point, a contraction of $T(g)$ onto its subgroup $H$ is unitary: \[ (T(h)\boldsymbol{\xi}_1,T(h)\boldsymbol{\xi}_2)=(\boldsymbol{\xi}_1, \boldsymbol{\xi}_2). \] Hence it follows that a function \[ f(g)=(T(g)\boldsymbol{\eta},\boldsymbol{\xi}) \] corresponds the each vector $\boldsymbol{\eta}\in L$. $f(g)$ are called {\it spherical functions of the representation $T(g)$ with respect to $H$}. Spherical functions can be considered as functions on homogeneous spaces ${\cal M}=G/H$. In its turn, a homogeneous space ${\cal M}$ of the group $G$ has the following properties:\\ a) It is a topological space on which the group $G$ acts continuously, that is, let $y$ be a point in ${\cal M}$, then $gy$ is defined and is again a point in ${\cal M}$ ($g\in G$).\\ b) This action is transitive, that is, for any two points $y_1$ and $y_2$ in ${\cal M}$ it is always possible to find a group element $g\in G$ such that $y_2=gy_1$.\\ There is a one-to-one correspondence between the homogeneous spaces of $G$ and the coset spaces of $G$. Let $H_0$ be a maximal subgroup of $G$ which leaves the point $y_0$ invariant, $hy_0=y_0$, $h\in H_0$, then $H_0$ is called {\it the stabilizer of} $y_0$. 
Representing now any group element of $G$ in the form $g=g_ch$, where $h\in H_0$ and $g_c\in G/H_0$, we see that, by virtue of the transitivity property, any point $y\in{\cal M}$ can be given by $y=g_chy_0=g_cy_0$. Hence it follows that the elements $g_c$ of the coset space give a parametrization of ${\cal M}$. The mapping ${\cal M}\leftrightarrow G/H_0$ is continuous since the group multiplication is continuous and the action on ${\cal M}$ is continuous by definition. The stabilizers $H$ and $H_0$ of two different points $y$ and $y_0$ are conjugate, since from $H_0y_0=y_0$, $y_0=g^{-1}y$, it follows that $gH_0g^{-1}y=y$, that is, $H=gH_0g^{-1}$. Coming back to the de Sitter group $G=\SO_0(1,4)$, we see that there are the following homogeneous spaces of $\SO_0(1,4)$ depending on the stabilizer $H$. First of all, when $H=0$ the homogeneous space ${\cal M}_{10}$ coincides with {\it a group manifold} $\mathfrak{S}_{10}$ of $\SO_0(1,4)$. Therefore, $\mathfrak{S}_{10}$ is a maximal homogeneous space of the de Sitter group. Further, when $H=\Omega^q_\psi$, where $\Omega^q_\psi$ is a group of diagonal matrices \[ \begin{pmatrix} e^{\frac{{\bf i}\psi^q}{2}} & 0\\ 0 & e^{-\frac{{\bf i}\psi^q}{2}} \end{pmatrix}, \] the homogeneous space ${\cal M}_6$ coincides with a {\it two-dimensional quaternion sphere} $S^q_2$, ${\cal M}_6=S^q_2\sim\Sp(1,1)/\Omega^q_\psi$\footnote{When the stabilizer $H$ is a compact group, the homogeneous space ${\cal M}=G/H$ is called {\it a Riemannian symmetric space} \cite{Hel78}. When $H$ is a non-compact group, we arrive at the non-Riemannian spaces. The homogeneous space ${\cal M}_6=S^q_2\sim\Sp(1,1)/\Omega^q_\psi$ is the non-Riemannian space, since the stabilizer $H=\Omega^q_\psi$ is a non-compact subgroup of $\Sp(1,1)$. Quaternion and anti-quaternion spheres were studied by Rozenfel'd \cite{Roz55}.}. We obtain the following homogeneous space ${\cal M}_4$ when the stabilizer $H$ coincides with a maximal compact subgroup $K=\SO(4)$ of $\SO_0(1,4)$.
In this case we arrive at the upper sheet of a four-dimensional hyperboloid ${\cal M}_4=H^4\sim\SO_0(1,4)/\SO(4)$. The upper sheet $H^4_+$ of the two-sheeted hyperboloid $H^4$ can be understood as a quotient space $\SO_0(1,4)/\SO(4)$. Indeed, let us consider the upper sheet $H^4_+$ of $H^4$: \begin{equation}\label{Sit9} H^4_+:\; x^2_0-x^2_1-x^2_2-x^2_3-x^2_4=1,\quad x_0>0 \end{equation} and the point $x^0=(1,0,0,0,0)$ on $H^4_+$. The group $\SO_0(1,4)$ transfers the hyperboloid $H^4_+$ into itself. Besides, for any two points $x^\prime$ and $x^{\prime\prime}$ of $H^4_+$ there is such an element $g\in\SO_0(1,4)$ that $gx^\prime=x^{\prime\prime}$, that is, $\SO_0(1,4)$ is a transitive transformation group of the homogeneous space. The set of elements from $\SO_0(1,4)$, leaving the point $x^0$ invariant, coincides with the subgroup $\SO(4)$. Therefore, $H^4_+$ is homeomorphic to the quotient space $\SO_0(1,4)/\SO(4)$. It should be noted that {\it a four-dimensional Lobatchevski space} $\mathcal{L}^4$, called also {\it a de Sitter space}, is realized on the hyperboloid $H^4_+$\footnote{It is obvious that among all the homogeneous spaces of $\SO_0(1,4)$ the space $H^4_+$ is the most important for physics. In accordance with modern cosmology, $H^4_+$ is understood as a space-time endowed with a global topology of constant negative curvature (the de Sitter universe).}. In the case $x^2_0-x^2_1-x^2_2-x^2_3-x^2_4=0$ we arrive at a cone $C^4$ which can also be considered as a homogeneous space of $\SO_0(1,4)$. Usually, only the upper sheets $H^4_+$ and $C^4_+$ are considered in applications. The following homogeneous space ${\cal M}_3$ of $\SO_0(1,4)$ is a three-dimensional real sphere $S^3\sim\SO(4)/\SO(3)$. In contrast to the previous homogeneous spaces, the sphere $S^3$ coincides with a quotient space $\SO_0(1,4)/P$, where $P$ is a minimal parabolic subgroup of $\SO_0(1,4)$.
From the Iwasawa decompositions $\SO_0(1,4)=KNA$ and $P=MNA$, where $M=\SO(3)$, $N$ and $A$ are nilpotent and commutative subgroups of $\SO_0(1,4)$, it follows that $\SO_0(1,4)/P=KNA/MNA\sim K/M\sim\SO(4)/\SO(3)$. \begin{sloppypar} A minimal homogeneous space ${\cal M}_2$ of $\SO_0(1,4)$ is a two-dimensional real sphere $S^2\sim\SO(3)/\SO(2)$. \end{sloppypar} Taking into account the list of homogeneous spaces of $\SO_0(1,4)$, we now introduce the following types of spherical functions $f(\mathfrak{q})$ on the de Sitter group: \begin{itemize} \item $f(\mathfrak{q})=\mathfrak{M}^\sigma_{mn}(\mathfrak{q})= e^{-{\bf i} m\varphi^q}\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)e^{-{\bf i} n\psi^q}$. This function is defined on the group manifold $\mathfrak{S}_{10}$ of $\SO_0(1,4)$. It is the most general spherical function on the group $\SO_0(1,4)$. In this case $f(\mathfrak{q})$ depends on all the ten parameters of $\SO_0(1,4)$ and for that reason it should be called {\it a function on the de Sitter group}. An explicit form of $\mathfrak{M}^\sigma_{mn}(\mathfrak{q})$ (respectively $\mathfrak{M}^{\dot{\sigma}}_{\dot{m}\dot{n}}(\mathfrak{q})$) for finite-dimensional representations and of $\mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})$ (resp. $\mathfrak{M}^{-\frac{3}{2}-{\bf i}\rho,l_0}_{\dot{m}\dot{n}}(\mathfrak{q})$) for infinite-dimensional representations of $\SO_0(1,4)$ will be given in the sections 4 and 5, respectively. \item $f(\varphi^q,\theta^q)=\mathfrak{M}^m_\sigma(\varphi^q,\theta^q,0)=e^{-{\bf i} m\varphi^q} \mathfrak{Z}^m_\sigma(\cos\theta^q)$. This function is defined on the homogeneous space ${\cal M}_6=S^q_2\sim\Sp(1,1)/\Omega^q_\psi$, that is, on the surface of the two-dimensional quaternion sphere $S^q_2$. The function $\mathfrak{M}^m_\sigma(\varphi^q,\theta^q,0)$ is a five-dimensional analogue of the usual spherical function $Y^m_l(\varphi,\theta)$ defined on the surface of the real two-sphere $S_2$.
In its turn, the function $f(\dot{\varphi}^q,\dot{\theta}^q)= \mathfrak{M}^{\dot{m}}_{\dot{\sigma}}(\dot{\varphi}^q,\dot{\theta}^q,0)$ is defined on the surface of the dual quaternion sphere $\dot{S}^2_q$. An explicit form of the functions $\mathfrak{M}^m_\sigma(\varphi^q,\theta^q,0)$ ($\mathfrak{M}^{\dot{m}}_{\dot{\sigma}}(\dot{\varphi}^q,\dot{\theta}^q,0)$) and $\mathfrak{M}^m_{-\frac{3}{2}+{\bf i}\rho,l_0}(\varphi^q,\theta^q,0)$ ($\mathfrak{M}^{\dot{m}}_{-\frac{3}{2}-{\bf i}\rho,l_0}(\dot{\varphi}^q,\dot{\theta}^q,0)$) will be given in the section 4 and 5. \item $f(\epsilon,\tau,\varepsilon,\omega)=\mathfrak{M}^\sigma_{mn}(\epsilon,\tau,\varepsilon,\omega)= e^{{\bf i} m\epsilon}\mathfrak{P}^\sigma_{mn}(\cosh\tau)e^{{\bf i} n(\varepsilon+\omega)}$. This function is defined on the homogeneous space ${\cal M}_4=H^4_+\sim\SO_0(1,4)/\SO(4)$, that is, on the upper sheet of the hyperboloid $x^2_0-x^2_1-x^2_2-x^2_3-x^2_4=1$. An explicit form of the functions $\mathfrak{M}^\sigma_{mn}(\epsilon,\tau,\varepsilon,\omega)$ ($\mathfrak{M}_{\dot{m}\dot{n}}^{\dot{l}}(\epsilon,\tau,\varepsilon,\omega)$) and $\mathfrak{M}_{mn}^{-\frac{3}{2}+{\bf i}\rho}(\epsilon,\tau,\varepsilon,\omega)$ ($\mathfrak{M}_{\dot{m}\dot{n}}^{-\frac{3}{2}-{\bf i}\rho}(\epsilon,\tau,\varepsilon,\omega)$) will be given in the section 4 and 5. \item\begin{sloppypar}\noindent $f(\varphi,\theta,\psi)=\mathfrak{M}^\sigma_{mn}(\varphi,\theta,\psi)= e^{-{\bf i} m\varphi}P^\sigma_{mn}(\cos\theta)e^{-{\bf i} n\psi}$ (or $f(\varsigma,\phi,\chi)=\mathfrak{M}^\sigma_{mn}(\varsigma,\phi,\chi)= e^{-{\bf i} m\varsigma}P^\sigma_{mn}(\cos\phi) e^{-{\bf i} n\chi}$). This function is defined on the homogeneous space ${\cal M}_3\sim S^3=\SO(4)/\SO(3)$, that is, on the surface of the real 3-sphere $x^2_0+x^2_1+x^2_2+x^2_3=1$. In essence, we come here to representations of $\SO_0(1,4)$ restricted to the subgroup $\SO(4)$. 
\end{sloppypar} \item\begin{sloppypar}\noindent $f(\varphi,\theta)=\mathfrak{M}^m_l(\varphi,\theta,0)= e^{-{\bf i} m\varphi}P^m_{\sigma}(\cos\theta)\sim Y^m_\sigma(\varphi,\theta)$ (or $f(\varsigma,\phi)=\mathfrak{M}^m_\sigma(\varsigma,\phi,0)= e^{-{\bf i} m\varsigma}P^m_{\sigma}(\cos\phi)\sim Y^m_\sigma(\varsigma,\phi)$). This function is defined on the homogeneous space ${\cal M}_2=S^2\sim\SO(3)/\SO(2)$, that is, on the surface of the real 2-sphere $S^2$. We come here to the most degenerate representations of $\SO_0(1,4)$ restricted to the subgroup $\SO(3)$.\end{sloppypar} \end{itemize} \section{Spherical functions on the group $\SO(4)$} As is known, the group $\SO(4)$ is a maximal compact subgroup of $\SO_0(1,4)$. $\SO(4)$ corresponds to basis elements $\textbf{\emph{M}}=(M_1,M_2,M_3)$ and $\textbf{\emph{P}}=(P_1,P_2,P_3)$ of the algebra $\mathfrak{so}(1,4)$: \begin{equation}\label{Sit15} \left[ M_k,M_l\right]={\bf i}\varepsilon_{klm}M_m,\quad \left[ M_k,P_l\right]={\bf i}\varepsilon_{klm}P_m,\quad\left[ P_k,P_l\right]={\bf i}\varepsilon_{klm}M_m. \end{equation} Introducing linear combinations $\textbf{\emph{V}}=(\textbf{\emph{M}}+\textbf{\emph{P}})/2$ and $\textbf{\emph{V}}^\prime=(\textbf{\emph{M}}-\textbf{\emph{P}})/2$, we obtain \begin{equation}\label{Sit16} \left[ V_k,V_l\right]={\bf i}\varepsilon_{klm}V_m,\quad \left[ V^\prime_k,V^\prime_l\right]={\bf i}\varepsilon_{klm}V^\prime_m. \end{equation} The operators $\textbf{\emph{V}}$ and $\textbf{\emph{V}}^\prime$ form bases of the two independent algebras $\mathfrak{so}(3)$. It means that $\SO(4)$ is isomorphic to a direct product $\SO(3)\otimes\SO(3)$. A universal covering of $\SO(4)$ is $\spin(4)\simeq\SU(2)\otimes\SU(2)$. 
The one-parameter subgroups of $\spin(4)$ are \[ m_{12}(\psi)=\begin{pmatrix} e^{{\bf i}\frac{\psi}{2}} & 0 \\ 0 & e^{-{\bf i}\frac{\psi}{2}}\end{pmatrix},\quad m_{13}(\varphi)= \begin{pmatrix} e^{{\bf i}\frac{\varphi}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varphi}{2}}\end{pmatrix},\quad m_{23}(\theta)= \begin{pmatrix} \cos\frac{\theta}{2} & {\bf i}\sin\frac{\theta}{2}\\ {\bf i}\sin\frac{\theta}{2} & \cos\frac{\theta}{2} \end{pmatrix}, \] \[ p_{14}(\chi)=\begin{pmatrix} e^{{\bf i}\frac{\chi}{2}} & 0 \\ 0 & e^{-{\bf i}\frac{\chi}{2}}\end{pmatrix},\quad p_{24}(\varsigma)= \begin{pmatrix} e^{{\bf i}\frac{\varsigma}{2}} & 0\\ 0 & e^{-{\bf i}\frac{\varsigma}{2}}\end{pmatrix},\quad p_{34}(\phi)= \begin{pmatrix} \cos\frac{\phi}{2} & {\bf i}\sin\frac{\phi}{2}\\ {\bf i}\sin\frac{\phi}{2} & \cos\frac{\phi}{2} \end{pmatrix}, \] where \[ {\renewcommand{\arraystretch}{1.05} \begin{array}{ccccc} 0 &\leq&\theta& \leq& \pi,\\ 0 &\leq&\varphi& <&2\pi,\\ -2\pi&\leq&\psi&<&2\pi, \end{array}\quad\quad \begin{array}{ccccc} 0 &\leq&\phi& \leq& \pi,\\ 0 &\leq&\varsigma& <&2\pi,\\ -2\pi&\leq&\chi&<&2\pi. \end{array}} \] A fundamental representation of the group $\spin(4)\simeq\SU(2)\otimes\SU(2)$ is defined by the matrix (\ref{Elem3}). On the group $\SO(4)$ there exist the following Laplace-Beltrami operators: \begin{eqnarray} \textbf{\emph{V}}^2&=&V^2_1+V^2_2+V^2_3=\frac{1}{4}(\textbf{\emph{M}}^2+\textbf{\emph{P}}^2+2\textbf{\emph{M}}\textbf{\emph{P}}),\label{BLO1}\\ {\textbf{\emph{V}}^\prime}^2&=&{V^\prime_1}^2+{V^\prime_2}^2+{V^\prime_3}^2= \frac{1}{4}(\textbf{\emph{M}}^2+\textbf{\emph{P}}^2-2\textbf{\emph{M}}\textbf{\emph{P}}).\label{BLO2} \end{eqnarray} At this point, we see that operators (\ref{BLO1}), (\ref{BLO2}) contain Casimir operators $\textbf{\emph{M}}^2+\textbf{\emph{P}}^2$, $\textbf{\emph{M}}\textbf{\emph{P}}$ of the group $\SO(4)$. 
Using expressions (\ref{DEA}), we obtain a Euler parametrization of the Laplace-Beltrami operators, \begin{eqnarray} \textbf{\emph{V}}^2&=&\frac{\partial^2}{\partial\theta^e{}^2}+ \cot\theta^e\frac{\partial}{\partial\theta^e}+\frac{1}{\sin^2\theta^e}\left[ \frac{\partial^2}{\partial\varphi^e{}^2}- 2\cos\theta^e\frac{\partial}{\partial\varphi^e} \frac{\partial}{\partial\psi^e}+ \frac{\partial^2}{\partial\psi^e{}^2}\right],\nonumber\\ {\textbf{\emph{V}}^\prime}^2&=&\frac{\partial^2}{\partial\dot{\theta}^e{}^2}+ \cot\dot{\theta}^e\frac{\partial}{\partial\dot{\theta}^e}+ \frac{1}{\sin^2\dot{\theta}^e}\left[ \frac{\partial^2}{\partial\dot{\varphi}^e{}^2}- 2\cos\dot{\theta}^e\frac{\partial}{\partial\dot{\varphi}^e} \frac{\partial}{\partial\dot{\psi}^e}+ \frac{\partial^2}{\partial\dot{\psi}^e{}^2}\right].\label{KO2} \end{eqnarray} Here, $\dot{\theta}^e=\theta-\phi$, $\dot{\varphi}^e=\varphi-\varsigma$, $\dot{\psi}^e=\psi-\chi$ are conjugate double angles. Matrix elements $t^{l}_{mn}(g)= \mathfrak{M}^{l}_{mn}(\varphi^e,\theta^e,\psi^e)$ of irreducible representations of the group $\SO(4)$ are eigenfunctions of the operators (\ref{KO2}), \begin{eqnarray} \left[\textbf{\emph{V}}^2+l(l+1)\right]\mathfrak{M}^{l}_{mn}(\varphi^e,\theta^e,\psi^e)&=&0,\nonumber\\ \left[{\textbf{\emph{V}}^\prime}^2+\dot{l}(\dot{l}+1)\right]\mathfrak{M}^{\dot{l}}_{\dot{m}\dot{n}} (\dot{\varphi}^e,\dot{\theta}^e,\dot{\psi}^e)&=&0,\label{EQ} \end{eqnarray} where \begin{eqnarray} \mathfrak{M}^{l}_{mn}(g)&=& e^{-{\bf i}(m\varphi^e+n\psi^e)}\mathfrak{Z}^{l}_{mn} (\cos\theta^e),\nonumber\\ \mathfrak{M}^{\dot{l}}_{\dot{m}\dot{n}}(g)&=&e^{{\bf i}(\dot{m}\dot{\varphi}^e+ \dot{n}\dot{\psi}^e)}\mathfrak{Z}^{\dot{l}}_{\dot{m}\dot{n}}(\cos\dot{\theta}^e). \label{HF3'} \end{eqnarray} Here, $\mathfrak{M}^l_{mn}(g)$ are general matrix elements of the representations of $\SO(4)$, and $\mathfrak{Z}^l_{mn}(\cos\theta^e)$ are {\it hyperspherical functions} of $\SO(4)$. 
Substituting the functions (\ref{HF3'}) into (\ref{EQ}) and taking into account the operators (\ref{KO2}) and substitutions $z=\cos\theta^e$, $\overset{\ast}{z}=\cos\dot{\theta}^e$, we come to the following differential equations: \begin{eqnarray} \left[(1-z^2)\frac{d^2}{dz^2}-2z\frac{d}{dz}- \frac{m^2+n^2-2mnz}{1-z^2}+l(l+1)\right] \mathfrak{Z}^{l}_{mn}(z)&=&0,\label{Leg1}\\ \left[(1-\overset{\ast}{z}{}^2)\frac{d^2}{d\overset{\ast}{z}{}^2}- 2\overset{\ast}{z}\frac{d}{d\overset{\ast}{z}}- \frac{\dot{m}^2+\dot{n}^2-2\dot{m}\dot{n}\overset{\ast}{z}} {1-\overset{\ast}{z}{}^2}+\dot{l}(\dot{l}+1)\right] \mathfrak{Z}^{\dot{l}}_{\dot{m}\dot{n}}(\overset{\ast}{z})&=&0.\label{Leg2} \end{eqnarray} The latter equations have three singular points $-1$, $+1$, $\infty$. The equations (\ref{Leg1}), (\ref{Leg2}) are Fuchsian equations. Indeed, denoting $w(z)=\mathfrak{Z}^l_{mn}(z)$, we write the equation (\ref{Leg1}) in the form \begin{equation}\label{Fux} \frac{d^2w(z)}{dz^2}-p(z)\frac{dw(z)}{dz}+q(z)w(z)=0, \end{equation} where \[ p(z)=\frac{2z}{(1-z)(1+z)},\quad q(z)=\frac{l(l+1)(1-z^2)-m^2-n^2+2mnz}{(1-z)^2(1+z)^2}. \] The solution of (\ref{Fux}) is \begin{multline} w(z)=C_1\left(\frac{1-z}{2}\right)^{\frac{|m-n|}{2}} \left(\frac{1+z}{2}\right)^{\frac{|m+n|}{2}}\times\\ \times\hypergeom{2}{1}{l+1+\frac{1}{2}(|m-n|+|m+n|),-l+\frac{1}{2}(|m-n|+|m+n|)} {|m-n|+1}{\frac{1-z}{2}}+\\ +C_2\left(\frac{1-z}{2}\right)^{-\frac{|m-n|}{2}} \left(\frac{1+z}{2}\right)^{\frac{|m+n|}{2}}\times\\ \times\hypergeom{2}{1}{-l+\frac{1}{2}(|m+n|-|m-n|),l+1+\frac{1}{2}(|m+n|-|m-n|)} {1-|m-n|}{\frac{1-z}{2}}. \label{Sol1} \end{multline} It is obvious that a solution of (\ref{Leg2}) has the analogous structure. Let us now consider spherical functions $f(g)$ and homogeneous spaces ${\cal M}=\SO(4)/H$ of the group $\SO(4)$ depending on the stabilizer $H$. First of all, when $H=0$ the homogeneous space ${\cal M}_6$ coincides with {\it a group manifold} $\mathfrak{K}_6$ of $\SO(4)$. 
Therefore, $\mathfrak{K}_6$ is a maximal homogeneous space of the group $\SO(4)$. Further, when $H=\Omega^e_\psi$, where $\Omega^e_\psi$ is a group of diagonal matrices \[ \begin{pmatrix} e^{\frac{{\bf i}\psi^e}{2}} & 0\\ 0 & e^{-\frac{{\bf i}\psi^e}{2}} \end{pmatrix}, \] the homogeneous space ${\cal M}_4$ coincides with a {\it two-dimensional double sphere} $S^e_2$, ${\cal M}_4=S^e_2\sim\spin(4)/\Omega^e_\psi$. The sphere $S^e_2$ can be constructed from the quantities $z_k=x_k+ey_k$, $\overset{\ast}{z}_k=x_k-ey_k$ $(k=1,2,3)$ as follows: \begin{equation}\label{DBS} S^e_2:\;z^2_1+z^2_2+z^2_3={\bf x}^2+{\bf y}^2+2e{\bf x}{\bf y}=r^2, \end{equation} where $e$ is {\it a double unit}, $e^2=1$. The conjugate (dual) sphere $\dot{S}^e_2$ is \begin{equation}\label{DDS} \dot{S}^e_2:\;\overset{\ast}{z}_1{}^2+\overset{\ast}{z}_2{}^2+ \overset{\ast}{z}_3{}^2={\bf x}^2+{\bf y}^2-2e{\bf x}{\bf y}=\overset{\ast}{r}{}^2. \end{equation} We obtain the following homogeneous space ${\cal M}_3$ when the stabilizer $H$ coincides with a subgroup $\SO(3)$. In this case we have a three-dimensional sphere ${\cal M}_3=S^3\sim\SO(4)/\SO(3)$ in the space $\R^4$. Finally, a minimal homogeneous space ${\cal M}_2$ of $\SO(4)$ is a two-dimensional real sphere $S_2\sim\SO(3)/\SO(2)$. All the homogeneous spaces of $\SO(4)$ are symmetric Riemannian spaces. Taking into account the list of homogeneous spaces of $\SO(4)$, we now introduce the following types of spherical functions $f(g)$ on the group $\SO(4)$. \begin{itemize} \item $f(g)=\mathfrak{M}^l_{mn}(g)$. This function is defined on the group manifold $\mathfrak{K}_6$ of $\SO(4)$. It is the most general spherical function on the group $\SO(4)$. In this case $f(g)$ depends on all the six parameters of $\SO(4)$ and for that reason it should be called as {\it a function on the group $\SO(4)$}. \item $f(\varphi^e,\theta^e)=\mathfrak{M}^m_l(\varphi^e,\theta^e,0)$. 
This function is defined on the homogeneous space ${\cal M}_4=S^e_2\sim\SO(4)/\Omega^e_\psi$, that is, on the surface of the two-dimensional double sphere $S^e_2$. The function $\mathfrak{M}^m_l(\varphi^e,\theta^e,0)$ is a four-dimensional analogue of the usual spherical function $Y^m_l(\varphi,\theta)$ defined on the surface of the real two-sphere $S^2$. In its turn, the function $f(\dot{\varphi}^e,\dot{\theta}^e)= \mathfrak{M}^{\dot{m}}_{\dot{l}}(\dot{\varphi}^e,\dot{\theta}^e,0)$ is defined on the surface of the dual sphere $\dot{S}^e_2$. \item $f(\varphi,\theta,\psi)=e^{-{\bf i} m\varphi}P^l_{mn}(\cos\theta)e^{-{\bf i} n\psi}$ (or $f(\varsigma,\phi,\chi)=e^{-{\bf i} m\varsigma}P^l_{mn}(\cos\phi) e^{-{\bf i} n\chi}$). This function is defined on the homogeneous space ${\cal M}_3\sim S^3=\SO(4)/\SO(3)$, that is, on the surface of the real 3-sphere $x^2_0+x^2_1+x^2_2+x^2_3=1$. \item $f(\varphi,\theta)=e^{-{\bf i} m\varphi}P^m_{l}(\cos\theta)\sim Y^m_l(\varphi,\theta)$ (or $f(\varsigma,\phi)=e^{-{\bf i} m\varsigma}P^m_{l}(\cos\phi)\sim Y^m_l(\varsigma,\phi)$). This function is defined on the homogeneous space ${\cal M}_2=S^2\sim\SO(3)/\SO(2)$, that is, on the surface of the real 2-sphere $S^2$. We come here to the most degenerate representations of $\SO(4)$ restricted to the subgroup $\SU(2)$. \end{itemize} First, let us consider spherical functions $f(g)=\mathfrak{M}^l_{mn}(g)=e^{-{\bf i} m\varphi^e}\mathfrak{Z}^l_{mn}(\cos\theta^e)e^{-{\bf i} n\psi^e}$ on the group manifold $\mathfrak{K}_6$ of $\SO(4)$. The Laplace-Beltrami operators $\bigtriangleup_L(\mathfrak{K}_6)$ and $\overline{\bigtriangleup}_L(\mathfrak{K}_6)$ are coincide with (\ref{BLO1}) and (\ref{BLO2}). Spherical functions of the first type $f(g)=\mathfrak{M}^l_{mn}(g)$ ($f(\dot{g})=\mathfrak{M}^{\dot{l}}_{\dot{m}\dot{n}}(\dot{g})$) are eigenfunctions of the operator $\bigtriangleup_L(\mathfrak{K}_6)$ ($\overline{\bigtriangleup}_L(\mathfrak{K}_6)$). 
With the aim to find an explicit form of hyperspherical functions on $\mathfrak{Z}^l_{mn}(\cos\theta^e)$, we will use an addition theorem for generalized spherical functions $P^l_{mn}(\cos\theta)$ of the group $\SU(2)$ \cite{Vil65}: \begin{equation}\label{Add1} e^{-{\bf i}(m\varphi+n\psi)}P^l_{mn}(\cos\theta)=\sum_{k=-l}^le^{-{\bf i} k\varphi_2} P^l_{mk}(\cos\theta_1)P^l_{kn}(\cos\theta_2), \end{equation} where the angles $\varphi$, $\psi$, $\theta$, $\theta_1$, $\varphi_2$, $\theta_2$ are related by the formulae \begin{eqnarray} \cos\theta&=&\cos\theta_1\cos\theta_2-\sin\theta_1\sin\theta_2\cos\varphi_2,\label{Add2}\\ e^{{\bf i}\varphi}&=&\frac{\sin\theta_1\cos\theta_2+\cos\theta_1\sin\theta_2\cos\varphi_2+ {\bf i}\sin\theta_2\sin\varphi_2}{\sin\theta},\label{Add3}\\ e^{\frac{{\bf i}(\varphi+\psi)}{2}}&=&\frac{\cos\frac{\theta_1}{2}\cos\frac{\theta_2}{2} e^{{\bf i}\frac{\varphi_2}{2}}-\sin\frac{\theta_1}{2}\sin\frac{\theta_2}{2} e^{-{\bf i}\frac{\varphi_2}{2}}}{\cos\frac{\theta}{2}}.\label{Add4} \end{eqnarray} Let $\cos(\theta+\phi)=\cos\theta^e$ and $\varphi_2=0$, then the formulae (\ref{Add2})--(\ref{Add4}) take the form \begin{eqnarray} \cos\theta^e&=&\cos\theta\cos\phi-\sin\theta\sin\phi,\nonumber\\ e^{{\bf i}\varphi}&=&\frac{\sin\theta\cos\phi+\cos\theta\sin\phi}{\sin\theta^e}=1,\nonumber\\ e^{\frac{{\bf i}(\varphi+\psi)}{2}}&=&\frac{\cos\frac{\theta}{2}\cos\frac{\phi}{2}- \sin\frac{\theta}{2}\sin\frac{\phi}{2}}{\cos\frac{\theta^e}{2}}=1. \nonumber \end{eqnarray} Hence it follows that $\varphi=\psi=0$ and the formula (\ref{Add1}) can be written as \begin{equation}\label{HFSO4} \mathfrak{Z}^l_{mn}(\cos\theta^e)=\sum^l_{k=-l}P^l_{mk}(\cos\theta)P^l_{kn}(\cos\phi). 
\end{equation} $\mathfrak{Z}^l_{mn}(\cos\theta^e)$ are {\it hyperspherical functions of the group $\SO(4)$}\footnote{The functions $\mathfrak{Z}^l_{mn}(\cos\theta^e)$ and $\mathfrak{Z}^{\dot{l}}_{\dot{m}\dot{n}}(\cos\dot{\theta}^e)$ form a representation of the type $(l,0)\oplus(0,\dot{l})$, that is, when $l=\dot{l}$. In the case of tensor representations, when $l\ne\dot{l}$, we arrive at the functions $\mathfrak{Z}^{l\dot{l}}_{mn;\dot{m}\dot{n}}(\cos\theta^e,\cos\dot{\theta}^e)= \mathfrak{Z}^l_{mn}(\cos\theta^e)\mathfrak{Z}^{\dot{l}}_{\dot{m}\dot{n}}(\cos\dot{\theta}^e)$ ({\it generalized hyperspherical functions of} $\SO(4)$), which can be expressed via the product of the two generalized hypergeometric functions $\hypergeom{3}{2}{\alpha,\beta,\gamma}{\delta,\epsilon}{x}$. In the case of Lorentz group, general solutions of relativistic wave equations for arbitrary spin chains (tensor representations) are defined via an expansion in generalized hyperspherical functions $\mathfrak{Z}^{l\dot{l}}_{mn;\dot{m}\dot{n}}(\cos\theta^c,\cos\dot{\theta}^c)$ of $\SO_0(1,3)$, where $\theta^c$, $\dot{\theta}^c$ are complex Euler angles of $\spin_+(1,3)\simeq\SL(2,\hbox{\bb C})$ \cite{Var05}.}. 
Using an explicit expression for the function $P^l_{mn}$ \cite{Vil65,Var06}, we obtain \begin{multline} \mathfrak{Z}^l_{mn}(\cos\theta^e)= \sum^l_{k=-l}{\bf i}^{m+n-2k} \sqrt{\Gamma(l-m+1)\Gamma(l+m+1)\Gamma(l-k+1)\Gamma(l+k+1)}\times\\ \cos^{2l}\frac{\theta}{2}\tan^{m-k}\frac{\theta}{2}\times\\[0.2cm] \sum^{\min(l-m,l+k)}_{j=\max(0,k-m)} \frac{{\bf i}^{2j}\tan^{2j}\dfrac{\theta}{2}} {\Gamma(j+1)\Gamma(l-m-j+1)\Gamma(l+k-j+1)\Gamma(m-k+j+1)}\times\\[0.2cm] \sqrt{\Gamma(l-n+1)\Gamma(l+n+1)\Gamma(l-k+1)\Gamma(l+k+1)} \cos^{2l}\frac{\phi}{2}\tan^{n-k}\frac{\phi}{2}\times\\[0.2cm] \sum^{\min(l-n,l+k)}_{s=\max(0,k-n)} \frac{{\bf i}^{2s}\tan^{2s}\dfrac{\phi}{2}} {\Gamma(s+1)\Gamma(l-n-s+1)\Gamma(l+k-s+1)\Gamma(n-k+s+1)}.\label{PPtan} \end{multline} On the other hand, the function $\mathfrak{Z}^l_{mn}(\cos\theta^e)$ can be expressed via the hypergeometric function. Using hypergeometric-type formulae for $P^l_{mn}$ \cite{Vil65,Var06}, we have at $m\geq n$ \begin{multline} \mathfrak{Z}^l_{mn}(\cos\theta^e)={\bf i}^{m-n}\sqrt{\frac{\Gamma(l+m+1)\Gamma(l-n+1)}{\Gamma(l-m+1)\Gamma(l+n+1)}} \cos^{2l}\frac{\theta}{2}\cos^{2l}\frac{\phi}{2}\times\\ \sum^l_{k=-l}\tan^{m-k}\frac{\theta}{2}\tan^{k-n}\frac{\phi}{2}\times\\ \times\hypergeom{2}{1}{m-l,-k-l}{m-k+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-l,-n-l}{k-n+1}{-\tan^2\frac{\phi}{2}},\quad m\geq k,\;k\geq n; \label{PBFtan1} \end{multline} \begin{multline} \mathfrak{Z}^l_{mn}(\cos\theta^e)=\sqrt{\frac{\Gamma(l+m+1)\Gamma(l-n+1)}{\Gamma(l-m+1)\Gamma(l+n+1)}} \cos^{2l}\frac{\theta}{2}\cos^{2l}\frac{\phi}{2}\times\\ \sum^l_{k=-l}{\bf i}^{m+n-2k}\tan^{m-k}\frac{\theta}{2}\tan^{n-k}\frac{\phi}{2}\times\\ \times\hypergeom{2}{1}{m-l,-k-l}{m-k+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{n-l,-k-l}{n-k+1}{-\tan^2\frac{\phi}{2}},\quad m\geq k,\;n\geq k; \label{PBFtan2} \end{multline} and at $n\geq m$ \begin{multline} \mathfrak{Z}^l_{mn}(\cos\theta^e)={\bf i}^{n-m}\sqrt{\frac{\Gamma(l-m+1)\Gamma(l+n+1)}{\Gamma(l+m+1)\Gamma(l-n+1)}} 
\cos^{2l}\frac{\theta}{2}\cos^{2l}\frac{\phi}{2}\times\\ \sum^l_{k=-l}\tan^{k-m}\frac{\theta}{2}\tan^{n-k}\frac{\phi}{2}\times\\ \times\hypergeom{2}{1}{k-l,-m-l}{k-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{n-l,-k-l}{n-k+1}{-\tan^2\frac{\phi}{2}},\quad k\geq m,\;n\geq k; \label{PBFtan3} \end{multline} \begin{multline} \mathfrak{Z}^l_{mn}(\cos\theta^e)=\sqrt{\frac{\Gamma(l-m+1)\Gamma(l+n+1)}{\Gamma(l+m+1)\Gamma(l-n+1)}} \cos^{2l}\frac{\theta}{2}\cos^{2l}\frac{\phi}{2}\times\\ \sum^l_{k=-l}{\bf i}^{2k-m-n}\tan^{k-m}\frac{\theta}{2}\tan^{k-n}\frac{\phi}{2}\times\\ \times\hypergeom{2}{1}{k-l,-m-l}{k-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-l,-n-l}{k-n+1}{-\tan^2\frac{\phi}{2}},\quad k\geq m,\;k\geq n. \label{PBFtan4} \end{multline} By way of example let us calculate matrix elements $\mathfrak{M}^l_{mn}(g)=e^{-{\bf i} m\varphi^e}\mathfrak{Z}^l_{mn}(\cos\theta^e)e^{-{\bf i} n\psi^e}$ at $l=0,\,1/2,\,1$, where $\mathfrak{Z}^l_{mn}(\cos\theta^e)$ is defined via (\ref{PPtan}) or (\ref{PBFtan1})--(\ref{PBFtan4}). 
The representation matrices at $l=0,\,\frac{1}{2},\,1$ have the following form: \begin{gather} T_0(\varphi^e,\theta^e,\psi^e)=1,\label{T0}\\[0.3cm] T_{\frac{1}{2}}(\varphi^e,\theta^e,\psi^e)=\ar\begin{pmatrix} \mathfrak{M}^{\frac{1}{2}}_{-\frac{1}{2}-\frac{1}{2}} & \mathfrak{M}^{\frac{1}{2}}_{\frac{1}{2}-\frac{1}{2}}\\ \mathfrak{M}^{\frac{1}{2}}_{-\frac{1}{2}\frac{1}{2}} & \mathfrak{M}^{\frac{1}{2}}_{\frac{1}{2}\frac{1}{2}} \end{pmatrix}=\ar\begin{pmatrix} e^{\frac{{\bf i}}{2}\varphi^e}\mathfrak{Z}^{\frac{1}{2}}_{-\frac{1}{2}-\frac{1}{2}}e^{\frac{{\bf i}}{2}\psi^e} & e^{\frac{{\bf i}}{2}\varphi^e}\mathfrak{Z}^{\frac{1}{2}}_{-\frac{1}{2}\frac{1}{2}}e^{-\frac{{\bf i}}{2}\psi^e}\\ e^{-\frac{{\bf i}}{2}\varphi^e}\mathfrak{Z}^{\frac{1}{2}}_{\frac{1}{2}-\frac{1}{2}}e^{\frac{{\bf i}}{2}\psi^e} & e^{-\frac{{\bf i}}{2}\varphi^e}\mathfrak{Z}^{\frac{1}{2}}_{\frac{1}{2}\frac{1}{2}}e^{-\frac{{\bf i}}{2}\psi^e} \end{pmatrix}=\nonumber\\[0.3cm] =\ar\begin{pmatrix} e^{\frac{{\bf i}}{2}\varphi^e}\cos\frac{\theta^e}{2}e^{\frac{{\bf i}}{2}\psi^e} & {\bf i} e^{\frac{{\bf i}}{2}\varphi^e}\sin\frac{\theta^e}{2}e^{-\frac{{\bf i}}{2}\psi^e}\\ {\bf i} e^{-\frac{{\bf i}}{2}\varphi^e}\sin\frac{\theta^e}{2}e^{\frac{{\bf i}}{2}\psi^e} & e^{-\frac{{\bf i}}{2}\varphi^e}\cos\frac{\theta^e}{2}e^{-\frac{{\bf i}}{2}\psi^e} \end{pmatrix}=\nonumber\\[0.3cm] {\renewcommand{\arraystretch}{1.3} =\begin{pmatrix} \left[\cos\frac{\theta}{2}\cos\frac{\phi}{2}- \sin\frac{\theta}{2}\sin\frac{\phi}{2}\right] e^{\frac{{\bf i}(\varphi+\varsigma+\psi+\chi)}{2}} & {\bf i}\left[\cos\frac{\theta}{2}\sin\frac{\phi}{2}+ \sin\frac{\theta}{2}\cos\frac{\phi}{2}\right] e^{\frac{{\bf i}(\varphi+\varsigma-\psi-\chi)}{2}} \\ {\bf i}\left[\cos\frac{\theta}{2}\sin\frac{\phi}{2}+ \sin\frac{\theta}{2}\cos\frac{\phi}{2}\right] e^{\frac{{\bf i}(-\varphi-\varsigma+\psi+\chi)}{2}} & \left[\cos\frac{\theta}{2}\cos\frac{\phi}{2}- \sin\frac{\theta}{2}\sin\frac{\phi}{2}\right] e^{\frac{-{\bf i}(\varphi+\varsigma+\psi+\chi)}{2}}
\end{pmatrix}},\label{T1} \end{gather} \begin{gather} T_1(\varphi^e,\theta^e,\psi^e)=\ar\begin{pmatrix} \mathfrak{M}^1_{-1-1} & \mathfrak{M}^1_{-10} & \mathfrak{M}^1_{-11}\\ \mathfrak{M}^1_{0-1} & \mathfrak{M}^1_{00} & \mathfrak{M}^1_{01}\\ \mathfrak{M}^1_{1-1} & \mathfrak{M}^1_{10} & \mathfrak{M}^1_{11} \end{pmatrix}=\ar \begin{pmatrix} e^{{\bf i}\varphi^e}\mathfrak{Z}^1_{-1-1}e^{{\bf i}\psi^e} & e^{{\bf i}\varphi^e}\mathfrak{Z}^1_{-10} & e^{{\bf i}\varphi^e} \mathfrak{Z}^1_{-11}e^{-{\bf i}\psi^e}\\ \mathfrak{Z}^1_{0-1}e^{{\bf i}\psi^e} & \mathfrak{Z}^1_{00} & \mathfrak{Z}^1_{01}e^{-{\bf i}\psi^e}\\ e^{-{\bf i}\varphi^e}\mathfrak{Z}^1_{1-1}e^{{\bf i}\psi^e} & e^{-{\bf i}\varphi^e}\mathfrak{Z}^1_{10} & e^{-{\bf i}\varphi^e}\mathfrak{Z}^1_{11}e^{-{\bf i}\psi^e} \end{pmatrix}=\nonumber\\[0.3cm] =\ar\begin{pmatrix} e^{{\bf i}\varphi^e}\cos^2\frac{\theta^e}{2}e^{{\bf i}\psi^e} & \frac{{\bf i}}{\sqrt{2}}e^{{\bf i}\varphi^e}\sin\theta^e & -e^{{\bf i}\varphi^e} \sin^2\frac{\theta^e}{2}e^{-{\bf i}\psi^e}\\ \frac{{\bf i}}{\sqrt{2}}\sin\theta^ee^{{\bf i}\psi^e} & \cos\theta^e & \frac{{\bf i}}{\sqrt{2}}\sin\theta^ee^{-{\bf i}\psi^e}\\ -e^{-{\bf i}\varphi^e}\sin^2\frac{\theta^e}{2}e^{{\bf i}\psi^e} & \frac{{\bf i}}{\sqrt{2}}e^{-{\bf i}\varphi^e}\sin\theta^e & e^{-{\bf i}\varphi^e}\cos^2\frac{\theta^e}{2}e^{-{\bf i}\psi^e} \end{pmatrix}=\nonumber \end{gather} \begin{multline} {\renewcommand{\arraystretch}{1.1}=\left(\begin{array}{cc}\scriptstyle \left[\cos^2\frac{\theta}{2}\cos^2\frac{\phi}{2}-\frac{\sin\theta\sin\phi}{2}+ \sin^2\frac{\theta}{2}\sin^2\frac{\phi}{2}\right] e^{{\bf i}(\varphi+\varsigma+\psi+\chi)} &\scriptstyle \left[\frac{{\bf i}}{\sqrt{2}}(\cos\theta\sin\phi+\sin\theta\cos\phi)\right] e^{{\bf i}(\varphi+\varsigma)} \\ \scriptstyle\left[\frac{{\bf i}}{\sqrt{2}}(\cos\theta\sin\phi+\sin\theta\cos\phi)\right] e^{{\bf i}(\psi+\chi)} &\scriptstyle \cos\theta\cos\phi-\sin\theta\sin\phi \\ \scriptstyle-\left[\cos^2\frac{\theta}{2}\sin^2\frac{\phi}{2}+
\frac{\sin\theta\sin\phi}{2}+ \sin^2\frac{\theta}{2}\cos^2\frac{\phi}{2}\right] e^{{\bf i}(-\varphi-\varsigma+\psi+\chi)} &\scriptstyle \left[\frac{{\bf i}}{\sqrt{2}}(\cos\theta\sin\phi+\sin\theta\cos\phi)\right] e^{-{\bf i}(\varphi+\varsigma)} \end{array}\right.}\\ {\renewcommand{\arraystretch}{1.1}\left.\begin{array}{c}\scriptstyle -\left[\cos^2\frac{\theta}{2}\sin^2\frac{\phi}{2}+\frac{\sin\theta\sin\phi}{2}+ \sin^2\frac{\theta}{2}\cos^2\frac{\phi}{2}\right] e^{{\bf i}(\varphi+\varsigma-\psi-\chi)} \\ \scriptstyle\left[\frac{{\bf i}}{\sqrt{2}}(\cos\theta\sin\phi+\sin\theta\cos\phi)\right] e^{-{\bf i}(\psi+\chi)} \\ \scriptstyle\left[\cos^2\frac{\theta}{2}\cos^2\frac{\phi}{2}-\frac{\sin\theta\sin\phi}{2}+ \sin^2\frac{\theta}{2}\sin^2\frac{\phi}{2}\right] e^{-{\bf i}(\varphi+\varsigma+\psi+\chi)} \end{array}\right).}\label{T2} \end{multline} Spherical functions of the second type $f(\varphi^e,\theta^e)=\mathfrak{M}^m_l(\varphi^e,\theta^e,0)= e^{-{\bf i} m\varphi^e}\mathfrak{Z}^m_l(\cos\theta^e)$, where \[ \mathfrak{Z}^m_l(\cos\theta^e)=\sum^l_{k=-l}P^l_{mk}(\cos\theta)P^k_l(\cos\phi) \] is {\it an associated hyperspherical function}, are defined on the surface of the double 2-sphere (\ref{DBS}). The function $\mathfrak{Z}^m_l(\cos\theta^e)$ is an eigenfunction of the Laplace-Beltrami operator $\bigtriangleup_L(S^e_2)$ defined on the double 2-sphere, \[ \bigtriangleup_L(S^e_2)=\frac{\partial^2}{\partial\theta^e{}^2}+ \cot\theta^e\frac{\partial}{\partial\theta^e}+\frac{1}{\sin^2\theta^e} \frac{\partial^2}{\partial\varphi^e{}^2}. 
\] Hypergeometric-type formulae for $\mathfrak{Z}^m_l(\cos\theta^e)$ are \begin{multline} \mathfrak{Z}_l^{m}(\cos\theta^e)={\bf i}^{m}\sqrt{\frac{\Gamma(l+m+1)}{\Gamma(l-m+1)}} \cos^{2l}\frac{\theta}{2}\cos^{2l}\frac{\phi}{2} \sum^l_{k=-l}\tan^{m-k}\frac{\theta}{2}\tan^{k}\frac{\phi}{2}\times\\ \times\hypergeom{2}{1}{m-l,-k-l}{m-k+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-l,-l}{k+1}{-\tan^2\frac{\phi}{2}},\quad m\geq k; \nonumber \end{multline} \begin{multline} \mathfrak{Z}_l^{m}(\cos\theta^e)=\sqrt{\frac{\Gamma(l-m+1)}{\Gamma(l+m+1)}} \cos^{2l}\frac{\theta}{2}\cos^{2l}\frac{\phi}{2} \sum^l_{k=-l}{\bf i}^{2k-m}\tan^{k-m}\frac{\theta}{2}\tan^{k}\frac{\phi}{2}\times\\ \times\hypergeom{2}{1}{k-l,-m-l}{k-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-l,-l}{k+1}{-\tan^2\frac{\phi}{2}},\quad k\geq m. \nonumber \end{multline} We obtain an important particular case from the previous formulae at $m=n=0$. The function $\mathfrak{Z}_l(\cos\theta^e)\equiv \mathfrak{Z}^l_{00}(\cos\theta^e)$ is called {\it a zonal hyperspherical function}. The hypergeometric-type formula for $\mathfrak{Z}_l(\cos\theta^e)$ is \begin{multline} \mathfrak{Z}_l(\cos\theta^e)= \cos^{2l}\frac{\theta}{2}\cos^{2l}\frac{\phi}{2} \sum^l_{k=-l}{\bf i}^{2k}\tan^{k}\frac{\theta}{2}\tan^{k}\frac{\phi}{2}\times\\ \times\hypergeom{2}{1}{k-l,-l}{k+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-l,-l}{k+1}{-\tan^2\frac{\phi}{2}}. \nonumber \end{multline} In its turn, the function $f(\dot{\varphi}^e,\dot{\theta}^e)=e^{{\bf i}\dot{m}\dot{\varphi}^e} \mathfrak{Z}^{\dot{m}}_{\dot{l}}(\cos\dot{\theta}^e)$ (or $f(\dot{\theta}^e)= \mathfrak{Z}_{\dot{l}}(\cos\dot{\theta}^e)$) are defined on the surface of dual sphere (\ref{DDS}). Explicit expressions and hypergeometric-type formulae for $f(\dot{\varphi}^e,\dot{\theta}^e)$ are analogous to the previous expressions for $f(\varphi^e,\theta^e)$. 
\begin{sloppypar} Spherical functions of the third type $f(\varphi,\theta,\psi)=e^{-{\bf i} m\varphi}P^l_{mn}(\cos\theta)e^{-{\bf i} n\psi}$ (or $f(\varsigma,\phi,\chi)=e^{-{\bf i} m\varsigma}P^l_{mn}(\cos\phi) e^{-{\bf i} n\chi}$) are defined on the surface of the real 3-sphere $S^3=\SO(4)/\SO(3)$. These functions are general matrix elements of representations of the group $\SO(3)$. Therefore, we have here representations of $\SO(4)$ restricted to the subgroup $\SO(3)$. Namely,\end{sloppypar} \begin{equation}\label{Rest} \hat{T}^l\downarrow^{\SO(4)}_{\SO(3)}=\sum^l_{m=0}\oplus Q^m, \end{equation} where spherical functions $f(\varphi,\theta,\psi)$ of the representations $Q^m$ of $\SO(3)$ form an orthogonal basis in the Hilbert space $L^2(S^3)$. Various expressions and hypergeometric-type formulae for $f(\varphi,\theta,\psi)$ are given in \cite{Vil65,Var06}. Finally, spherical functions of the fourth type $f(\varphi,\theta)=e^{-{\bf i} m\varphi}P^m_{l}(\cos\theta)\sim Y^m_l(\varphi,\theta)$ (or $f(\varsigma,\phi)=e^{-{\bf i} m\varsigma}P^m_{l}(\cos\phi)\sim Y^m_l(\varsigma,\phi)$) are defined on the surface of the real 2-sphere. We have here representations $\hat{T}^l\downarrow^{\SO(4)}_{\SO(3)}$ of the type (\ref{Rest}), where associated spherical functions $f(\varphi,\theta)\sim Y^m_l(\varphi,\theta)$ of $Q^m$ form an orthogonal basis in $L^2(S^3)$. These representations are the most degenerate for the group $\SO(4)$. \section{Spherical functions of finite-dimensional representations of $\SO_0(1,4)$} Let us come back to the de Sitter group $\SO_0(1,4)$. It has been shown in the section 1 that spherical functions of the first type $f(\mathfrak{q})=\mathfrak{M}^\sigma_{mn}(\mathfrak{q})=e^{-{\bf i} m\varphi^q}\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)e^{-{\bf i} n\psi^q}$ are defined on the group manifold $\mathfrak{S}_{10}$ of $\SO_0(1,4)$. 
In order to find an explicit form of the hyperspherical function
Further, taking into account the expression for $\mathfrak{Z}^\sigma_{mk}(\cos\theta^e)$, we can rewrite (\ref{HFSO14}) in the following form: \begin{equation}\label{HFSO14b} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sum^\sigma_{k=-\sigma} \sum^\sigma_{t=-\sigma}P^\sigma_{mt}(\cos\theta) P^\sigma_{tk}(\cos\phi)\mathfrak{P}^\sigma_{kn}(\cosh\tau). \end{equation} Analogously, for the factorization of $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$ with respect to the Lorentz subgroup $\SO_0(1,3)$ we have \[ \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma} P^\sigma_{mk}(\cos\phi)P^\sigma_{kt}(\cos\theta)\mathfrak{P}^\sigma_{tn}(\cosh\tau). \] We consider here only the factorization of $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$ with respect to the maximal compact subgroup $\SO(4)$. Thus, the formulae (\ref{HFSO14}) and (\ref{HFSO14b}) define {\it a hyperspherical function of the de Sitter group} $\SO_0(1,4)$ with respect to $\SO(4)$. Further, using (\ref{PPtan}), we obtain an explicit expression for $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$, \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)= \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{m+k-2t} \sqrt{\Gamma(\sigma-m+1)\Gamma(\sigma+m+1)\Gamma(\sigma-t+1)\Gamma(\sigma+t+1)}\times\\ \cos^{2\sigma}\frac{\theta}{2}\tan^{m-t}\frac{\theta}{2}\times\\[0.2cm] \sum^{\min(\sigma-m,l+t)}_{j=\max(0,t-m)} \frac{{\bf i}^{2j}\tan^{2j}\dfrac{\theta}{2}} {\Gamma(j+1)\Gamma(\sigma-m-j+1)\Gamma(\sigma+t-j+1)\Gamma(m-t+j+1)}\times\\[0.2cm] \sqrt{\Gamma(\sigma-k+1)\Gamma(\sigma+k+1)\Gamma(\sigma-t+1)\Gamma(\sigma+t+1)} \cos^{2\sigma}\frac{\phi}{2}\tan^{k-t}\frac{\phi}{2}\times\\[0.2cm] \sum^{\min(\sigma-k,\sigma+t)}_{s=\max(0,t-k)} \frac{{\bf i}^{2s}\tan^{2s}\dfrac{\phi}{2}} {\Gamma(s+1)\Gamma(\sigma-k-s+1)\Gamma(\sigma+t-s+1)\Gamma(k-t+s+1)}\times\\[0.2cm] \sqrt{\Gamma(\sigma-n+1)\Gamma(\sigma+n+1)\Gamma(\sigma-k+1)\Gamma(\sigma+k+1)} 
\cosh^{2\sigma}\frac{\tau}{2}\tanh^{n-k}\frac{\tau}{2}\times\\[0.2cm] \sum^{\min(\sigma-n,\sigma+k)}_{p=\max(0,k-n)} \frac{\tanh^{2p}\dfrac{\tau}{2}} {\Gamma(p+1)\Gamma(\sigma-n-p+1)\Gamma(\sigma+k-p+1)\Gamma(n-k+p+1)}.\label{PPBtan} \end{multline} It is obvious that the functions $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$ can also be reduced to hypergeometric functions. Namely, these functions are expressed via the following multiple hypergeometric series\footnote{The hyperspherical functions $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$ of $\SO_0(1,4)$, $\mathfrak{Z}^l_{mn}(\cos\theta^e)$ of $\SO(4)$ and $\mathfrak{Z}^l_{mn}(\cos\theta^c)$ of $\SO_0(1,3)$ can be written in the form of hypergeometric functions of many variables \cite{AK26,Ext76}. So, the functions $\mathfrak{Z}^l_{mn}(\cos\theta^e)$ and $\mathfrak{Z}^l_{mn}(\cos\theta^c)$ can be expressed via the Appell functions, $\mathfrak{Z}^l_{mn}(\cos\theta^e)\sim \Appell{4}{a_1,a_2}{a_3,a_4}{x_1;x_2}$ and $\mathfrak{Z}^l_{mn}(\cos\theta^c)\sim\Appell{4}{a_1,a_2}{a_3,a_4}{x_1;y_1}$, where $x_1=\tan^2\theta/2$, $x_2=\tan^2\phi/2$, $y_1=\tanh^2\tau/2$. In its turn, the function $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$ is reduced to the Lauricella function, $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)\sim \Lauricella{3}{a_1,a_2,a_3}{a_4,a_5}{x_1;x_2;y_1}$. 
From the relations $\spin(4)\inC\kern -0.2em \ell^+_{4,0}\simeqC\kern -0.2em \ell_{0,3}$, where $C\kern -0.2em \ell_{0,3}$ is the algebra of double biquaternions with a double quaternionic division ring $\K\simeq\BH\oplus\BH$; $\spin_+(1,3)\inC\kern -0.2em \ell^+_{1,3}\simeqC\kern -0.2em \ell_{3,0}$, where $C\kern -0.2em \ell_{3,0}$ is the algebra of complex biquaternions with a complex division ring $\K\simeq\hbox{\bb C}$; $\spin_+(1,4)\inC\kern -0.2em \ell^+_{1,4}\simeqC\kern -0.2em \ell_{1,3}$, where $C\kern -0.2em \ell_{1,3}$ is the space-time algebra with a quaternionic division ring $\K\simeq\BH$, we see that there is a close relationship between hypercomplex angles of the group $\spin_+(p,q)$, division rings of $C\kern -0.2em \ell^+_{p,q}$ from the one hand and hypergeometric functions of many variables from the other hand. A detailed consideration of this relationship comes beyond the framework of this paper and will be given in a separate work.} : \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma+m+1)\Gamma(\sigma-n+1)} {\Gamma(\sigma-m+1)\Gamma(\sigma+n+1)}} \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{m-k}\tan^{m-t}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{k-n}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{m-\sigma,-t-\sigma}{m-t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-\sigma,-k-\sigma}{t-k+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{k-\sigma,-n-\sigma}{k-n+1}{\tanh^2\frac{\tau}{2}},\\ \quad m\geq t,\;t\geq k,\;k\geq n; \label{PPBFtan1} \end{multline} \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma+m+1)\Gamma(\sigma-n+1)} {\Gamma(\sigma-m+1)\Gamma(\sigma+n+1)}} \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{m+k-2t}\tan^{m-t}\frac{\theta}{2}\tan^{k-t}\frac{\phi}{2} 
\tanh^{k-n}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{m-\sigma,-t-\sigma}{m-t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-\sigma,-t-\sigma}{k-t+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{k-\sigma,-n-\sigma}{k-n+1}{\tanh^2\frac{\tau}{2}},\\ \quad m\geq t,\;k\geq t,\;k\geq n; \label{PPBFtan2} \end{multline} \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma-m+1)\Gamma(\sigma+n+1)} {\Gamma(\sigma+m+1)\Gamma(\sigma-n+1)}} \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{k-m}\tan^{t-m}\frac{\theta}{2}\tan^{k-t}\frac{\phi}{2} \tanh^{n-k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-\sigma,-m-\sigma}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-\sigma,-t-\sigma}{k-t+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{n-\sigma,-k-\sigma}{n-k+1}{\tanh^2\frac{\tau}{2}},\\ \quad t\geq m,\;k\geq t,\;n\geq k; \label{PPBFtan3} \end{multline} \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma-m+1)\Gamma(\sigma+n+1)} {\Gamma(\sigma+m+1)\Gamma(\sigma-n+1)}} \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{2t-m-k}\tan^{t-m}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{n-k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-\sigma,-m-\sigma}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-\sigma,-k-\sigma}{t-k+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{n-\sigma,-k-\sigma}{n-k+1}{\tanh^2\frac{\tau}{2}},\\ \quad t\geq m,\;t\geq k,\;n\geq k; \label{PPBFtan4} \end{multline} \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma-m+1)\Gamma(\sigma-n+1)} {\Gamma(\sigma+m+1)\Gamma(\sigma+n+1)}} \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{k-m} \frac{\Gamma(\sigma+k+1)}{\Gamma(\sigma-k+1)} 
\tan^{t-m}\frac{\theta}{2}\tan^{k-t}\frac{\phi}{2} \tanh^{k-n}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-\sigma,-m-\sigma}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-\sigma,-t-\sigma}{k-t+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{k-\sigma,-n-\sigma}{k-n+1}{\tanh^2\frac{\tau}{2}},\\ \quad t\geq m,\;k\geq t,\;k\geq n; \label{PPBFtan5} \end{multline} \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma-m+1)\Gamma(\sigma-n+1)} {\Gamma(\sigma+m+1)\Gamma(\sigma+n+1)}} \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{2t-m-k} \frac{\Gamma(\sigma+k+1)}{\Gamma(\sigma-k+1)} \tan^{t-m}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{k-n}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-\sigma,-m-\sigma}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-\sigma,-k-\sigma}{t-k+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{k-\sigma,-n-\sigma}{k-n+1}{\tanh^2\frac{\tau}{2}},\\ \quad t\geq m,\;t\geq k,\;k\geq n; \label{PPBFtan6} \end{multline} \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma+m+1)\Gamma(\sigma+n+1)} {\Gamma(\sigma-m+1)\Gamma(\sigma-n+1)}} \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{m-k} \frac{\Gamma(\sigma-k+1)}{\Gamma(\sigma+k+1)} \tan^{m-t}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{n-k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{m-\sigma,-t-\sigma}{m-t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-\sigma,-k-\sigma}{t-k+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{n-\sigma,-k-\sigma}{n-k+1}{\tanh^2\frac{\tau}{2}},\\ \quad m\geq t,\;t\geq k,\;n\geq k; \label{PPBFtan7} \end{multline} \begin{multline} \mathfrak{Z}^\sigma_{mn}(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma+m+1)\Gamma(\sigma+n+1)} {\Gamma(\sigma-m+1)\Gamma(\sigma-n+1)}} 
\cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{m+k-2t} \frac{\Gamma(\sigma-k+1)}{\Gamma(\sigma+k+1)} \tan^{m-t}\frac{\theta}{2}\tan^{k-t}\frac{\phi}{2} \tanh^{n-k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{m-\sigma,-t-\sigma}{m-t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-\sigma,-t-\sigma}{k-t+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{n-\sigma,-k-\sigma}{n-k+1}{\tanh^2\frac{\tau}{2}},\\ \quad m\geq t,\;k\geq t,\;n\geq k. \label{PPBFtan8} \end{multline} As is known, matrix elements of finite-dimensional representations of $\SO_0(1,4)$ are expressed via the functions $f(\mathfrak{q})=\mathfrak{M}^\sigma_{mn}(\mathfrak{q})=e^{-{\bf i} m\varphi^q}\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)e^{-{\bf i} n\psi^q}$, where $\mathfrak{Z}^\sigma_{mn}(\cos\theta^q)$ is defined by (\ref{PPBtan}) or (\ref{PPBFtan1})--(\ref{PPBFtan8})\footnote{The functions $f(\mathfrak{q})=\mathfrak{M}^\sigma_{mn}(\mathfrak{q})$ are eigenfunctions of the Laplace-Beltrami operator $\bigtriangleup_L(\mathfrak{S}_{10})=-F$ defined on the group manifold $\mathfrak{S}_{10}$ of $\SO_0(1,4)$. An explicit expression for $\bigtriangleup_L(\mathfrak{S}_{10})=-F$ is given by the formula (\ref{FKO}).}. 
For example, let us calculate matrices of finite-dimensional representations at $\sigma=0,\,\frac{1}{2},\,1$: \begin{gather} T_0(\varphi^q,\theta^q,\psi^q)=1,\label{T3}\\[0.3cm] T_{\frac{1}{2}}(\varphi^q,\theta^q,\psi^q)=\ar\begin{pmatrix} \mathfrak{M}^{\frac{1}{2}}_{-\frac{1}{2}-\frac{1}{2}} & \mathfrak{M}^{\frac{1}{2}}_{\frac{1}{2}-\frac{1}{2}}\\ \mathfrak{M}^{\frac{1}{2}}_{-\frac{1}{2}\frac{1}{2}} & \mathfrak{M}^{\frac{1}{2}}_{\frac{1}{2}\frac{1}{2}} \end{pmatrix}=\ar\begin{pmatrix} e^{\frac{{\bf i}}{2}\varphi^q}\mathfrak{Z}^{\frac{1}{2}}_{-\frac{1}{2}-\frac{1}{2}}e^{\frac{{\bf i}}{2}\psi^q} & e^{\frac{{\bf i}}{2}\varphi^q}\mathfrak{Z}^{\frac{1}{2}}_{-\frac{1}{2}\frac{1}{2}}e^{-\frac{{\bf i}}{2}\psi^q}\\ e^{-\frac{{\bf i}}{2}\varphi^q}\mathfrak{Z}^{\frac{1}{2}}_{\frac{1}{2}-\frac{1}{2}}e^{\frac{{\bf i}}{2}\psi^q} & e^{-\frac{{\bf i}}{2}\varphi^q}\mathfrak{Z}^{\frac{1}{2}}_{\frac{1}{2}\frac{1}{2}}e^{-\frac{{\bf i}}{2}\psi^q} \end{pmatrix}=\nonumber\\[0.3cm] =\ar\begin{pmatrix} e^{\frac{{\bf i}}{2}\varphi^q}\cos\frac{\theta^q}{2}e^{\frac{{\bf i}}{2}\psi^q} & {\bf i} e^{\frac{{\bf i}}{2}\varphi^q}\sin\frac{\theta^q}{2}e^{-\frac{{\bf i}}{2}\psi^q}\\ {\bf i} e^{-\frac{{\bf i}}{2}\varphi^q}\sin\frac{\theta^q}{2}e^{\frac{{\bf i}}{2}\psi^q} & e^{-\frac{{\bf i}}{2}\varphi^q}\cos\frac{\theta^q}{2}e^{-\frac{{\bf i}}{2}\psi^q} \end{pmatrix}=\nonumber\\[0.3cm] \end{gather} \begin{multline} {\renewcommand{\arraystretch}{1.3} =\left(\begin{array}{c}\scriptstyle \left[\cos\frac{\theta}{2}\cosh\frac{\tau}{2}\cos\frac{\phi}{2}- \sin\frac{\theta}{2}\cosh\frac{\tau}{2}\sin\frac{\phi}{2}+ {\bf i}\sin\frac{\theta}{2}\sinh\frac{\tau}{2}\cos\frac{\phi}{2}+ {\bf i}\cos\frac{\theta}{2}\sinh\frac{\tau}{2}\sin\frac{\phi}{2}\right] e^{\frac{1}{2}(\epsilon+\varepsilon+\omega+{\bf i}\varphi+{\bf i}\psi-{\bf j}\chi+{\bf k}\varsigma)} \\ \scriptstyle\left[\cos\frac{\theta}{2}\sinh\frac{\tau}{2}\cos\frac{\phi}{2}- \sin\frac{\theta}{2}\sinh\frac{\tau}{2}\sin\frac{\phi}{2}+ {\bf 
i}\sin\frac{\theta}{2}\cosh\frac{\tau}{2}\cos\frac{\phi}{2}+ {\bf i}\cos\frac{\theta}{2}\cosh\frac{\tau}{2}\sin\frac{\phi}{2}\right] e^{\frac{1}{2}(\varepsilon+\omega-\epsilon+{\bf i}\psi-{\bf i}\varphi-{\bf j}\chi-{\bf k}\varsigma)} \end{array} \right.}\\ {\renewcommand{\arraystretch}{1.1} \left.\begin{array}{c} \scriptstyle\left[\cos\frac{\theta}{2}\sinh\frac{\tau}{2}\cos\frac{\phi}{2}- \sin\frac{\theta}{2}\sinh\frac{\tau}{2}\sin\frac{\phi}{2}+ {\bf i}\cos\frac{\theta}{2}\cosh\frac{\tau}{2}\sin\frac{\phi}{2}+ {\bf i}\sin\frac{\theta}{2}\cosh\frac{\tau}{2}\cos\frac{\phi}{2}\right] e^{\frac{1}{2}(\epsilon-\varepsilon-\omega+{\bf i}\varphi-{\bf i}\psi+{\bf j}\chi+{\bf k}\varsigma)} \\ \scriptstyle\left[\cos\frac{\theta}{2}\cosh\frac{\tau}{2}\cos\frac{\phi}{2}- \sin\frac{\theta}{2}\cosh\frac{\tau}{2}\sin\frac{\phi}{2}+ {\bf i}\cos\frac{\theta}{2}\sinh\frac{\tau}{2}\sin\frac{\phi}{2}+ {\bf i}\sin\frac{\theta}{2}\sinh\frac{\tau}{2}\cos\frac{\phi}{2}\right] e^{\frac{1}{2}(-\epsilon-\varepsilon-\omega-{\bf i}\varphi-{\bf i}\psi+{\bf j}\chi-{\bf k}\varsigma)} \end{array}\right)},\label{T4} \end{multline} \begin{gather} T_1(\varphi^q,\theta^q,\psi^q)=\ar\begin{pmatrix} \mathfrak{M}^1_{-1-1} & \mathfrak{M}^1_{-10} & \mathfrak{M}^1_{-11}\\ \mathfrak{M}^1_{0-1} & \mathfrak{M}^1_{00} & \mathfrak{M}^1_{01}\\ \mathfrak{M}^1_{1-1} & \mathfrak{M}^1_{10} & \mathfrak{M}^1_{11} \end{pmatrix}=\ar \begin{pmatrix} e^{{\bf i}\varphi^q}\mathfrak{Z}^1_{-1-1}e^{{\bf i}\psi^q} & e^{{\bf i}\varphi^q}\mathfrak{Z}^1_{-10} & e^{{\bf i}\varphi^q} \mathfrak{Z}^1_{-11}e^{-{\bf i}\psi^q}\\ \mathfrak{Z}^1_{0-1}e^{{\bf i}\psi^q} & \mathfrak{Z}^1_{00} & \mathfrak{Z}^1_{01}e^{-{\bf i}\psi^q}\\ e^{-{\bf i}\varphi^q}\mathfrak{Z}^1_{1-1}e^{{\bf i}\psi^q} & e^{-{\bf i}\varphi^q}\mathfrak{Z}^1_{10} & e^{-{\bf i}\varphi^q}\mathfrak{Z}^1_{11}e^{-{\bf i}\psi^q} \end{pmatrix}=\nonumber\\[0.3cm] =\ar\begin{pmatrix} e^{{\bf i}\varphi^q}\cos^2\frac{\theta^q}{2}e^{{\bf i}\psi^q} & \frac{{\bf i}}{\sqrt{2}}e^{{\bf 
i}\varphi^q}\sin\theta^q & -e^{{\bf i}\varphi^q} \sin^2\frac{\theta^q}{2}e^{-{\bf i}\psi^q}\\ \frac{{\bf i}}{\sqrt{2}}\sin\theta^qe^{{\bf i}\psi^q} & \cos\theta^q & \frac{{\bf i}}{\sqrt{2}}\sin\theta^qe^{-{\bf i}\psi^q}\\ -e^{-{\bf i}\varphi^q}\sin^2\frac{\theta^q}{2}e^{{\bf i}\psi^q} & \frac{{\bf i}}{\sqrt{2}}e^{-{\bf i}\varphi^q}\sin\theta^q & e^{-{\bf i}\varphi^q}\cos^2\frac{\theta^q}{2}e^{-{\bf i}\psi^q} \end{pmatrix},\label{T5} \end{gather} where \[ \sin\theta^q=\sin\theta\cos\phi\cosh\tau+\cos\theta\sin\phi\cosh\tau- {\bf i}\cos\theta\cos\phi\sinh\tau+{\bf i}\sin\theta\sin\phi\sinh\tau, \] \[ \cos\theta^q=\cos\theta\cos\phi\cosh\tau-\sin\theta\sin\phi\cosh\tau+ {\bf i}\cos\theta\sin\phi\sinh\tau+{\bf i}\sin\theta\cos\phi\sinh\tau, \] \begin{multline} \sin^2\frac{\theta^q}{2}=\sin^2\frac{\theta}{2}\cos^2\frac{\phi}{2} \cosh^2\frac{\tau}{2}+\frac{1}{2}\sin\theta\sin\phi\cosh\tau+ \cos^2\frac{\theta}{2}\sin^2\frac{\phi}{2}\cosh^2\frac{\tau}{2}-\\ -\frac{{\bf i}}{2}\left(\sin\frac{\theta}{2}\cos\frac{\phi}{2}+ \cos\frac{\theta}{2}\sin\frac{\phi}{2}\right)\sinh\tau- \cos^2\frac{\theta}{2}\cos^2\frac{\phi}{2}\sinh^2\frac{\tau}{2}- \sin^2\frac{\theta}{2}\sin^2\frac{\phi}{2}\sinh^2\frac{\tau}{2},\nonumber \end{multline} \begin{multline} \cos^2\frac{\theta^q}{2}=\cos^2\frac{\theta}{2}\cos^2\frac{\phi}{2} \cosh^2\frac{\tau}{2}-\frac{1}{2}\sin\theta\sin\phi\cosh\tau+ \sin^2\frac{\theta}{2}\sin^2\frac{\phi}{2}\cosh^2\frac{\tau}{2}+\\ +\frac{{\bf i}}{2}\left(\sin\frac{\theta}{2}\cos\frac{\phi}{2}+ \cos\frac{\theta}{2}\sin\frac{\phi}{2}\right)\sinh\tau- \sin^2\frac{\theta}{2}\cos^2\frac{\phi}{2}\sinh^2\frac{\tau}{2}- \cos^2\frac{\theta}{2}\sin^2\frac{\phi}{2}\sinh^2\frac{\tau}{2}.\nonumber \end{multline} It is easy to see that $T_{\frac{1}{2}}(\varphi^q,\theta^q,\psi^q)$ is the fundamental representation (\ref{Elem4}) of $\Sp(1,1)$. 
Spherical functions of the second type $f(\varphi^q,\theta^q)=\mathfrak{M}^m_\sigma(\varphi^q,\theta^q,0)=e^{-{\bf i} m\varphi^q} \mathfrak{Z}^m_\sigma(\cos\theta^q)$
\hypergeom{2}{1}{k-\sigma,-\sigma}{k+1}{\tanh^2\frac{\tau}{2}},\\ \quad m\geq t,\;k\geq t; \nonumber \end{multline} \begin{multline} \mathfrak{Z}^m_\sigma(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma-m+1)}{\Gamma(\sigma+m+1)}} \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{k-m} \frac{\Gamma(\sigma+k+1)}{\Gamma(\sigma-k+1)} \tan^{t-m}\frac{\theta}{2}\tan^{k-t}\frac{\phi}{2} \tanh^{k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-\sigma,-m-\sigma}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-\sigma,-t-\sigma}{k-t+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{k-\sigma,-\sigma}{k+1}{\tanh^2\frac{\tau}{2}},\\ \quad t\geq m,\;k\geq t; \nonumber \end{multline} \begin{multline} \mathfrak{Z}^m_\sigma(\cos\theta^q)=\sqrt{\frac{\Gamma(\sigma-m+1)}{\Gamma(\sigma+m+1)}} \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{2t-m-k} \frac{\Gamma(\sigma+k+1)}{\Gamma(\sigma-k+1)} \tan^{t-m}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-\sigma,-m-\sigma}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-\sigma,-k-\sigma}{t-k+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{k-\sigma,-\sigma}{k+1}{\tanh^2\frac{\tau}{2}},\\ \quad t\geq m,\;t\geq k. \nonumber \end{multline} The latter formulae hold at any $k$ when $\sigma$ is an half-integer number. When $\sigma$ is an integer number, these formulae hold at $k=0,1,\ldots,\sigma-1,\sigma$. At $k=-\sigma,-\sigma+1,\ldots,0$ we must replace the function $\hypergeom{2}{1}{k-\sigma,-\sigma}{k+1}{\tanh^2\frac{\tau}{2}}$ via $\hypergeom{2}{1}{-\sigma,-k-\sigma}{-k+1}{\tanh^2\frac{\tau}{2}}$ and $\tanh^k\frac{\tau}{2}$ via $\tanh^{-k}\frac{\tau}{2}$. At $m=n=0$ we obtain {\it a zonal hyperspherical function} $\mathfrak{Z}_\sigma(\cos\theta^q)\equiv \mathfrak{Z}^\sigma_{00}(\cos\theta^q)$ of the group $\SO_0(1,4)$. 
Namely, \begin{multline} \mathfrak{Z}_\sigma(\cos\theta^q)= \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{k} \frac{\Gamma(\sigma+k+1)}{\Gamma(\sigma-k+1)} \tan^{t}\frac{\theta}{2}\tan^{k-t}\frac{\phi}{2} \tanh^{k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-\sigma,-\sigma}{t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-\sigma,-t-\sigma}{k-t+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{k-\sigma,-\sigma}{k+1}{\tanh^2\frac{\tau}{2}}, \quad k\geq t; \nonumber \end{multline} \begin{multline} \mathfrak{Z}_\sigma(\cos\theta^q)= \cos^{2\sigma}\frac{\theta}{2}\cos^{2\sigma}\frac{\phi}{2}\cosh^{2\sigma}\frac{\tau}{2}\times\\ \sum^\sigma_{k=-\sigma}\sum^\sigma_{t=-\sigma}{\bf i}^{-k} \frac{\Gamma(\sigma-k+1)}{\Gamma(\sigma+k+1)} \tan^{-t}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{-k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{-\sigma,-t-\sigma}{-t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-\sigma,-k-\sigma}{t-k+1}{-\tan^2\frac{\phi}{2}} \hypergeom{2}{1}{-\sigma,-k-\sigma}{-k+1}{\tanh^2\frac{\tau}{2}}, \quad t\geq k. \nonumber \end{multline} In its turn, the functions $f(\dot{\varphi}^q,\dot{\theta}^q)=e^{{\bf i} m\dot{\varphi}^q} \mathfrak{Z}^{\dot{m}}_{\dot{\sigma}}(\cos\dot{\theta}^q)$ (or $f(\dot{\theta}^q)=\mathfrak{Z}_{\dot{\sigma}}(\cos\dot{\theta}^q)$) are defined on the surface of the dual quaternion sphere $\dot{S}^2_q$. Explicit expressions and hypergeometric-type formulae for $f(\dot{\varphi}^q,\dot{\theta}^q)$ are analogous to the previous expressions for $f(\varphi^q,\theta^q)$. 
Spherical functions of the fourth type $f(\varphi,\theta,\psi)=\mathfrak{M}^\sigma_{mn}(\varphi,\theta,\psi)= e^{-{\bf i} m\varphi}P^\sigma_{mn}(\cos\theta)e^{-{\bf i} n\psi}$ (or $f(\varsigma,\phi,\chi)=\mathfrak{M}^\sigma_{mn}(\varsigma,\phi,\chi)= e^{-{\bf i} m\varsigma}P^\sigma_{mn}(\cos\phi) e^{-{\bf i} n\chi}$) are defined on the surface of the real 3-sphere $S^3=\SO(4)/\SO(3)$. Let $L^2(S^3)$ be a Hilbert space of the functions defined on the sphere $S^3$ in the space $\R^4$. Since $S^3\sim\SO_0(1,4)/P\sim K/M$, then the representations of the principal nonunitary (spherical) series $T_{\omega\sigma}$ are defined by the complex number $\sigma$ and an irreducible unitary representation $\omega$ of the subgroup $M=\SO(3)$. Thus, representations of the group $\SO_0(1,4)$, which have a class 1 with respect to $K=\SO(4)$, are realized in the space $L^2(S^3)$. At this point, spherical functions of the representations $Q^m$ of $\SO(4)$ form an orthogonal basis in $L^2(S^3)$. Therefore, we have here representations of $\SO_0(1,4)$ restricted to the subgroup $\SO(4)$: \[ \hat{T}^\sigma\downarrow^{\SO_0(1,4)}_{\SO(4)}=\sum^l_{m=0}\oplus Q^m. \] \subsection{Spherical functions on the hyperboloid and their applications to hydrogen atom problem} In 1935, using a stereographic projection of the momentum space onto a four-dimensional sphere, Fock showed \cite{Fock} that Schr\"{o}dinger equation for hydrogen atom is transformed into an integral equation for hyperspherical functions defined on the surface of the four-dimensional sphere. This discovery elucidates an intrinsic nature of an additional degeneration of the energy levels in hydrogen atom, and also it allows one to write important relations for wavefunctions (for example, Fock wrote simple expressions for the density matrix of the system of wavefunctions for energy levels with an arbitrary quantum number $n$). 
In 1968, authors of the work \cite{BBC68} showed that Fock integral equation can be written in the form of a Klein-Gordon-type equation for spherical functions defined on the surface of the four-dimensional hyperboloid. ``Square roots" of the Klein-Gordon-type equation are Dirac-like equations (in the paper \cite{BBC68} these equations are called Majorana-type equations), or more general Gel'fand-Yaglom-type equations \cite{GY48}. Equations of this type were first considered by Dirac in 1935 \cite{Dir35}. Here there is an analogy with the usual formulation of the Dirac equation for a hydrogen atom in the Minkowski space-time, but the main difference lies in the fact that Dirac-like equations are defined on the four-dimensional hyperboloid\footnote{As is known, this hyperboloid can be understood as the four-dimensional Minkowski space-time endowed globally with a constant negative curvature.} immersed into a five-dimensional de Sitter space. So, spherical functions of the third type $f(\epsilon,\tau,\varepsilon,\omega)= \mathfrak{M}^\sigma_{mn}(\epsilon,\tau,\varepsilon,\omega)=e^{ -m\epsilon}\mathfrak{P}^\sigma_{mn}(\cosh\tau)e^{-n(\varepsilon+\omega)}$ are defined on the upper sheet $H^4_+$ of the four-dimensional hyperboloid $\left[{\bf x},{\bf x}\right]=1$, where $\mathfrak{P}^\sigma_{mn}(\cosh\tau)$ is a Jacobi function\footnote{Representations of the group $\SU(1,1)\simeq\SL(2,\R)$, known also as a three-dimensional Lorentz group, are expressed via the functions $\mathfrak{P}^\sigma_{mn}(\cosh\tau)$.} considered in details by Vilenkin \cite{Vil65}. 
The functions $\mathfrak{M}^\sigma_{mn}(\epsilon,\tau,\varepsilon,\omega)$ are eigenfunctions of the Laplace-Beltrami operator $\bigtriangleup_L(H^4_+)=-F$ defined on $H^4_+$: \[ \left[\bigtriangleup_L(H^4_+)-\sigma(\sigma+3)\right] \mathfrak{M}^\sigma_{mn}(\epsilon,\tau,\varepsilon,\omega)=0, \] where \[ \bigtriangleup_L(H^4_+)=-\frac{\partial^2}{\partial\tau^2}-\coth\tau \frac{\partial}{\partial\tau}-\frac{1}{\sinh^2\tau}\left[ \frac{\partial^2}{\partial\epsilon^2}-2\cosh\tau\frac{\partial^2} {\partial\epsilon\partial(\varepsilon+\omega)}+\frac{\partial^2} {\partial(\varepsilon+\omega)^2}\right]. \] Or, \[ \left[-\frac{d^2}{d\tau^2}-\coth\tau\frac{d}{d\tau}+\frac{m^2+n^2-2mn\cosh\tau} {\sinh^2\tau}-\sigma(\sigma+3)\right]\mathfrak{P}^\sigma_{mn}(\cosh\tau)=0. \] After substitution $y=\cosh\tau$ this equation can be rewritten as \[ \left[(y^2-1)\frac{d^2}{dy^2}+2y\frac{d}{dy}-\frac{m^2+n^2-2mny}{y^2-1}+ \sigma(\sigma+3)\right]\mathfrak{P}^\sigma_{mn}(y)=0. \] Let us construct a quasiregular representation of the group $\SO_0(1,4)$ on the functions $f(x)$ from $H^4_+$, where $x=(\epsilon,\tau,\varepsilon,\omega)$. Let $L^2(H^4_+)$ be a Hilbert space of the functions on the hyperboloid $H^4_+$ with a scalar product \[ \langle f_1,f_2\rangle=\int\limits_{H^4_+}\overline{f_1(x)}f_2(x)d\mu(x)= \int\limits^\infty_0\int\limits^\infty_0\int\limits^\infty_0 \int\limits^\infty_0\overline{\mathfrak{P}^{\sigma_1}_{mn}(\cosh\tau)} \mathfrak{P}^{\sigma_2}_{mn}(\cosh\tau)e^{-2m\epsilon-2n(\omega+\varepsilon)} \sinh\tau d\tau d\epsilon d\varepsilon d\omega, \] where $d\mu(x)$ is an invariant measure on $H^4_+$ with respect to $\SO_0(1,4)$. This measure is defined by an equality $d\mu(x)=\sinh\tau d\tau d\epsilon d\varepsilon d\omega$. In accordance with (\ref{QEA2}) the range of variables $\epsilon$, $\tau$, $\varepsilon$, $\omega$ is $(-\infty,+\infty)$, but we consider here the upper sheet of the hyperboloid; therefore, the range of these variables is $(0,\infty)$. 
{\it A quasiregular representation} $T$ in the space $L^2(H^4_+)$ is defined by the formula \[ T(\mathfrak{q})f(x)=f(\mathfrak{q}^{-1}x),\quad x\in H^4_+. \] It is easy to show that this representation is unitary. However, $T$ is reducible, and in accordance with Gel'fand-Graev theorem \cite{GGV62} is decomposed into a direct integral of irreducible representations $T^\sigma$ of the principal unitary series ($\sigma=-3/2+i\rho$, $0<\rho<\infty$). Analogously, a quasiregular representation of the group $\SO_0(1,4)$ in a Hilbert space $L^2(C^4_+)$ of the functions on the upper sheet $C^4_+$ of the cone $C^4$ ($C^4_+:\;x^2_0-x^2_1-x^2_2-x^2_3-x^2_4=0,\;x_0>0$) has the following form: \[ T(g)f(x)=f(g^{-1}x),\quad x\in C^4_+. \] This representation is unitary with respect to a scalar product \[ \langle f_1,f_2\rangle=\int\limits_{C^4_+}\overline{f_1(x)}f_2(x)d\mu(x) \] defined on $L^2(C^4_+)$. Here $d\mu(x)$ is an invariant measure on $C^4_+$ with respect to $\SO_0(1,4)$. This representation is reducible. Irreducible unitary representations of the group $\SO_0(1,4)$ can be constructed in a Hilbert space of homogeneous functions on the cone \cite{Vil65}. Let us consider applications of the spherical functions $f(\epsilon,\tau,\varepsilon,\omega)$ to hydrogen and antihydrogen atom problems (about antihydrogen atom see \cite{EH99}). As it has been shown in the work \cite{BBC68} when the internal motion can be described by algebraic methods, as in the case of hydrogen atom, the proposed equation for the motion of the system as a whole (motion of the c.m.) is equivalent to a Majorana-type equation, free from the well-known difficulties such as a spacelike solution. 
As is known, the Bethe-Salpeter equation for two spinors of masses $m_1$ and $m_2$, \[ (\hat{p}_1-m_1)(\hat{p}_2-m_2)\psi(p_1,p_2)=\frac{{\bf i}}{2\pi} \int\int G(p_1,p_2;p^\prime_1,p^\prime_2)\psi(p^\prime_1,p^\prime_2) dp^\prime_1dp^\prime_2, \] in the ladder approximation can be written as follows: \[ (c_1\hat{P}^{(1)}+\hat{p}^{(1)}-m_1)(c_2\hat{P}^{(2)}-\hat{p}^{(2)}-m_2) \psi_P(p)=\frac{{\bf i}}{2\pi}\int G(q)\psi_P(p+q)dq, \] where \begin{gather} P=p_1+p_2,\quad p=c_2p_1-c_1p_2,\nonumber\\ c_1=m_1/(m_1+m_2),\quad c_2=m_2/(m_1+m_2),\nonumber \end{gather} the metric is $g_{\mu\nu}=+1,\,-1,\,-1,\,-1$, and the superscripts on $\hat{P}^{(i)}$ and $\hat{p}^{(i)}$ refer to the $\gamma$ matrices. In this case, projection operators can be defined as \begin{equation}\label{Proj} \Lambda_\pm^{(i)}=[\mathcal{E}_i(p)\pm\mathcal{K}_i]/2\mathcal{E}_i(p), \end{equation} where \begin{gather} \mathcal{E}_i=\left[P^2(m^2_i-p^2)+(p\cdot P)^2\right]^{1/2},\nonumber\\ \mathcal{K}_1=\left[m_1\hat{P}^{(1)}-{\bf i} P^\mu\sigma_{\mu\nu}^{(1)}p^\nu\right],\quad\mathcal{K}_2=\left[m_1\hat{P}^{(2)}+{\bf i} P^\mu\sigma_{\mu\nu}^{(2)}p^\nu\right]\nonumber \end{gather} with \[ \sigma^{(i)}_{\mu\nu}=(1/2{\bf i})\left[\gamma^{(i)}_\mu,\gamma^{(i)}_\nu\right]. \] Further, using the operators (\ref{Proj}), we obtain \begin{equation}\label{Equat} (P^2-\mathcal{K}^2_1-\mathcal{K}^2_2)\varphi(p^T)=-(\Lambda^{(1)}_+\Lambda^{(2)}_+ -\Lambda^{(1)}_-\Lambda^{(2)}_-)\hat{P}^{(1)}\hat{P}^{(2)}\int G(p^T-l)\varphi(l)\delta(l\cdot p)dl, \end{equation} where $p^T_\mu=p_\mu-p^Lu_\mu$ is the transverse relative momenta, and $p^L=p\cdot u$, $u^\mu=P^\mu/|P|$, $\phi(l)=\int^{+\infty}_{-\infty}\psi(l,q^L)dq^L$. The approximation \[ \Lambda^{(1)}_+\Lambda^{(2)}_+ -\Lambda^{(1)}_-\Lambda^{(2)}_-=+1 \] means that we take only positive-energy states for the constituents. 
On the other hand, the choice \[ \Lambda^{(1)}_+\Lambda^{(2)}_+ -\Lambda^{(1)}_-\Lambda^{(2)}_-=-1 \] would have meant taking only negative-energy states for the system and would correspond to charge conjugation for the c.m. motion. Since $\Lambda^{(i)}_+=1$ is equivalent to \[ \mathcal{K}_i=\mathcal{E}_i=(m^2_i-(p^T)^2)^{1/2}|P|, \] then the equation (\ref{Equat}) can be written as \[ \left[P^2-|P|(m_1+m_2-(p^T)^2/2\mu)\right]\varphi(p^T)=P^2\int G(p^T-l)\varphi(l)\delta(l\cdot P)dl, \] where \[ \mu=\frac{m_1m_2}{m_1+m_2}. \] In the case of the hydrogen atom this equation has the form \begin{equation}\label{Hyd} \left[|P|-(m_1+m_2-(p^T)^2/2\mu)\right]\varphi(p^T)=|P|\frac{e^2}{2\pi} \int\frac{1}{(p^T-l)^2}\delta(l\cdot P)\varphi(l)dl. \end{equation} Using the Fock stereographic projection \cite{Fock,BI66} \[ \xi_\mu=2ap_\mu/(a^2-p^2),\quad\xi_4=(a^2+p^2)/(a^2-p^2),\quad \mu=0,\ldots, 3, \] where $p^2=p_\mu p^\mu$ and $a$ is an arbitrary constant, we will project stereographically the four-dimensional $p$-space on a five-dimensional hyperboloid. This projection allows us to rewrite the equation (\ref{Hyd}) in the form of a Klein-Gordon-type equation \begin{equation}\label{KG} (P^2-\mathcal{K}^2)\Psi_P=0 \end{equation} with \[ \mathcal{K}=m_1+m_2-\mu e^4/2N, \] and $N^2$ is the operator $D^T+1$, where $D^T$ is the angular part of the four-dimensional Laplace operator. $\Psi_P(\xi_a)$ form a basis for a representation of the de Sitter group $\SO_0(1,4)$. A ``square root'' of the Klein-Gordon-type equation (\ref{KG}) is a Majorana-type equation \begin{equation}\label{MTE1} \left[\Gamma\cdot P-(m_1+m_2)N+e^4\mu/2N\right]\Psi_P=0 \end{equation} or, \begin{equation}\label{MTE2} \left[\Gamma\cdot P+(m_1+m_2)N-e^4\mu/2N\right]\dot{\Psi}_P=0 \end{equation} where $\Gamma$-matrices behave like components of a five-vector in $\SO_0(1,4)$. Equations (\ref{MTE1}) and (\ref{MTE2}) describe hydrogen and antihydrogen atoms, respectively.
In the equations (\ref{KG})--(\ref{MTE2}) the functions $\Psi_P$ are eigenfunctions of the Laplace-Beltrami operator defined on the surface of the five-dimensional hyperboloid (more precisely speaking, on the upper sheet $H^4_+$ of this hyperboloid for the equation (\ref{MTE1}) and on the lower sheet $H^4_-$ for (\ref{MTE2})). As it has been shown previously, this hyperboloid is a homogeneous space of the de Sitter group $\SO_0(1,4)$. On the other hand, spherical functions $\Psi_P$ are solutions of the equations (\ref{KG})--(\ref{MTE2}), that is, they are wavefunctions, and for that reason $\Psi_P$ play a crucial role in the hydrogen (antihydrogen) atom problem. Let us consider in brief solutions (wavefunctions) of the Majorana-type equations (\ref{MTE1}) and (\ref{MTE2}). With this end in view we must introduce an {\it inhomogeneous de Sitter group} $\ISO_0(1,4)=\SO_0(1,4)\odot T_5$, which is a semidirect product of the subgroup $\SO_0(1,4)$ (connected component) of five-dimensional rotations and a subgroup $T_5$ of five-dimensional translations of the de Sitter space $\R^{1,4}$. The subgroup $T_5$ is a direct product of five one-dimensional translation groups $T_1$, $T_5=T_1\otimes T_1\otimes T_1\otimes T_1\otimes T_1$. At this point, each group $T_1$ is isomorphic to the group $\R^+$ of all positive real numbers. At the restriction to $H^4_+$, the maximal homogeneous space ${\cal M}_{15}=\R^{1,4}\times\mathfrak{S}_{10}$ of $\ISO_0(1,4)$ is reduced to ${\cal M}_9=\R^{1,4}\times H^4_+$.
Let $F(x,\epsilon,\tau,\varepsilon,\omega)$ be a square integrable function on ${\cal M}_9$, that is, \[ \int\limits_{H^4_+}\int\limits_{T_5}|F|^2d^5xd^4g<+\infty, \] then in the case of finite-dimensional representations of $\SO_0(1,4)$ there is an expansion of $F(x,\epsilon,\tau,\varepsilon,\omega)$ in a Fourier-type integral \begin{equation}\label{FTI} F(x,\epsilon,\tau,\varepsilon,\omega)=\sum^\infty_{\sigma=0} \sum^\sigma_{m,n=-\sigma}\int\limits_{T_5}\boldsymbol{\alpha}^\sigma_{mn} e^{{\bf i} px}e^{-m\epsilon-n(\varepsilon+\omega)}\mathfrak{P}^\sigma_{mn}(\cosh\tau)d^5x, \end{equation} where \[ \boldsymbol{\alpha}^\sigma_{mn}=\frac{(-1)^{m-n}(2\sigma+3)}{16\pi^2} \int\limits_{H^4_+}\int\limits_{T_5}Fe^{-{\bf i} px}\mathfrak{P}^\sigma_{mn}(\cosh\tau)e^{-m\epsilon-n(\varepsilon+\omega)}d^5xd^4g, \] and $d^4g=\sinh\tau d\tau d\epsilon d\varepsilon d\omega$ is a Haar measure on the hyperboloid $H^4_+$. Further, let $T$ be an unbounded region in $\R^{1,4}$ and let $\Sigma$ be a surface of the hyperboloid $H^4_+$ (correspondingly, $\dot{\Sigma}$, for the sheet $H^4_-$); then we need to find a function $\boldsymbol{\psi}(g)=(\psi^m_P(g),\dot{\psi}^{\dot{m}}_P(g))^T$ in the whole region $T$. $\boldsymbol{\psi}(g)$ is a continuous function (everywhere in $T$), including the surfaces $\Sigma$ and $\dot{\Sigma}$. At this point, $\left.\phantom{\frac{x}{x}}\psi^m_{P}(g)\right|_\Sigma= F_{m}(g)$, $\left.\phantom{\frac{x}{x}}\dot{\psi}^{\dot{m}}_P(g)\right|_{\dot{\Sigma}}= \dot{F}_{\dot{m}}(g)$, where $F_{m}(g)$ and $\dot{F}_{\dot{m}}(g)$ are square integrable functions (boundary conditions) defined on the surfaces $\Sigma$ and $\dot{\Sigma}$, respectively.
Following the method proposed in \cite{Var03,Var03c,Var03d,Var05}, we can find solutions of the boundary value problem in the form of Fourier-type series \begin{equation}\label{FT1} \psi^m_P=\sum^\infty_{\sigma=0}\sum_k\boldsymbol{f}_{\sigma mk}(r) \sum^\sigma_{n=-\sigma}\boldsymbol{\alpha}^m_{\sigma n} \mathfrak{M}^\sigma_{mn}(\epsilon,\tau,\varepsilon,\omega), \end{equation} \begin{equation}\label{FT2} \dot{\psi}^{\dot{m}}_P(g)=\sum^\infty_{\dot{\sigma}=0}\sum_{\dot{k}} \overset{\ast}{\boldsymbol{f}}_{\dot{\sigma}\dot{m}\dot{k}}(r^\ast) \sum^{\dot{\sigma}}_{\dot{n}=-\dot{\sigma}}\boldsymbol{\alpha}^{\dot{m}} _{\dot{\sigma}\dot{n}}\overset{\ast}{\mathfrak{M}}{}^{\dot{\sigma}}_{\dot{m}\dot{n}} (\epsilon,\tau,\varepsilon,\omega), \end{equation} where \[ \boldsymbol{\alpha}^m_{\sigma n}=\frac{(-1)^n(2\sigma+3)}{16\pi^2} \int\limits_{H^4_+}F_m\mathfrak{M}^\sigma_{mn}(\epsilon,\tau,\varepsilon,\omega) \sinh\tau d\tau d\epsilon d\varepsilon d\omega, \] \[ \boldsymbol{\alpha}^{\dot{m}}_{\dot{\sigma} \dot{n}}=\frac{(-1)^{\dot{n}}(2\dot{\sigma}+3)}{16\pi^2} \int\limits_{H^4_-}\dot{F}_{\dot{m}}\overset{\ast}{\mathfrak{M}}{}^{\dot{\sigma}} _{\dot{m}\dot{n}}(\epsilon,\tau,\varepsilon,\omega) \sinh\tau d\tau d\epsilon d\varepsilon d\omega. \] The indices $k$ and $\dot{k}$ numerate equivalent representations. $\mathfrak{M}^\sigma_{mn}(\epsilon,\tau,\varepsilon,\omega)$ ($\overset{\ast}{\mathfrak{M}}{}^{\dot{\sigma}} _{\dot{m}\dot{n}}(\epsilon,\tau,\varepsilon,\omega)$) are hyperspherical functions defined on the surface $\Sigma$ ($\dot{\Sigma}$) of the four-dimensional hyperboloid $H^4$ of the radius $r$ ($r^\ast$) ($H^4$ can be understood as a four-dimensional sphere with an imaginary radius $r$), $\boldsymbol{f}_{\sigma mk}(r)$ and $\overset{\ast}{\boldsymbol{f}}_{\dot{\sigma}\dot{m}\dot{k}}(r^\ast)$ are radial functions.
Taking into account the subgroup $T_5$, we can rewrite the wavefunctions (\ref{FT1}) and (\ref{FT2}) in terms of Fourier-type integrals (\ref{FTI}) (field operators). \section{Spherical functions of unitary representations of $\SO_0(1,4)$} Spherical functions $\mathfrak{M}^l_{mn}(\varphi^q,\theta^q,\psi^q)$, considered in Section 4, define matrix elements of non-unitary finite-dimensional representations of the group $\SO_0(1,4)$. Following the analogy between $\spin_+(1,3)\simeq\SL(2,\hbox{\bb C})$ and $\spin_+(1,4)\simeq\Sp(1,1)$, we can define finite-dimensional (spinor) representations of $\SO_0(1,4)$ in the space of symmetric polynomials $\Sym_{(k,r)}$ as follows\footnote{As is known, any proper Lorentz transformation $\mathfrak{g}$ corresponds to a fractional linear transformation of the complex plane with the matrix $\begin{pmatrix}\alpha & \beta\\ \gamma & \delta\end{pmatrix}\in\SL(2,\hbox{\bb C})$ \cite{GMS}. In its turn, any proper de Sitter transformation $\mathfrak{q}$ can be identified with a fractional linear transformation $w=(az+b)(cz+d)^{-1}$ of the anti-quaternion plane with the matrix $\begin{bmatrix}a & b\\ c & d\end{bmatrix}\in\Sp(1,1)$ (about quaternion and anti-quaternion planes and their fractional linear transformations see \cite{Roz55}).}: \begin{equation}\label{TenRep} T_{\mathfrak{q}}q(z,\overline{z})=(cz+d)^{l_0+l_1-1} \overline{(cz+d)}^{l_0-l_1+1}q\left(\frac{az+b}{cz+d}; \frac{\overline{az+b}}{\overline{cz+d}}\right), \end{equation} where $a,b,c,d\in\BH$, $k=l_0+l_1-1$, $r=l_0-l_1+1$, and the pair $(l_0,l_1)$ defines an irreducible representation of $\SO_0(1,4)$ in the Dixmier-Str\"{o}m basis \cite{Dix61,Str69}: \[ M_3\mid j^\prime,m^\prime,q,q;l_0,m,l_1\rangle=m^\prime\mid j^\prime,m^\prime,q,q;l_0,m,l_1\rangle, \] \[ M_\pm\mid j^\prime,m^\prime,q,q;l_0,m,l_1\rangle=\left[(j^\prime\mp m^\prime)(j^\prime\pm m^\prime+1)\right]^{\frac{1}{2}}\mid j^\prime,m^\prime\pm 1,q,q;l_0,m,l_1\rangle, \] \begin{multline} P_3\mid
j^\prime,m^\prime,q,q;l_0,m,l_1\rangle=-\alpha(j^\prime+1;q,q) \left[(j^\prime+1)^2-{m^\prime}^2\right]^{\frac{1}{2}}\mid j^\prime+1,m^\prime,q,q;l_0,m,l_1\rangle+\\ +\frac{m^\prime(q+1)q}{j^\prime(j^\prime+1)}\mid j^\prime,m^\prime,q,q;l_0,m,l_1\rangle-\alpha(j^\prime;q,q) \left[{j^\prime}^2-{m^\prime}^2\right]^{\frac{1}{2}}\mid j^\prime-1,m^\prime,q,q;l_0,m,l_1\rangle,\nonumber \end{multline} \begin{multline} P_\pm\mid j^\prime,m^\prime,q,q;l_0,m,l_1\rangle=\\ =\pm\alpha(j^\prime+1;q,q) \left[(j^\prime\pm m^\prime+1)(j^\prime\pm m^\prime+2)\right]^{\frac{1}{2}}\mid j^\prime+1,m^\prime\pm 1,q,q;l_0,m,l_1\rangle+\\ +\frac{(q+1)q}{j^\prime(j^\prime+1)}\left[(j^\prime\mp m^\prime) (j^\prime\pm m^\prime+1)\right]^{\frac{1}{2}}\mid j^\prime,m^\prime\pm 1,q,q;l_0,m,l_1\rangle\mp\\ \mp\alpha(j^\prime;q,q)\left[(j^\prime\mp m^\prime)(j^\prime\mp m^\prime-1)\right]^{\frac{1}{2}}\mid j^\prime-1,m^\prime\pm 1,q,q;l_0,m,l_1\rangle,\nonumber \end{multline} \begin{multline} P_0\mid j^\prime,m^\prime,q,q;l_0,m,l_1\rangle=a(q,q;l_0,l_1)\left[(q+j^\prime+2) (q-j^\prime+1)\right]^{\frac{1}{2}}\mid j^\prime,m^\prime,q+1,q;l_0,m,l_1\rangle+\\ +a(q-1,q;l_0,l_1)\left[(q+j^\prime+1)(q-j^\prime)\right]^{\frac{1}{2}}\mid j^\prime,m^\prime,q-1,q;l_0,m,l_1\rangle+\\ +b(q,q;l_0,l_1)\left[(j^\prime-q)(j^\prime+q+1)\right]^{\frac{1}{2}}\mid j^\prime,m^\prime,q,q+1;l_0,m,l_1\rangle+\\ +b(q,q-1;l_0,l_1)\left[(j^\prime+q)(j^\prime-q+1)\right]^{\frac{1}{2}}\mid j^\prime,m^\prime,q,q-1;l_0,m,l_1\rangle,\nonumber \end{multline} where $M_\pm=M_1\pm{\bf i} M_2$, $P_\pm=P_1\pm{\bf i} P_2$ and \[ \alpha(j^\prime;q,q)=\frac{1}{j^\prime}\left[\frac{({j^\prime}^2-q^2) ((q+1)^2-{j^\prime}^2)}{(2j^\prime+1)(2j^\prime-1)}\right]^{\frac{1}{2}}, \] \[ a(q,q;l_0,l_1)=\left[\frac{(q-l_0+1)(q+l_0+2)((q+\frac{3}{2})^2+l^2_1)} {4(2q+1)(q+1)}\right]^{\frac{1}{2}}, \] \[ b(q,q;l_0,l_1)=\left[\frac{(l_0-q)(l_0+q+1)((q+\frac{1}{2})^2+l^2_1)} {4(2q+1)(q+1)}\right]^{\frac{1}{2}}.
\] The relations between the numbers $l_0$, $l_1$ and $\sigma$, $\dot{\sigma}$ are given by the following formulae: \[ (l_0,l_1)=(\sigma,\sigma+1),\quad(l_0,l_1)=(-\dot{\sigma},\dot{\sigma}+1), \] whence it immediately follows that \begin{equation}\label{RelLL1} \sigma=\frac{l_0+l_1-1}{2},\quad\dot{\sigma}=\frac{l_0-l_1+1}{2}. \end{equation} In the case of principal series representations of $\SO_0(1,4)$ we have\footnote{This relation is a particular case of the most general formula $l_1=-\frac{1}{2}(n-1)+{\bf i}\rho$ for the principal series representations of $\SO_0(1,n)$ \cite{Boy71}.} $l_1=-\frac{3}{2}+{\bf i}\rho$, $\rho\in\R$. Using formulae (\ref{HFSO14b}), (\ref{PPBtan}) and (\ref{RelLL1}), we find that matrix elements of the principal series representations of the group $\SO_0(1,4)$ have the form \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf k}\varsigma)-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)} \times\\ \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}{\bf i}^{m+k-2t} \sqrt{\Gamma(l_0-m+1)\Gamma(l_0+m+1)\Gamma(l_0-t+1)\Gamma(l_0+t+1)}\times\\ \cos^{2l_0}\frac{\theta}{2}\tan^{m-t}\frac{\theta}{2}\times\\[0.2cm] \sum^{\min(l_0-m,l_0+t)}_{j=\max(0,t-m)} \frac{{\bf i}^{2j}\tan^{2j}\dfrac{\theta}{2}} {\Gamma(j+1)\Gamma(l_0-m-j+1)\Gamma(l_0+t-j+1)\Gamma(m-t+j+1)}\times\\[0.2cm] \sqrt{\Gamma(l_0-k+1)\Gamma(l_0+k+1)\Gamma(l_0-t+1)\Gamma(l_0+t+1)} \cos^{2l_0}\frac{\phi}{2}\tan^{k-t}\frac{\phi}{2}\times\\[0.2cm] \sum^{\min(l_0-k,l_0+t)}_{s=\max(0,t-k)} \frac{{\bf i}^{2s}\tan^{2s}\dfrac{\phi}{2}} {\Gamma(s+1)\Gamma(l_0-k-s+1)\Gamma(l_0+t-s+1)\Gamma(k-t+s+1)}\times\\[0.2cm] \sqrt{\Gamma(-\tfrac{1}{2}+{\bf i}\rho-n)\Gamma(-\tfrac{1}{2}+{\bf i}\rho+n) \Gamma(-\tfrac{1}{2}+{\bf i}\rho-k)\Gamma(-\tfrac{1}{2}+{\bf i}\rho+k)} \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\tanh^{n-k}\frac{\tau}{2}\times\\[0.2cm] \sum^{\infty}_{p=\max(0,k-n)} \frac{\tanh^{2p}\dfrac{\tau}{2}} {\Gamma(p+1)\Gamma(-\tfrac{1}{2}+{\bf 
i}\rho-n-p) \Gamma(-\tfrac{1}{2}+{\bf i}\rho+k-p)\Gamma(n-k+p+1)}.\label{PPBtanP} \end{multline} From the latter expression it follows that spherical function $f(\mathfrak{q})$ of the principal series can be defined by means of the function \[ \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf k}\varsigma)}\mathfrak{Z}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn} (\cos\theta^q)e^{-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)}, \] where \[ \mathfrak{Z}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\cos\theta^q)= \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}P^{l_0}_{mt}(\cos\theta) P^{l_0}_{tk}(\cos\phi)\mathfrak{P}^{-\frac{3}{2}+{\bf i}\rho}_{kn}(\cosh\tau). \] Let us now express the spherical function $\mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})$ of the principal series representations of $\SO_0(1,4)$ via multiple hypergeometric series. Using formulae (\ref{PPBtanP}) and (\ref{PPBFtan1})--(\ref{PPBFtan2}), we find \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf k}\varsigma)-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)} \times\\ \sqrt{\frac{\Gamma(l_0+m+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-n)}{\Gamma(l_0-m+1) \Gamma(-\frac{1}{2}+{\bf i}\rho+n)}} \cos^{2l_0}\frac{\theta}{2}\cos^{2l_0}\frac{\phi}{2}\cosh^{-3+2{\bf i}\rho}\frac{\tau}{2} \times\\ \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}{\bf i}^{m-k} \sqrt{\frac{\Gamma(l_0-k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+k)} {\Gamma(l_0+k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-k)}} \tan^{m-t}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{k-n}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{m-l_0,-t-l_0}{m-t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-l_0,-k-l_0}{t-k+1}{-\tan^2\frac{\phi}{2}}\times\\ \hypergeom{2}{1}{k+\frac{3}{2}-{\bf i}\rho,-n+\frac{3}{2}-{\bf i}\rho}{k-n+1}{\tanh^2\frac{\tau}{2}}, \quad m\geq t,\;t\geq k,\;k\geq n; \label{PPBFtanP1} \end{multline} \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf 
i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf k}\varsigma)-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)} \times\\ \sqrt{\frac{\Gamma(l_0+m+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-n)}{\Gamma(l_0-m+1) \Gamma(-\frac{1}{2}+{\bf i}\rho+n)}} \cos^{2l_0}\frac{\theta}{2}\cos^{2l_0}\frac{\phi}{2} \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\times\\ \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}{\bf i}^{m+k-2t} \sqrt{\frac{\Gamma(l_0-k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+k)} {\Gamma(l_0+k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-k)}} \tan^{m-t}\frac{\theta}{2} \tan^{k-t}\frac{\phi}{2} \tanh^{k-n}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{m-l_0,-t-l_0}{m-t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-l_0,-t-l_0}{k-t+1}{-\tan^2\frac{\phi}{2}}\times\\ \hypergeom{2}{1}{k+\frac{3}{2}-{\bf i}\rho,-n+\frac{3}{2}-{\bf i}\rho}{k-n+1} {\tanh^2\frac{\tau}{2}}, \quad m\geq t,\;k\geq t,\;k\geq n; \label{PPBFtanP2} \end{multline} \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf k}\varsigma)-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)} \times\\ \sqrt{\frac{\Gamma(l_0-m+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+n)}{\Gamma(l_0+m+1) \Gamma(-\frac{1}{2}+{\bf i}\rho-n)}} \cos^{2l_0}\frac{\theta}{2}\cos^{2l_0}\frac{\phi}{2} \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\times\\ \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}{\bf i}^{k-m} \sqrt{\frac{\Gamma(l_0+k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-k)} {\Gamma(l_0-k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+k)}} \tan^{t-m}\frac{\theta}{2}\tan^{k-t}\frac{\phi}{2} \tanh^{n-k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-l_0,-m-l_0}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-l_0,-t-l_0}{k-t+1}{-\tan^2\frac{\phi}{2}}\times\\ \hypergeom{2}{1}{n+\frac{3}{2}-{\bf i}\rho,-k+\frac{3}{2}-{\bf i}\rho}{n-k+1} {\tanh^2\frac{\tau}{2}}, \quad t\geq m,\;k\geq t,\;n\geq k; \label{PPBFtanP3} \end{multline} \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf
k}\varsigma)-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)} \times\\ \sqrt{\frac{\Gamma(l_0-m+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+n)}{\Gamma(l_0+m+1) \Gamma(-\frac{1}{2}+{\bf i}\rho-n)}} \cos^{2l_0}\frac{\theta}{2}\cos^{2l_0}\frac{\phi}{2} \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\times\\ \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}{\bf i}^{2t-m-k} \sqrt{\frac{\Gamma(l_0+k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-k)} {\Gamma(l_0-k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+k)}} \tan^{t-m}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{n-k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-l_0,-m-l_0}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-l_0,-k-l_0}{t-k+1}{-\tan^2\frac{\phi}{2}}\times\\ \hypergeom{2}{1}{n+\frac{3}{2}-{\bf i}\rho,-k+\frac{3}{2}-{\bf i}\rho}{n-k+1} {\tanh^2\frac{\tau}{2}}, \quad t\geq m,\;t\geq k,\;n\geq k; \label{PPBFtanP4} \end{multline} \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf k}\varsigma)-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)} \times\\ \sqrt{\frac{\Gamma(l_0-m+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-n)}{\Gamma(l_0+m+1) \Gamma(-\frac{1}{2}+{\bf i}\rho+n)}} \cos^{2l_0}\frac{\theta}{2}\cos^{2l_0}\frac{\phi}{2} \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\times\\ \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}{\bf i}^{k-m} \sqrt{\frac{\Gamma(l_0+k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+k)} {\Gamma(l_0-k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-k)}} \tan^{t-m}\frac{\theta}{2}\tan^{k-t}\frac{\phi}{2} \tanh^{k-n}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-l_0,-m-l_0}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-l_0,-t-l_0}{k-t+1}{-\tan^2\frac{\phi}{2}}\times\\ \hypergeom{2}{1}{k+\frac{3}{2}-{\bf i}\rho,-n+\frac{3}{2}-{\bf i}\rho}{k-n+1} {\tanh^2\frac{\tau}{2}}, \quad t\geq m,\;k\geq t,\;k\geq n; \label{PPBFtanP5} \end{multline} \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf k}\varsigma)-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)} \times\\ 
\sqrt{\frac{\Gamma(l_0-m+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-n)}{\Gamma(l_0+m+1) \Gamma(-\frac{1}{2}+{\bf i}\rho+n)}} \cos^{2l_0}\frac{\theta}{2}\cos^{2l_0}\frac{\phi}{2} \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\times\\ \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}{\bf i}^{2t-m-k} \sqrt{\frac{\Gamma(l_0+k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+k)} {\Gamma(l_0-k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-k)}} \tan^{t-m}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{k-n}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{t-l_0,-m-l_0}{t-m+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-l_0,-k-l_0}{t-k+1}{-\tan^2\frac{\phi}{2}}\times\\ \hypergeom{2}{1}{k+\frac{3}{2}-{\bf i}\rho,-n+\frac{3}{2}-{\bf i}\rho}{k-n+1} {\tanh^2\frac{\tau}{2}}, \quad t\geq m,\;t\geq k,\;k\geq n; \label{PPBFtanP6} \end{multline} \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf k}\varsigma)-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)} \times\\ \sqrt{\frac{\Gamma(l_0+m+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+n)}{\Gamma(l_0-m+1) \Gamma(-\frac{1}{2}+{\bf i}\rho-n)}} \cos^{2l_0}\frac{\theta}{2}\cos^{2l_0}\frac{\phi}{2} \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\times\\ \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}{\bf i}^{m-k} \sqrt{\frac{\Gamma(l_0-k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-k)} {\Gamma(l_0+k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+k)}} \tan^{m-t}\frac{\theta}{2}\tan^{t-k}\frac{\phi}{2} \tanh^{n-k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{m-l_0,-t-l_0}{m-t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{t-l_0,-k-l_0}{t-k+1}{-\tan^2\frac{\phi}{2}}\times\\ \hypergeom{2}{1}{n+\frac{3}{2}-{\bf i}\rho,-k+\frac{3}{2}-{\bf i}\rho}{n-k+1} {\tanh^2\frac{\tau}{2}}, \quad m\geq t,\;t\geq k,\;n\geq k; \label{PPBFtanP7} \end{multline} \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho,l_0}_{mn}(\mathfrak{q})= e^{-m(\epsilon+{\bf i}\varphi+{\bf k}\varsigma)-n(\varepsilon+\omega+{\bf i}\psi-{\bf j}\chi)} \times\\ \sqrt{\frac{\Gamma(l_0+m+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+n)}{\Gamma(l_0-m+1) 
\Gamma(-\frac{1}{2}+{\bf i}\rho-n)}} \cos^{2l_0}\frac{\theta}{2}\cos^{2l_0}\frac{\phi}{2} \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\times\\ \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}{\bf i}^{m+k-2t} \sqrt{\frac{\Gamma(l_0-k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho-k)} {\Gamma(l_0+k+1)\Gamma(-\frac{1}{2}+{\bf i}\rho+k)}} \tan^{m-t}\frac{\theta}{2}\tan^{k-t}\frac{\phi}{2} \tanh^{n-k}\frac{\tau}{2}\times\\ \hypergeom{2}{1}{m-l_0,-t-l_0}{m-t+1}{-\tan^2\frac{\theta}{2}} \hypergeom{2}{1}{k-l_0,-t-l_0}{k-t+1}{-\tan^2\frac{\phi}{2}}\times\\ \hypergeom{2}{1}{n+\frac{3}{2}-{\bf i}\rho,-k+\frac{3}{2}-{\bf i}\rho}{n-k+1} {\tanh^2\frac{\tau}{2}}, \quad m\geq t,\;k\geq t,\;n\geq k. \label{PPBFtanP8} \end{multline} Spherical functions of the second type $f(\varphi^q,\theta^q)=\mathfrak{M}^m_{-\frac{3}{2}+{\bf i}\rho,l_0}(\varphi^q,\theta^q,0)$ of the principal series are defined as \[ \mathfrak{M}^m_{-\frac{3}{2}+{\bf i}\rho,l_0}(\varphi^q,\theta^q,0)=e^{-{\bf i} m\varphi^q}\mathfrak{Z}^m_{-\frac{3}{2}+{\bf i}\rho,l_0}(\cos\theta^q), \] where \[ \mathfrak{Z}_{-\frac{3}{2}+{\bf i}\rho,l_0}^{m}(\cos\theta^q)= \sum^{l_0}_{k=-l_0}\sum^{l_0}_{t=-l_0}P^{l_0}_{mt}(\cos\theta) P^{l_0}_{tk}(\cos\phi)\mathfrak{P}_{-\frac{3}{2}+{\bf i}\rho}^{k}(\cosh\tau). \] Hypergeometric-type formulae for the functions $f(\varphi^q,\theta^q)$ follow directly from (\ref{PPBFtanP1}) to (\ref{PPBFtanP8}) at $n=0$. Spherical functions of the third type $f(\epsilon,\tau,\varepsilon,\omega)= \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho}_{mn}(\epsilon,\tau,\varepsilon,\omega)$ for the principal series representations have the form \[ \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho}_{mn}(\epsilon,\tau,\varepsilon,\omega)= e^{-m\epsilon}\mathfrak{P}^{-\frac{3}{2}+{\bf i}\rho}_{mn}(\cosh\tau)e^{-n(\varepsilon+\omega)}. 
\] The hypergeometric-type formulae are \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho}_{mn}(\epsilon,\tau,\varepsilon,\omega)= e^{-m\epsilon-n(\varepsilon+\omega)}\sqrt{\frac{\Gamma({\bf i}\rho+m-\frac{1}{2}) \Gamma({\bf i}\rho-n-\frac{1}{2})}{\Gamma({\bf i}\rho-m-\frac{1}{2}) \Gamma({\bf i}\rho+n-\frac{1}{2})}}\times\\ \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\tanh^{m-n}\frac{\tau}{2} \hypergeom{2}{1}{m-{\bf i}\rho-\frac{1}{2},-n-{\bf i}\rho-\frac{1}{2}}{m-n+1} {\tanh^2\frac{\tau}{2}},\quad m\ge n;\nonumber \end{multline} \begin{multline} \mathfrak{M}^{-\frac{3}{2}+{\bf i}\rho}_{mn}(\epsilon,\tau,\varepsilon,\omega)= e^{-m\epsilon-n(\varepsilon+\omega)}\sqrt{\frac{\Gamma({\bf i}\rho+n-\frac{1}{2}) \Gamma({\bf i}\rho-m-\frac{1}{2})}{\Gamma({\bf i}\rho-n-\frac{1}{2}) \Gamma({\bf i}\rho+m-\frac{1}{2})}}\times\\ \cosh^{-3+2{\bf i}\rho}\frac{\tau}{2}\tanh^{n-m}\frac{\tau}{2} \hypergeom{2}{1}{n-{\bf i}\rho-\frac{1}{2},-m-{\bf i}\rho-\frac{1}{2}}{n-m+1} {\tanh^2\frac{\tau}{2}},\quad n\ge m.\nonumber \end{multline} In like manner we can define conjugated spherical functions $f(\mathfrak{q})=\mathfrak{M}^{-\frac{3}{2}-{\bf i}\rho,l_0}_{\dot{m}\dot{n}}(\mathfrak{q})$, $f(\dot{\varphi}^q,\dot{\theta}^q)=\mathfrak{M}^{\dot{m}}_{-\frac{3}{2}-{\bf i}\rho,l_0} (\dot{\varphi}^q,\dot{\theta}^q,0)$ and $\dot{f}(\epsilon,\tau,\varepsilon,\omega)=\mathfrak{M}^{-\frac{3}{2}-{\bf i}\rho}_{\dot{m} \dot{n}}(\epsilon,\tau,\varepsilon,\omega)$, since a conjugated representation of $\SO_0(1,4)$ is defined by the pair $\pm(l_0,-l_1)$.
1,108,101,565,833
arxiv
\section{Introduction} \subsection{Introduction} \label{intro} A function $f :[0,1]^2 \rightarrow \mathbb{R}$ is $(c,\alpha)$-mixed H\"older if $$ |f(x^\prime,y) - f(x,y)| \le c |x^\prime - x|^\alpha, \qquad |f(x,y^\prime) - f(x,y)| \le c |y^\prime - y|^\alpha, $$ and $$ |f(x^\prime,y^\prime) - f(x,y^\prime) - f(x^\prime,y) + f(x,y)| \le c (|x^\prime-x| |y^\prime -y|)^\alpha, $$ for all $x,x^\prime,y,y^\prime \in [0,1]$. For example, if $f : [0,1]^2 \rightarrow \mathbb{R}$ satisfies $$ \left|\frac{\partial f}{\partial x} \right| \le c, \quad \left|\frac{\partial f}{\partial y} \right| \le c, \quad \text{and} \quad \left| \frac{\partial^2 f}{\partial x \partial y} \right| \le c \quad \text{on $[0,1]^2$}, $$ then by the mean value theorem $f$ is $(c,1)$-mixed H\"older. In 1963, Smolyak \cite{Smolyak1963} discovered a surprising approximation result for mixed H\"older functions. \begin{lemma}[Smolyak] \label{smolyak} Suppose that $f:[0,1]^2 \rightarrow \mathbb{R}$ is $(c,\alpha)$-mixed H\"older. Then $$ f(x,y) = \sum_{k=0}^m f(x_k,y_{m-k}) - \sum_{k=1}^{m} f(x_{k-1},y_{m-k}) + \mathcal{O} \left(m 2^{-\alpha m} \right), $$ where $x_k$ is the center of the dyadic interval of length $2^{-k}$ that contains $x$, and $y_j$ is the center of the dyadic interval of length $2^{-j}$ that contains $y$. \end{lemma} Observe that the point $(x_k,y_j)$ is the center of a dyadic rectangle of width $2^{-k}$ and height $2^{-j}$; thus, Lemma \ref{smolyak} is a statement about approximating mixed H\"older functions by linear combinations of function values at the center of dyadic rectangles of area $2^{-m}$ and $2^{-m+1}$. We remark that Smolyak \cite{Smolyak1963} actually presented a general $d$-dimensional version of Lemma \ref{smolyak}, and that the ideas of Smolyak were expanded upon by Str\"omberg \cite{Stromberg1998}, and have been developed into a computational tool called sparse grids, see \cite{BungartzGriebel2004}.
The proof of Lemma \ref{smolyak} involves a telescoping series argument and is included below; throughout, we use the notation $f \lesssim g$ when $f \le C g$ for some constant $C > 0$. \begin{proof}[Proof of Lemma \ref{smolyak}] Fix $(x,y) \in [0,1]^2$. For notational brevity set $f_k^j := f(x_k,y_j)$. First, we approximate $f(x,y)$ by the center $f_m^m$ of a $2^{-m}$ by $2^{-m}$ square. Clearly, $$ |f(x,y) - f_m^m| \lesssim 2^{-\alpha m}. $$ Expanding $f_{m}^m$ in successive telescoping series in $\{x_k\}_{k=1}^m$ and $\{y_j\}_{j=1}^m$ gives $$ f_m^m = \sum_{j=1}^m \sum_{k=1}^m \left( f_k^j - f_{k-1}^j - f_k^{j-1} + f_{k-1}^{j-1} \right) + \sum_{l=1}^m \left( f_l^0 - f_{l-1}^0 + f_0^l - f_0^{l-1} \right) + f_0^0. $$ Since $f$ is $(c,\alpha)$-mixed H\"older, it follows that the terms of the double sum satisfy $$ \left| f_k^j - f_{k-1}^j - f_k^{j-1} + f_{k-1}^{j-1} \right| \lesssim 2^{-\alpha(j + k)}. $$ Thus, we can bound the sum of terms in the double sum such that $j + k > m$ by $$ \sum_{j=1}^m \sum_{k=m-j+1}^m \left| f_k^j - f_{k-1}^j - f_k^{j-1} + f_{k-1}^{j-1} \right| \lesssim \sum_{j=1}^m \sum_{k=m-j+1}^m 2^{-\alpha(j+k)} \lesssim m 2^{-\alpha m}. $$ Removing these terms from the double sum and collapsing the telescoping series leaves only terms $f_k^j$ such that $j+k \in \{m, m-1\}$; in particular, we conclude that $$ \left| f_{m}^{m} - \left( \sum_{l=0}^m f_l^{m-l} - \sum_{l=1}^{m} f_{l-1}^{m-l} \right) \right| \lesssim m 2^{-\alpha m}, $$ which completes the proof. \end{proof} \begin{remark} The proof began by approximating $f(x,y)$ to error $\mathcal{O}(2^{-\alpha m})$ by the function value at the center of the dyadic square with side length $2^{-m}$ which contains $(x,y)$. However, it would require $2^{2 m}$ function values to approximate $f$ at every point in the unit square using this method.
In contrast, the telescoping argument in the proof of Lemma \ref{smolyak} achieves an approximation error of $\mathcal{O}(m 2^{-\alpha m})$ while only using function values at the center of dyadic rectangles of area $2^{-m}$ and $2^{-m+1}$; the total number of such rectangles is $(m+1) 2^m + m 2^{m-1}$. \end{remark} \subsection{Main result} Informally speaking, Lemma \ref{smolyak} says that if we are given a specific set of $\sim n \ln n$ samples of a $(c,\alpha)$-mixed H\"older function $f : [0,1]^2 \rightarrow \mathbb{R}$, then we are able to compute an approximation $\tilde{f}$ of $f$ such that $$ \|f - \tilde{f}\|_{L^\infty} = \mathcal{O}(n^{-\alpha} \log n), \quad \text{and} \quad \|f - \tilde{f}\|_{L^2} = \mathcal{O}(n^{-\alpha} \log n), $$ where the $L^2$-norm estimate follows directly from the $L^\infty$-norm estimate. Our main result relaxes the sampling requirement to $\sim n \log^2 n$ random samples and achieves the same $L^2$-norm error estimate up to $\log$ factors. \begin{theorem} \label{thm1} Suppose $f : [0,1]^2 \rightarrow \mathbb{R}$ is a $(c,\alpha)$-mixed H\"older function that we sample at $l$ points $X_1,\ldots,X_l$ chosen uniformly at random from the unit square. Let the location of these points and the function values $f(X_1),\ldots,f(X_l)$ be given. If $l \ge c_1 n \log^2 n$, then we can compute an approximation $\tilde{f}$ such that $$ \|f - \tilde{f} \|_{L^2} = \mathcal{O}(n^{-\alpha} \log^{3/2} n), $$ with probability at least $1 - n^{2-c_1}$, where the implicit constant only depends on the constants $c > 0$ and $c_1 > 0$. \end{theorem} When $\alpha > 1/2$ the theorem implies that we can integrate mixed H\"older functions on the unit square with an error rate that is better than the Monte Carlo rate of $\mathcal{O}(n^{-1/2})$ with high probability. 
\begin{corollary} \label{cor1} Under the assumptions of Theorem \ref{thm1}, if $l \ge c_1 n \log^2 n$, then we can compute an approximation $I$ of the integral of $f$ on $[0,1]^2$ such that $$ \int_{[0,1]^2} f(x) dx = I + \mathcal{O}(n^{-\alpha} \log^{3/2} n), $$ with probability at least $1 - n^{2 -c_1}$. \end{corollary} The proof of this corollary follows immediately from the $L^2$-norm estimate from Theorem \ref{thm1} and the Cauchy--Schwarz inequality. \begin{remark} \label{rmk1} The computational cost of computing $\tilde{f}$ is $\mathcal{O}(n \log^3 n)$ operations of pre-computation, and then $\mathcal{O}(\log n)$ operations for each point evaluation. Furthermore, after pre-computation we can compute the integral of $\tilde{f}$ on the unit square in $\mathcal{O}(n)$ operations. The construction of $\tilde{f}$ is described in \S \ref{proofmain}. \end{remark} \begin{remark} \label{spin} An advantage of using random samples and Theorem \ref{thm1} to approximate a mixed H\"older function over using samples at the center of dyadic rectangles and Lemma \ref{smolyak} is the ability to perform spin cycling. For simplicity of exposition, assume that $f : [0,1]^2 \rightarrow \mathbb{R}$ is a mixed H\"older function on the torus. Let $X_1,\ldots,X_l$ be chosen uniformly at random from $[0,1]^2$, and let the function values $f(X_1),\ldots,f(X_l)$ be given. By Theorem \ref{thm1} we can compute an approximation $\tilde{f}$ of the function $f$; however, as described in \S \ref{proofmain} the computation of $\tilde{f}$ is dependent on the dyadic decomposition of $[0,1]^2$, and this dependence will create artifacts. We call the following method of removing these artifacts spin cycling. Let $\zeta \in [0,1]^2$ be given, and define $f_\zeta(x) = f(x - \zeta)$ where addition is performed on the torus.
By considering the function values $f(X_1),\ldots,f(X_l)$ as values of the function $f_\zeta$ at the uniformly random sample of points $X_1+\zeta,\ldots,X_l+\zeta$, we can use Theorem \ref{thm1} to compute an approximation $\tilde{f}_\zeta$ of the function $f_\zeta$. It follows that $\tilde{f}_\zeta(x + \zeta)$ is an approximation of $f$ with the same accuracy guarantees as $\tilde{f}$. However, the shift $\zeta$ has changed the relation of the function values to the dyadic decomposition of $[0,1]^2$, and thus has changed the resulting artifacts. In general, we can consider a sequence of shifts $\zeta_1,\ldots,\zeta_q \in [0,1]^2$ and define $$ \bar{f}(x) = \frac{1}{q} \sum_{k=1}^q \tilde{f}_{\zeta_k}(x + \zeta_k) \quad \text{for} \quad x \in [0,1]^2, $$ where $\tilde{f}_{\zeta_j}$ is the approximation of the function $f_{\zeta_j}$ computed via Theorem \ref{thm1} using the shift operation described above. We say that $\bar{f}$ is an approximation via Theorem \ref{thm1} with $q$ spin cycles. In \S \ref{example} we provide empirical evidence that spin cycling removes artifacts. We note that when $l \ge c_1 n \log^2 n$ and $c_1 > 2 + \log(q)/\log(n)$, it follows that the accuracy claims of Theorem \ref{thm1} hold for all functions $\tilde{f}_{\zeta_j}$ for $j = 1,\ldots,q$ with high probability. The assumption that $f$ is mixed H\"older on the torus can be relaxed by handling the boundaries appropriately. We emphasize that spin cycling is not possible when using a fixed sample of points at the center of dyadic rectangles and Lemma \ref{smolyak} as any shift moves the points away from the center of dyadic rectangles, which is prohibitive for using Lemma \ref{smolyak}. \end{remark} \section{Preliminaries} \subsection{Notation} Let $\mathcal{D}$ denote the set of dyadic intervals in $[0,1]$; more precisely, $$ \mathcal{D} := \left\{ \left[ (j-1)2^{-k}, j 2^{-k} \right) \subset \mathbb{R} : k \in \mathbb{Z}_{\ge 0} \wedge j \in \{1,\ldots,2^k\} \right\}.
$$ We say that $R = I \times J$ is a dyadic rectangle in the unit square if $I,J \in \mathcal{D}$. The number of dyadic rectangles in the unit square of area $2^{-m}$ is $$ (m+1) 2^m = \# \left\{ R = I \times J : |R| = 2^{-m} \wedge I,J \in \mathcal{D} \right\}. $$ In particular, for each $k = 0,\ldots,m$ there are $2^m$ distinct dyadic rectangles of width $2^{-k}$ and height $2^{m-k}$, which are disjoint and cover the unit square. We illustrate the dyadic rectangles in the unit square of area at least $2^{-3}$ in Figure \ref{fig1}. \begin{figure}[h!] \includegraphics[width=.4\textwidth]{fig01-eps-converted-to.pdf} \caption{The dyadic rectangles of area at least $2^{-3}$ in the unit square.} \label{fig1} \end{figure} Recall that Lemma \ref{smolyak} approximates the value $f(x)$ of a mixed H\"older function by a linear combination of the function values at the centers of dyadic rectangles of area $2^{-m}$ and $2^{-m+1}$ that contain the point $x$. Thus, with respect to the illustration in Figure \ref{fig1}, the approximation formula of Lemma \ref{smolyak} consists of adding the function values at the center of the dyadic rectangles in the lowest row which contain $x$, and subtracting the function values at the center of the dyadic rectangles in the second lowest row which contain $x$. \subsection{Randomized Kaczmarz} In addition to properties of dyadic rectangles, we will use a result of Strohmer and Vershynin \cite{StrohmerVershynin2009} regarding the convergence of a randomized Kaczmarz algorithm. Specifically, Strohmer and Vershynin show that a specific randomized Kaczmarz algorithm converges exponentially fast at a rate that only depends on how well the matrix is conditioned. The following lemma is a special case of their result, which will be sufficient for our purposes. 
\begin{lemma}[Strohmer, Vershynin] \label{randomkaczmarz} Let $A$ be an $N \times n$ matrix where $N \ge n$ whose rows are of equal magnitude, and let $A w = b$ be a consistent linear system of equations. Suppose that $l$ indices $I_1,\ldots,I_l$ are chosen uniformly at random from $\{1,\ldots,N\}$. Let an initial guess at the solution $v_0$ be given. For $k = 1,\ldots,l$ define $$ v_k := v_{k-1} + \frac{b_{I_k} -\langle a_{I_k}, v_{k-1} \rangle }{\|a_{I_k}\|_{\ell^2}^2} a_{I_k}, $$ where $a_j$ denotes the $j$-th row of $A$, and $b_j$ denotes the $j$-th entry of $b$. Then $$ \mathbb{E} \|v_k-w\|^2_{\ell^2} \le (1 - \kappa^{-2})^k \|v_0 - w\|_{\ell^2}^2, $$ for $k = 1,\ldots,l$, where $\kappa^2 := \sum_{j=1}^n \sigma_j^2/\sigma_n^2$ and $\sigma_1,\ldots,\sigma_n$ are the singular values of $A$. \end{lemma} The rate of convergence of the algorithm is determined by the constant $\kappa$, which only depends on the singular values of the matrix $A$. This constant $\kappa$ can be viewed as a type of condition number for the matrix $A$, and can be equivalently defined as the Frobenius norm of $A$ multiplied by the operator norm of the left inverse of $A$. We remark that the convergence of the randomized Kaczmarz algorithm for inconsistent linear systems $A w \approx b + \varepsilon$ is analyzed by Needell \cite{Needell2010}. In the proof of the main result we use Lemma \ref{randomkaczmarz} in combination with a modified version of the error analysis in \cite{Needell2010}. \subsection{Organization} The remainder of the paper consists of the proof of Theorem \ref{thm1} in \S \ref{proofmain} followed by discussion in \S \ref{discussion}. The proof of Theorem \ref{thm1} is organized as follows. In \S \ref{embed}, we define an embedding of the points in the unit square into a larger finite dimensional vector space. In \S \ref{martingale}, we show that inner products of vectors with the defined embedding coordinates have a martingale interpretation. 
In \S \ref{functional}, we show that mixed H\"older functions can be approximated by linear functionals in the embedding coordinates. In \S \ref{random}, we show that the randomized Kaczmarz algorithm can be used to solve a specifically constructed system. In \S \ref{complete}, we use the developed tools to complete the proof of Theorem \ref{thm1}. Finally, in \S \ref{proofrmk} we prove the computational cost claims of Remark \ref{rmk1}. \section{Proof of Theorem \ref{thm1}} \label{proofmain} \subsection{Embedding points} \label{embed} Recall, that there are $m 2^{m-1}$ dyadic rectangles of area $2^{-m+1}$ in the unit square $[0,1]^2$. Let $$ T_1,\ldots,T_{m 2^{m-1}} $$ be an enumeration of these rectangles such that the rectangles $$ T_{k 2^{m-1}+1},\ldots,T_{(k+1) 2^{m-1}} $$ have width $2^{-k}$ and height $2^{k-m+1}$ for $k = 0,\ldots,m-1$. Let $T^+_j$ and $T^-_j$ denote the left and right halves of $T_j$, respectively. Furthermore, let $$ R_1,\ldots,R_{2^m} $$ be an enumeration of the dyadic rectangles of width $1$ and height $2^{-m}$. \begin{definition} \label{def1} We define an embedding $\Psi : [0,1]^2 \rightarrow \mathbb{R}^{(m+2) 2^{m-1}}$ entry-wise by $$ \Psi_j(x) = \chi_{R_j}(x) \quad \text{for} \quad j = 1,\ldots,2^{m}, $$ and $$ \Psi_{2^m +j}(x) = \frac{1}{\sqrt{2}}\left( \chi_{T_j^+}(x) - \chi_{T_j^-}(x) \right), \quad \text{for} \quad j = 1,\ldots,m 2^{m-1}, $$ where $\chi_{R}$ denotes the indicator function for the rectangle $R$. \end{definition} Fix $x \in [0,1]^2$, and let $\beta_0$ be the index of the dyadic rectangle $R_{\beta_0}$ of width $1$ and height $2^{-m}$ that contains $x$. Then, for $k=1,\ldots,m$, let $\beta_k - 2^m$ be the index of the dyadic rectangle $T_{\beta_k - 2^m}$ of width $2^{-k+1}$ and height $2^{k-m}$ that contains $x$. 
Set $\xi_0 := \Psi_{\beta_0}(x) = 1$, and $$ \xi_k := \Psi_{\beta_k}(x) = \frac{1}{\sqrt{2}} \left( \chi_{T^+_{\beta_k - 2^m}}(x) - \chi_{T^-_{\beta_k - 2^m}}(x) \right), \quad \text{for} \quad k = 1,\ldots,m, $$ such that $\xi_k$ is $+1/\sqrt{2}$ or $-1/\sqrt{2}$ depending on if $x$ is contained in the left or right half of $T_{\beta_k-2^m}$, respectively. Then, if $v \in \mathbb{R}^{(m+2)2^{m-1}}$ we have $$ \langle \Psi(x), v \rangle = \sum_{k=0}^m \xi_k v_{\beta_k}, $$ where $\langle \cdot , \cdot \rangle$ denotes the $(m+2)2^{m-1}$-dimensional Euclidean inner product. In the following section we show that partial sums of this inner product can be interpreted as martingales. \subsection{Martingale interpretation} \label{martingale} Suppose that $x \in [0,1]^2$ is chosen uniformly at random, and let the indices $\beta_0,\ldots,\beta_m$ and the scalars $\xi_0,\ldots,\xi_m$ be defined as above. Let $v \in \mathbb{R}^{(m+2) 2^{m-1}}$ be a fixed unit vector. We define the partial sum $Y_r$ by $$ Y_r = \sum_{k=0}^{r} \xi_k v_{\beta_k}, \quad \text{for} \quad r = 0,\ldots,m. $$ We assert that $\{Y_r\}_{k=0}^m$ is a martingale with respect to $\{\beta_0,\xi_1,\ldots,\xi_m\}$, that is, $$ \mathbb{E} \left( Y_{k+1} \big| \beta_0,\xi_1,\ldots,\xi_k \right) = Y_k, \quad \text{for} \quad k = 0,\ldots,m-1. $$ Indeed, this martingale property can be seen by interpreting the partial sums from a geometric perspective. Recall that $\beta_0$ determines the dyadic rectangle $R_{\beta_0}$ of width $1$ and height $2^{-m}$ that contains $x$. Therefore, $\beta_0$ determines the dyadic rectangle $T_{\beta_1 - 2^m}$ of width $1$ and height $2^{-m+1}$ that contains $x$. However, $\beta_0$ provides no information about $\xi_1 = \pm 1/\sqrt{2}$, which is positive or negative depending on if the point $x$ is in the left or right side of $T_{\beta_1-2^m}$, respectively. 
It follows that $$ \mathbb{E} \left( Y_1 \big| \beta_0 \right) = \frac{1}{2} \left( v_{\beta_0} + \frac{1}{\sqrt{2}} v_{\beta_1} \right) + \frac{1}{2} \left( v_{\beta_0} - \frac{1}{\sqrt{2}} v_{\beta_1} \right) = Y_0. $$ More generally, $\beta_k$ and $\xi_k$ determine $\beta_{k+1}$ since together $\beta_k$ and $\xi_k$ determine the dyadic rectangle $T_{\beta_k - 2^m}^{\operatorname{sgn} \xi_k}$ of width $2^{-k}$ and height $2^{m-k}$, which contains $x$. This, in turn, determines the rectangle $T_{\beta_{k+1}-2^m}$ of width $2^{-k}$ and height $2^{k-m+1}$ which contains $x$, but provides no information about which side (left or right) of this rectangle the point $x$ is contained in, that is to say, no information about $\xi_{k+1}$. Hence $$ \mathbb{E} \left( Y_{k+1} \big| \beta_0,\xi_1,\ldots,\xi_k \right) = \frac{1}{2} \left( Y_k + \frac{1}{\sqrt{2}} v_{\beta_{k+1}} \right) + \frac{1}{2} \left( Y_k - \frac{1}{\sqrt{2}} v_{\beta_{k+1}} \right) = Y_k. $$ This martingale property of the partial sums has several useful consequences. \begin{lemma} \label{mart} Suppose that $X$ is chosen uniformly at random from the unit square, and set $Y = \Psi(X)$. Let $v \in \mathbb{R}^{(m+2) 2^{m-1}}$ be a fixed vector of unit length. Then, $$ \mathbb{E} |\langle Y, v \rangle|^2 = 2^{-m}. $$ \end{lemma} \begin{proof} Let $\beta_0,\ldots,\beta_m$ and $\xi_0,\ldots,\xi_m$ be as defined above such that $$ \mathbb{E} |\langle Y, v \rangle|^2 = \mathbb{E} \left| \sum_{k=0}^m \xi_k v_{\beta_k} \right|^2 = \sum_{k_1,k_2=0}^m \mathbb{E} \xi_{k_1} \xi_{k_2} v_{\beta_{k_1}} v_{\beta_{k_2}}. 
$$ If $k_1 > k_2$, then $\xi_{k_2}$ and $\beta_{k_2}$ are determined by $\beta_0,\xi_1,\ldots,\xi_{{k_1}-1}$; we conclude that $$ \mathbb{E} \left( \xi_{k_1} \xi_{k_2} v_{\beta_{k_1}} v_{\beta_{k_2}} \right) = \mathbb{E} \left( \xi_{k_2} v_{\beta_{k_2}} \mathbb{E} \left( \xi_{k_1} v_{\beta_{k_1}} \big| \beta_0,\xi_1,\ldots,\xi_{k_1-1} \right) \right) = 0, $$ where the finally equality follows from the fact that the expected value of $\xi_{k_1} v_{\beta_{k_1}}$ conditional on $\beta_0,\xi_1,\ldots,\xi_{k_1-1}$ is zero by the above described martingale property. An identical argument holds for the case when $k_1 < k_2$ so it follows that $$ \sum_{k_1,k_2=0}^m \mathbb{E} \xi_{k_1} \xi_{k_2} v_{\beta_{k_1}} v_{\beta_{k_2}} = \sum_{k=0}^m \mathbb{E} \xi_{k}^2 v_{\beta_k}^2 = \mathbb{E} v_{\beta_0}^2 + \frac{1}{2} \sum_{k=1}^m \mathbb{E} v_{\beta_k}^2. $$ We can compute this expectation explicitly by noting that the probability that $x$ is contained a given dyadic rectangle is proportional to its area; specifically, we have $$ \mathbb{E} v_{\beta_0}^2 + \frac{1}{2} \sum_{k=1}^m \mathbb{E} v_{\beta_k}^2 = \frac{1}{2^m} \sum_{j=1}^{2^m} v_j^2 + \frac{1}{2} \frac{1}{2^{m-1}} \sum_{k=1}^m \sum_{j=1}^{2^{m-1}} v_{(k+1)2^{m-1} + j}^2 = 2^{-m}, $$ where the final equality follows from collecting terms and using the assumption that $v$ is a unit vector in $\mathbb{R}^{(m+2)2^{m-1}}$. \end{proof} Since embedding $\Psi : [0,1]^2 \rightarrow \mathbb{R}^{(m+2)2^{m-1}}$ is defined using indicator functions of dyadic rectangles of area $2^{-m}$ in $[0,1]^2$, it follows that $\Psi$ is constant on $2^{-m}$ by $2^{-m}$ dyadic squares since all points in such a square are contained in the same collection of dyadic rectangles of area $2^{-m}$ in $[0,1]^2$. This observation leads the following corollary of Lemma \ref{mart}. \begin{corollary} \label{corsing} Let $x_1,\ldots,x_{2^{2m}}$ be a sequence of points such that each $2^{-m}$ by $2^{-m}$ dyadic square contains exactly one point. 
Let $A$ be the $2^{2m} \times (m+2) 2^{m-1}$ matrix whose $j$-th row is $\Psi(x_j)$. Then, $$ \sigma_1 = \cdots = \sigma_{(m+2)2^{m-1}} = 2^{m/2}, $$ where $\sigma_1,\ldots,\sigma_{(m+2)2^{m-1}}$ are the singular values of $A$. \end{corollary} \begin{proof} Let $v \in \mathbb{R}^{(m+2)2^{m-1}}$ be an arbitrary unit vector. We have $$ \|A v\|_{\ell^2}^2 = \sum_{j=1}^{2^{2m}} |\langle \Psi(x_j), v \rangle|^2. $$ However, since all points in each $2^{-m}$ by $2^{-m}$ dyadic square have the same embedding, and since the measures of all such dyadic squares are equal, we have $$ \sum_{j=1}^{2^{2m}} |\langle \Psi(x_j), v \rangle|^2 = 2^{2m} \mathbb{E} | \langle Y, v \rangle|^2, $$ where $Y := \Psi(X)$ for a point $X$ chosen uniformly at random from the unit square. By Lemma \ref{mart} we conclude that $$ \|A v \|_{\ell^2}^2 = 2^{2m} \mathbb{E} | \langle Y, v \rangle|^2 = 2^m, $$ and since $v$ was an arbitrary unit vector the proof is complete. \end{proof} \subsection{Approximation by linear functionals} \label{functional} So far we have constructed an embedding $\Psi : [0,1]^2 \rightarrow \mathbb{R}^{(m+2)2^{m-1}}$, and we have shown that inner products of the form $\langle \Psi(X), v \rangle$ are related to martingales. We have used this relation to show that the collection of all possible embedding vectors form a matrix whose singular values are all $2^{m/2}$. Next, we show that a mixed H\"older function can be approximated by a linear functional in the embedding coordinates. \begin{lemma} \label{lem2} Let $f : [0,1]^2 \rightarrow \mathbb{R}$ be a $(c,\alpha)$-mixed H\"older function. Then, there exists a vector $w \in \mathbb{R}^{(m+2)2^{m-1}}$ such that $$ f(x) = \langle \Psi(x), w \rangle + \mathcal{O}(m 2^{-\alpha m}), \quad \text{for all} \quad x \in [0,1]^2, $$ where the vector $w$ depends on $f$, but is independent of $x$ and is explicitly defined below in Definition \ref{def2}. 
\end{lemma} We construct the vector $w$ using a scheme similar to the construction of Haar wavelets. Let $\mathcal{D}_k^j$ be the collection of dyadic rectangles contained in the unit square of width $2^{-k}$ and area $2^{-j}$. For a given dyadic rectangle $R$, we define $s_r(R)$ by $$ s_r(R) := \sum_{R^\prime \in \mathcal{D}_{r}^{m} : |R \cap R^\prime| > 0} f(c_{R^\prime}) - \sum_{R^\prime \in \mathcal{D}_{r}^{m-1} : |R \cap R^\prime| > 0} f(c_{R^\prime}), $$ where $c_{R^\prime}$ is the center of $R^\prime$. Observe that the first sum in the definition of $s_r(R)$ is over the dyadic rectangles of width $2^{-r}$ and area $2^{-m}$ that intersect $R$, while the second sum is over the dyadic rectangles of width $2^{-r}$ and area $2^{-m+1}$ that intersect $R$. \begin{definition} \label{def2} We define the vector $w \in \mathbb{R}^{(m+2)2^{m-1}}$ entry-wise by $$ w_j = \sum_{r=0}^m 2^{-r} s_r \left(R_j \right) \quad \text{for} \quad j = 1,\ldots,2^m, $$ and $$ w_{2^m+j} = \frac{2^{k_j}}{\sqrt{2}}\sum_{r= k_j}^m 2^{-r}\left( s_r \left( T_{j}^+ \right) - s_r\left( T_{j}^- \right) \right), \quad \text{for} \quad j = 1,\ldots,m 2^{m-1}, $$ where $k_j :=\lfloor j 2^{-k} \rfloor$ is such that $2^{-k_j}$ is the width of the rectangle $T_{j}$. \end{definition} \begin{proof}[Proof of Lemma \ref{lem2}] Let $x$ be a fixed point in the unit square $[0,1]^2$. Recall that we can express the inner product $$ \langle \Psi(x), w \rangle = \sum_{k=0}^m \xi_k w_{\beta_k}, $$ where the scalars $\xi_0,\ldots,\xi_m$ and the indicies $\beta_0,\ldots,\beta_m$ are as defined above. First, let us rewrite Lemma \ref{smolyak} using this notation. We have $$ f(x) = f\left( c_{R_{\beta_0}} \right) + \sum_{k=1}^m f \left( c_{T_{\beta_k - 2^m}^{\operatorname{sgn} \xi_k}} \right) - \sum_{k=1}^m f \left( c_{T_{\beta_k - 2^m}} \right) + \mathcal{O}(m 2^{-\alpha m}). 
Indeed, by definition $R_{\beta_0}$ is the dyadic rectangle of width $1$ and height $2^{-m}$ that contains $x$, $T_{\beta_k-2^m}^{\operatorname{sgn} \xi_k}$ is the dyadic rectangle of width $2^{-k}$ and height $2^{k-m}$ that contains $x$, and $T_{\beta_k-2^m}$ is the dyadic rectangle of width $2^{-k+1}$ and height $2^{k-m}$ that contains $x$.
However, when $r \ge k+1$ we are summing over dyadic rectangles of height at least $2^{k-m+1}$, and any dyadic rectangle of height at least $2^{k-m+1}$ that intersects $T_{\beta_{k+1} - 2^m}$ must also intersect $T_{\beta_{k}-2^m}^{\operatorname{sgn} \xi_k}$, so we conclude the above equality. By applying the identity iteratively as we add each term $\xi_k w_{\beta_k}$ we conclude that $$ \sum_{k=0}^m \xi_k w_{\beta_k} = s_0(R_{\beta_0}) + \sum_{k=1}^m s_k \left(T_{\beta_k - 2^m}^{\operatorname{sgn} \xi_k} \right). $$
We conclude that $$ s_m\left( T_{\beta_m - 2^m}^{\operatorname{sgn} \xi_m} \right) = f \left( c_{T_{\beta_m - 2^m}^{\operatorname{sgn} \xi_m}} \right), $$ Recall that we have already shown that $$ \sum_{k=0}^m \xi_k w_{\beta_k} = s_0(R_{\beta_0}) + \sum_{k=1}^m s_k \left(T_{\beta_k -2^m}^{\operatorname{sgn} \xi_k} \right); $$ substituting in the derived expressions for $s_0(R_{\beta_0})$ and $s_k(T_{\beta_k}^{\operatorname{sgn} \xi_k})$ gives $$ \sum_{k=0}^m \xi_k w_{\beta_k} = f\left( c_{R_{\beta_0}} \right) + \sum_{k=1}^m f \left( c_{T_{\beta_k - 2^m}^{\operatorname{sgn} \xi_k}} \right) - \sum_{k=1}^m f \left( c_{T_{\beta_k - 2^m}} \right), $$ which completes the proof. \end{proof} \subsection{Random projections} \label{random} We have established that in the embedding coordinates $\Psi(x)$ of a point $x \in [0,1]^2$ that Smolyak's Lemma can be rephrased as a result about approximating mixed H\"older functions by linear functionals. Moreover, using the martingale interpretation of inner products of vectors with $\Psi(x)$ we were able to explicitly compute the singular values of the matrix of all possible embedding vectors. In the following we combine these ideas using the randomized Kaczmarz algorithm of Strohmer and Vershynin \cite{StrohmerVershynin2009}. Suppose that $x_1,\ldots,x_{2^{2m}}$ is a sequence of points that contains exactly one point in each $2^{-m}$ by $2^{-m}$ dyadic square in $[0,1]^2$. Let $A$ be the $2^{2m} \times (m+2)2^{m-1}$ dimensional matrix whose $j$-th row is $\Psi(x_j)$. Since the embedding $\Psi(x)$ has $1$ entry of magnitude $1$ and $m$ entries of magnitude $1/\sqrt{2}$, see Definition \ref{def1}, we have $$ \|\Psi(x) \|_{\ell^2} = \sqrt{1+m/2}, $$ for all $x \in [0,1]^2$, and it follows that all of the rows of $A$ have equal magnitude. Suppose that $f : [0,1]^2 \rightarrow \mathbb{R}$ is a $(c,\alpha)$-mixed H\"older function. 
By Lemma \ref{lem2}, there exists a vector $w$ such that $$ | f(x) - \langle \Psi(x), w \rangle | \lesssim m 2^{-\alpha m}. $$ Define $$ \bar{f}(x) := \langle \Psi(x), w \rangle, $$ for all $x \in [0,1]^2$. If $b$ is the $2^{2m}$-dimensional vector whose $j$-th entry is $\bar{f}(x_j)$, then we have a consistent linear system of equations $$ A w = b. $$ By Corollary \ref{corsing} the condition number $\kappa^2$ of $A$ satisfies $$ \kappa^2 := \sum_{j=1}^{(m+2)2^{m-1}} \sigma_j^2/\sigma_{(m+2)2^{m-1}}^2 = (m+2) 2^{m-1}, $$ where $\sigma_1,\ldots,\sigma_{(m+2)2^{m-1}}$ are the singular values of $A$. Observe that sampling points uniformly at random from $[0,1]^2$ and applying the embedding $\Psi$ is equivalent to choosing rows uniformly at random from $A$. Thus, the following result is a direct consequence of applying Lemma \ref{randomkaczmarz} to the consistent linear system of equations $A w = b$ that we constructed above. \begin{lemma} \label{lemrand} Suppose that $l$ points $X_1,\ldots,X_l$ are sampled uniformly at random from $[0,1]^2$. Given an initial vector $v_0 \in \mathbb{R}^{(m+2)2^{m-1}}$, define $$ v_k := v_{k-1} + \frac{\bar{f}(X_k) - \langle \Psi(X_k), v_{k-1} \rangle}{1+m/2} \Psi(X_k). $$ Then, $$ \mathbb{E} \|v_k - w\|^2_{\ell^2} \le \left(1 - \frac{1}{(m+2) 2^{m-1}} \right)^k \|v_0 -w \|^2_{\ell^2}, $$ for $k = 1,\ldots,l$. \end{lemma} Note that $v_k$ is defined using $\bar{f}(x)$ rather than $f(x)$ so that the definition of $v_k$ corresponds to running the randomized Kaczmarz algorithm on the consistent linear system $A w = b$. When we complete the proof of Theorem \ref{thm1} in the following section, we estimate the error caused by replacing $\bar{f}(x)$ by $f(x) = \bar{f}(x) + \mathcal{O}(m 2^{-\alpha m})$. We remark that the expected error for the randomized Kaczmarz algorithm for inconsistent linear systems is analyzed by Needell \cite{Needell2010}. 
Since we need an error estimate that holds with high probability we perform a modified version of the error analysis of Needell. \subsection{Proof of Theorem \ref{thm1}} \label{complete} In this section, we combine the developed tools to complete the proof of Theorem \ref{thm1}. \begin{proof}[Proof of Theorem \ref{thm1}] Suppose that $f : [0,1]^2 \rightarrow \mathbb{R}$ is a $(c,\alpha)$-mixed H\"older function that is sampled at $l$ points $X_1,\ldots,X_l$ chosen uniformly at random from $[0,1]^2$. For some initial vector $v_0^* \in \mathbb{R}^{(m+2)2^{m-1}}$, define $$ v_k^* := v_{k-1}^* + \frac{f(X_k) - \langle \Psi(X_k), v_{k-1}^* \rangle}{1+m/2} \Psi(X_k), $$ for $k = 1,\ldots,l$. Recall that by Lemma \ref{lem2} there exists a vector $w$ such that $$ |f(x) - \langle \Psi(x), w \rangle | \lesssim m 2^{-\alpha m}, $$ and recall that $\bar{f}(x) := \langle \Psi(x), w \rangle$. Suppose that $\varepsilon_k := f(X_k) - \bar{f}(X_k)$. We can write $$ v_k^* = v_k + e_k, $$ where $v_k$ is the vector defined in Lemma \ref{lemrand} by $$ v_k := v_{k-1} + \frac{\bar{f}(X_k) - \langle \Psi(X_k), v_{k-1} \rangle}{1+m/2} \Psi(X_k), $$ and $e_k$ is an error term defined by $$ e_k := e_{k-1} + \frac{\varepsilon_k - \langle \Psi(X_k), e_{k-1} \rangle}{1+m/2} \Psi(X_k). $$ By orthogonality we have $$ \|e_k\|_{\ell^2}^2 = \left\|e_{k-1} - \frac{\langle \Psi(X_k),e_{k-1} \rangle}{1+m/2} \Psi(X_k) \right\|^2_{\ell^2} + \frac{\varepsilon_k^2}{(1+m/2)^2} \| \Psi(X_k)\|^2_{\ell^2}. $$ It follows that $$ \|e_k\|_{\ell^2}^2 \le \|e_{k-1}\|_{\ell^2}^2 + \frac{\varepsilon_k^2}{1+m/2} \lesssim k m 2^{-2 \alpha m}. $$ By the triangle inequality we have $$ \|v^*_k - w\|_{\ell^2} \lesssim \|v_k - w\|_{\ell^2} + \sqrt{k m} 2^{-\alpha m}. $$ Next, we estimate $\|v_k - w\|_{\ell^2}$. From Lemma \ref{lemrand} we have $$ \mathbb{E} \|v_k - w\|^2_{\ell^2} \le \left(1 - \frac{1}{(m+2) 2^{m-1}} \right)^k \|v_0 -w \|^2_{\ell^2}. 
$$ Thus, if $l \ge c_1 \log(2^m) (m+2) 2^{m-1}$, then we have $$ \mathbb{E} \|v_l - w\|^2_{\ell^2} \le 2^{-c_1 m} \|v_0 - w\|_{\ell^2}^2. $$ By the possibility of considering the function $f - f(X_1)$ instead of $f$, we may assume that $|f| \le 2c$ on $[0,1]^2$. It follows that $\|w\|_{\ell^\infty} \le 3c$ when $m$ is large enough. Therefore, if we initialize $v_0$ as the zero vector we have $$ \|v_0 - w\|_{\ell^2}^2 \le 9c^2 (m+2) 2^{m-1}. $$ From this estimate and our above analysis it follows that $$ \mathbb{E} \|v_l - w\|^2_{\ell^2} \le \frac{9 c^2}{2} (m+2) 2^{(1-c_1) m}, $$ when $l \ge c_1 \log(2^m) (m+2) 2^{m-1}$. Observe that $$ l = c_1 \log(2^m) (m+2) 2^{m-1} \le c_1 \log^2(2^m) 2^m, $$ when $m$ is sufficiently large. Thus, if $l \ge c_1 \log^2(2^m) 2^m$, then by Markov's inequality $$ \mathbb{P}(\|v_l - w\|_{\ell^2}^2 \ge m^3 2^{(1-2 \alpha) m}) \le \frac{\mathbb{E} \|v_l-w\|_{\ell^2}^2}{m^3 2^{(1-2\alpha)m}} \le 2^{(2-c_1) m}, $$ when $m$ is large enough in terms of $c$. Recall that we previously showed that $$ \|v_l^* - w\|_{\ell^2} \lesssim \|v_l - w\|_{\ell^2} + \sqrt{m l} 2^{-\alpha m}. $$ If $l = \lceil c_1 \log(2^m)^2 2^m \rceil$, then by our estimate on $\|v_l-w\|_{\ell^2}$ it follows that $$ \|v_l^* - w \|_{\ell^2} \lesssim m^{3/2} 2^{(1/2-\alpha)m}, $$ with probability at least $1 - 2^{(2-c_1)m}$. By Corollary \ref{corsing}, the operator norm of $A$ is $2^{m/2}$, so it follows that $$ \| A v_l^* - A w\|_{\ell^2} \lesssim m^{3/2} 2^{(1-\alpha)m}, $$ with probability at least $1 - 2^{(2-c_1)m}$. Thus, if we define the function $\tilde{f} : [0,1]^2 \rightarrow \mathbb{R}$ by $$ \tilde{f}(x) := \langle \Psi(x), v_l^* \rangle, $$ then we have the estimate $$ \sqrt{\int_{[0,1]^2} |\tilde{f}(x) - \bar{f}(x)|^2 dx} = \|\tilde{f} - \bar{f}\|_{L^2} \lesssim 2^{-\alpha m} m^{3/2}, $$ with probability at least $1 - 2^{(2 - c_1)m}$. 
Since $\|\bar{f} - f\|_{L^2} \lesssim 2^{-\alpha m} m$ it follows that $\|\tilde{f} - f\|_{L^2} \lesssim 2^{-\alpha m} m^{3/2}$. Setting $n :=2^m$ completes the proof. \end{proof} \subsection{Proof of Remark \ref{rmk1}} \label{proofrmk} It remains to verify the computational cost claims of Remark \ref{rmk1}. \begin{proof}[Proof of Remark \ref{rmk1}] Let $n = 2^m$. The computation of $v_l^*$ described in Lemma \ref{lemrand} consists of $\mathcal{O}(n \log^2 n)$ iterations of $\mathcal{O}(\log n)$ operations for a total of $\mathcal{O}(n \log^3 n)$ operations of pre-computation. Then, since $\Psi(x)$ is supported on $\mathcal{O}(\log n)$ entries, the inner product $\langle \Psi(x), v_l^* \rangle$ requires $\mathcal{O}(\log n)$ operations. Finally, approximating the integral of $f$ amounts to approximating the function $f$ at each point, taking the sum, and dividing by $n^2$: $$ \left| \int_{[0,1]^2} f(x) - \frac{1}{n^2} \left\langle \sum_{j=1}^{n^2} \Psi(x_j), w \right\rangle \right| \lesssim n^{-\alpha} \log^{3/2} n. $$ However, this naive approach would require $\mathcal{O}(n^2)$ operations. Instead, we make the observation that $$ \sum_{j=1}^{n^2} \Psi(x_j) = g, $$ where $g$ is the vector whose first $n$ entries are equal to $1$ and which is zero elsewhere. Indeed, after the first $n$ entries, each entry is $+1/\sqrt{2}$ and $-1/\sqrt{2}$ for an equal number of embedding vectors. It follows that $$ \left| \int_{[0,1]^2} f(x) - \frac{1}{n} \left\langle g, w \right\rangle \right| \lesssim n^{-\alpha} \log^{3/2} n; $$ the computation of the inner product $\langle g,w \rangle$ only requires $\mathcal{O}(n)$ operations so the proof is complete. \end{proof} \section{Discussion} \label{discussion} \subsection{Illustration} \label{example} Suppose that $f : [0,1]^2 \rightarrow \mathbb{R}$ is the function defined by $$ f(x,y) = \sin(20 x^2 + 10 y) \sin(\pi x) \sin(\pi y), \quad \text{for} \quad (x,y) \in [0,1]^2. 
$$ The function $f$ is $(c,1)$-mixed H\"older for some $c > 0$ since the partial derivatives $\partial f/\partial x$, $\partial f/\partial y$, and $\partial^2 f / (\partial x \partial y)$ are bounded in $[0,1]^2$. As a baseline, in Figure \ref{fig2} we plot the function $f$, and the approximation of $f$ via the method of Smolyak (Lemma \ref{smolyak}) with $m = 7$ such that $n := 2^m = 128$. \begin{figure}[h!] \begin{tabular}{cc} \includegraphics[width=.45\textwidth]{fig02a-eps-converted-to.pdf} & \includegraphics[width=.45\textwidth]{fig02b-eps-converted-to.pdf} \\ \end{tabular} \caption{Function $f$ (left) and approximation via Lemma \ref{smolyak} (right).} \label{fig2} \end{figure} Next, we set $c_1 = 8$ and sample $l = c_1 n \log^2 n$ points uniformly at random from $[0,1]^2$. In Figure \ref{fig3}, we plot the approximation of $f$ via Theorem \ref{thm1}, and the approximation of $f$ via Theorem \ref{thm1} with $n=128$ spin cycles. In particular, the spin cycles are performed by considering $f$ as a function on the torus, generating a sequence of random shifts $\zeta_1,\ldots,\zeta_{n} \in [0,1]^2$, and using the method of Remark \ref{spin}. \begin{figure}[h!] \begin{tabular}{cc} \includegraphics[width=.45\textwidth]{fig03a-eps-converted-to.pdf} & \includegraphics[width=.45\textwidth]{fig03b-eps-converted-to.pdf} \end{tabular} \caption{Approximation via Theorem \ref{thm1} (left) and approximation via Theorem \ref{thm1} with $128$ spin cycles as in Remark \ref{spin} (right).} \label{fig3} \end{figure} The plots in Figure \ref{fig3} provide empirical evidence that spin cycling as described in Remark \ref{spin} reduces artifacts. Developing quantitative estimates for improvements in approximation accuracy resulting from spin cycling is an interesting theoretical problem for future study. \subsection{Discussion} There are several possible extensions and applications of Theorem \ref{thm1}. 
Informally speaking, we have shown that in $2$-dimensions the sampling requirements for the method of Smolyak \cite{Smolyak1963} can be relaxed from a specific set of points at the center of dyadic rectangles to a similar number of random samples. As previously noted, Smolyak \cite{Smolyak1963} presented a general $d$-dimensional version of Lemma \ref{smolyak} so an immediate question for future study is the extension of Theorem \ref{thm1} to the $d$-dimensional cube. This would require defining a more sophisticated embedding $\Psi$ that retains an analog of the martingale property established in \S \ref{martingale}. It may also be interesting to consider generalizations of Theorem \ref{thm1} to abstract dyadic trees as discussed by M.~Gavish and R.~R.~Coifman in \cite{GavishCoifman2012}. There are also interesting theoretical questions in $2$-dimensions related to random matrix theory. Given a collection of $l$ points $X_1,\ldots,X_l$ chosen uniformly at random from $[0,1]^2$, we can consider the $l \times (m+2)2^{m-1}$ dimensional matrix $B$ whose $j$-th row is $\Psi(X_j)$, where $\Psi$ is the embedding defined in Definition \ref{def1}. The rows of $B$ are independent, and the inner product of a vector with a row of $B$ is a martingale sum, see \S \ref{martingale}. It would be interesting to develop quantitative high probability estimates on the singular values of $B$. Finally, we note that the method of Smolyak \cite{Smolyak1963} has been developed into a computational method called sparse grids, see \cite{BungartzGriebel2004}. The relaxation to random sampling and the ability to perform spin cycles may prove useful for certain applications. In particular, it may be interesting to consider applications of Theorem \ref{thm1} in the Fourier domain, where the mixed H\"older condition is very natural. 
Recently, M.~Griebel and J.~Hamaekers \cite{GriebelHamaekers2014} have developed a fast discrete Fourier transform on sparse grids, which could potentially be used in combination with Theorem \ref{thm1}. \subsection*{Acknowledgements} The author would like to thank Raphy Coifman for many fruitful discussions.
1,108,101,565,834
arxiv
\section{Introduction} In the growing world of machine learning and data analytics, scholars are finding new and innovative ways to solve real-world problems. One solution comes by way of an intersection between healthcare, sports statistics, and data science. Within the realm of Major League Baseball (MLB), pitchers are regarded as the most important roster position. They often are among the highest paid players and are crucial to a franchise’s success, but they are more at risk to suffer an injury that sidelines them for over a complete season. The ulnar collateral ligament (UCL) is a small ligament in the elbow that controls the strength and stability of a pitcher’s throwing arm. Due to repetitive strain, it is not uncommon for pitchers to tear it partially or completely during their career. Repairing this injury requires UCL reconstruction surgery, as known informally as Tommy John surgery. In this podium abstract, we want to investigate whether we can use machine learning techniques to predict the UCL injury by analyzing online pitcher data. There are a multitude of previous related work that focuses on using machine learning methods to predict injuries or the risk of injuries within many different sports~\cite{casals2017sports,huang2021data,jauhiainen2021new,whiteside2016predictors}. For example, Whiteside et al.~\cite{whiteside2016predictors} relied on only \emph{\textbf{104} pitchers} as input information for which they based their significant findings, and used linear regression, naïve bayes and support vector machine (SVM) as the classifier. Huang and Jiang~\cite{huang2021data} proposed and defined an artificial intelligence (AI) framework that can be used to develop AI injury prediction solutions across many sports. However, using only \emph{\textbf{21} soccer players} which takes away from the validity of their accuracy measures since there could be bias within such a minimal number of subjects. Furthermore, Jauhiainen et al. 
(2020) used the physical data from \emph{\textbf{314} young basketball and floorball players} to train linear regression and random forest models to predict injury risk. Although existing work can predict the risk of injuries, the small size of the datasets may lead to biased predictions. To solve this issue, in this abstract, we aim to create a new, large, and publicly available dataset that is recorded from pitchers’ play to predict the need for Tommy John surgery. Besides, several machine learning approaches, such as K-Nearest Neighbors, Naïve Bayes, and Decision Trees, are used to validate the usability of this new dataset. \section{Methods} \subsection{Dataset Creation} With the rise of Sabermetrics, the statistical analysis of baseball, it has become quite easy to access player statistics from nearly the beginning of the MLB. The data used for this research come from Stathead (\url{https://stathead.com/}), a publicly sourced sports almanac that is specialized toward information research across all American professional sports. From Stathead, 47 features are recorded on 8,503 pitchers in each of their rookie seasons. Since the first Tommy John procedure took place in 1974, all the pitcher data collected are from after that year, and only contain statistics regarding a pitcher’s rookie season. These features range from physical features like height, weight, and handedness as well as pitching statistics such as total games played, total innings pitched, hits allowed, runs allowed, and others of that nature. The target variable comes from another publicly sourced repository that logs all professional baseball players who have undergone Tommy John surgery (\url{https://bit.ly/3hIY9Ox}). By joining the two datasets by player name, a complete data pool was formed having both the prior discussed features and a binary target of undergoing the surgery or not per pitcher. We split the collected dataset into training and testing sets with a ratio of 8:2 according to the year information. 
Though it is not uncommon for MLB pitchers to face this injury, there is class imbalance in the Tommy John binary classification target. A ratio of nearly 10:1 is observed, with there being 7,677 negative cases (no injury) and 826 positive cases (injury occurred). \subsection{Data Preprocessing and Model Training} The data are clean and complete for the most part. Any null data points are handled according to what best fits the feature type; that is, they are replaced with the mean value of a column or a zero value where appropriate. There are also some variables, such as the pitchers’ team, whether they played in the National or American League, and handedness, which are converted into numerical classifiers. Six supervised learning models are explored in this work, including K-Nearest Neighbors (KNN), Naïve Bayes (NB), tree algorithms such as XGBoost, Random Forest (RF), and regular Decision Tree (DT), and a multilayer perceptron (MLP). Since this is an imbalanced classification problem, it requires manipulation in sampling of the training set to increase model learning for the positive class. In this work, we use oversampling of the positive target classification. In addition, grid search cross-validation is used for hyperparameter tuning. In conjunction, feature selection is also explored due to the high dimensionality of the data most likely being detrimental to both predictive ability and computational efficiency. To do this, K-Best feature selection is used to reduce the features used in modeling from 47 down to 13. This not only raises the performance in training and testing, but it also significantly reduces run times, which increase exponentially due to cross-validation and the nature of the tree-based models that are explored. \section{Results} Since the dataset is imbalanced, we use the Receiver Operating Characteristic/Area Under the Curve (ROCAUC) score as the evaluation metric. Table~\ref{table:ta1} shows the results of the different approaches. 
We can observe that although the MLP is a simple deep learning-based approach, it achieves the best performance compared with the traditional classification methods. These results show that using advanced deep learning models may be helpful for boosting the performance. \begin{table}[!h] \centering \begin{tabular}{|c|c|c|c|c|c|c|} \hline \textbf{Model} & KNN & NB & XGboost & RF & DT & MLP\\\hline \textbf{ROCAUC} & 0.5702 & 0.5463 & 0.6068 & 0.6143& 0.6329 & \textbf{0.6740} \\ \hline \end{tabular} \caption{Result comparison.} \label{table:ta1} \end{table} \section{Discussion} As mentioned before, this dataset is significantly imbalanced. To make the two classes balanced, we tried three sampling techniques: oversampling of the positive target classification, undersampling of the negative target classification, and SMOTE sampling, which combines the tactics of oversampling and undersampling. However, we found that oversampling of the positive cases served best in training the models. In the future, we will explore more advanced sampling techniques to further improve the performance. Though the predictive capability of the models discussed is not as strong as the researchers hoped to find, a relationship is present. Injury prediction is a complex task and requires a great deal of data manipulation through sampling and feature selection to correctly undertake any form of adequate modeling. It is hoped that this research will serve as a foundation for future progress in this area, as a proof of concept has been established. It has powerful implications not only for the MLB, but across all professional sports and maybe even higher-level college athletics. \makeatletter \renewcommand{\@biblabel}[1]{\hfill #1.} \makeatother \bibliographystyle{vancouver}
1,108,101,565,835
arxiv
\section{\uppercase{Polarization basis vectors} \label{sec:polarizationBasis}} Assuming $k^A=(k^0,0,0,k^3)$, we define the polarization basis vectors for four-vectors as \begin{align*} &\epsilon^A_{(1^-,1)}=\frac{1}{\sqrt{{2}}}\left( \begin{array}{c} 0\\ 1\\ i\\ 0\\\end{array} \right), \;\;\;\epsilon^A_{(1^-,-1)}=\frac{1}{\sqrt{{2}}}\left( \begin{array}{c} 0\\ -1\\ i\\ 0\\\end{array} \right), \\ &\epsilon^A_{(1^-,0)}=\frac{1}{k}\left( \begin{array}{c} k^3\\ 0\\ 0\\ k^0\\\end{array} \right), \;\;\; \epsilon^A_{(0^+,0)}=\frac{1}{k}\left( \begin{array}{c} k^0\\ 0\\ 0\\ k^3\\\end{array} \right). \numberthis \end{align*} The basis vectors satisfy the orthonormal and completeness conditions, \begin{align} \epsilon^{*A}_{(J_1^{P_1},m_1)}\epsilon_{A,(J_2^{P_2},m_2)} &= P_1\delta_{J_1,J_2}\delta_{P_1,P_2}\delta_{m_1,m_2}, \label{eqn:basisOrtho1} \\ \sum_{J,P,m} P\,\epsilon^{A}_{(J^P,m)}\epsilon^*_{B,(J^P,m)} &= \delta^{A}_{B}. \label{eqn:basisComplete1} \end{align} For the higher rank tensors, we can apply the addition rules for angular momentum. For example, a $(2,0)$-tensor $f^{AB}$ can be decomposed as \begin{align*} f^{AB} &\in (\mathbf{0}^+\oplus \mathbf{1}^-)\!\otimes\!(\mathbf{0}^+\oplus \mathbf{1}^-)\\ &=\!(\mathbf{0}^+\otimes \mathbf{0}^+)\!\oplus\! (\mathbf{0}^+\otimes \mathbf{1}^-)\!\oplus\! (\mathbf{1}^-\otimes \mathbf{0}^+)\!\oplus\! (\mathbf{1}^-\otimes \mathbf{1}^-)\\ &= \mathbf{0}^+ \oplus \mathbf{1}^- \oplus \mathbf{1}^- \oplus (\mathbf{0}^+\oplus \mathbf{1}^+\oplus \mathbf{2}^+). \numberthis\label{20decomp} \end{align*} The polarization basis is obtained using Clebsch-Gordan coefficients\footnote{We adopt the notation of the Particle Data Group, which can be found at \url{http://pdg.lbl.gov/2008/reviews/clebrpp.pdf}.}. 
For example, some basis elements $\epsilon^{AB}_{(J_1^{P_1},J_2^{P_2},J'^{P'},m_{J'})}$ for $J_1^{P_1}\otimes J_2^{P_2}$ are \begin{align*} &\epsilon^{AB}_{(1^-,1^-,2^+,+2)}= \epsilon^{A}_{(1^-,1)}\otimes\epsilon^{B}_{(1^-,1)}, \\ &\epsilon^{AB}_{(1^-,1^-,2^+,+1)}= \!\frac{1}{\sqrt{2}}\!\left( \epsilon^{A}_{(1^-,1)}\otimes\epsilon^{B}_{(1^-,0)} + \epsilon^{A}_{(1^-,0)}\otimes\epsilon^{B}_{(1^-,1)} \right). \numberthis \end{align*} Moreover, one can decompose any $(2,0)$ tensor into $f^{AB}=\mathfrak{s}^{AB}+\mathfrak{a}^{AB}$, where $\mathfrak{s}$ is symmetric and $\mathfrak{a}$ is antisymmetric. One observes from the Clebsch-Gordan coefficients table that the $\mathbf{2^+}$ and $\mathbf{0^+}$ sectors are symmetric in $A$ and $B$, whereas the $\mathbf{1^+}$ sector is antisymmetric. One may thus make a linear combination of the two $\mathbf{1^-}$ sectors to obtain a symmetric sector and an antisymmetric sector, \begin{align} \epsilon^{AB}_{(\texttt{sym},1^-,m)} &\equiv \frac{1}{\sqrt{2}}\left( \epsilon^{AB}_{(0^+,1^-,1^-,m)} + \epsilon^{AB}_{(1^-,0^+,1^-,m)} \right) \\ \epsilon^{AB}_{(\texttt{ant},1^-,m)} &\equiv \frac{1}{\sqrt{2}}\left( \epsilon^{AB}_{(0^+,1^-,1^-,m)} - \epsilon^{AB}_{(1^-,0^+,1^-,m)} \right). \end{align} Hence, we can conclude that the symmetric part of (\ref{20decomp}) is $\mathbf{2^+} \oplus \mathbf{1^-} \oplus \mathbf{0^+} \oplus \mathbf{0^+}$, which has $5+3+1+1=10$ degrees of freedom, and the antisymmetric part is $\mathbf{1^+} \oplus \mathbf{1^-}$, which has $3+3=6$ degrees of freedom, all as expected. One can similarly decompose the $A^{ABC}$ fields, which are antisymmetric on $A$ and $B$, into \begin{align*} A^{ABC} &\in (\mathbf{1}^+\oplus \mathbf{1}^-)\otimes (\mathbf{0}^+\oplus \mathbf{1}^-)\\ &= \mathbf{1}^+ \oplus (\mathbf{0}^-\oplus\mathbf{1}^-\oplus\mathbf{2}^-)\oplus\mathbf{1}^-\oplus(\mathbf{0}^+\oplus\mathbf{1}^+\oplus\mathbf{2}^+). 
\\ &= \mathbf{0}^-\oplus\mathbf{0}^+\oplus 2(\mathbf{1}^-)\oplus 2(\mathbf{1}^+)\oplus \mathbf{2}^-\oplus \mathbf{2}^+, \end{align*} for which the basis is straightforwardly constructed following an analogous approach to that illustrated above. The bases for higher rank tensors satisfy similar orthonormality and completeness conditions to \eqref{eqn:basisOrtho1} and \eqref{eqn:basisComplete1}: \begin{align} &\epsilon^{*\alpha}_{(i_1,J_1^{P_1},m_1)}\epsilon_{\alpha,(i_2,J_2^{P_2},m_2)} = P_1\delta_{i_1,i_2}\delta_{J_1,J_2}\delta_{P_1,P_2}\delta_{m_1,m_2} \label{eqn:basisOrtho} \\ &\sum_{i,j,P,m}\left(P\,\epsilon^{\alpha}_{(i,J^P,m)}\epsilon^*_{\beta,(i,J^P,m)} \right) = \mathbb{I}^{\alpha}_{\beta}, \label{eqn:basisComplete} \end{align} where $i$ is the label of the basis in the spin sector $J^P$, as there might be more than one basis in a sector. The $\alpha$ and $\beta$ indices are shorthand for some generic indices, such as $\alpha=A_1A_2...A_n$. We can write the basis vectors together with its corresponding column vector $\mathbf{e}_{a}$ indicating the field (see (10) in \cite{Lin2019a}) in bra-ket notation $\ket{i,J^P,m}$, and the SPOs in \cite{Lin2019a} are related with those polarization basis vectors by \begin{equation} \hat{P}_{ij}(J^P) = \sum_{m}\ket{i,J^P,m}\bra{j,J^P,m}. \end{equation} Note that the bras and kets here do not denote a quantum state, but are used merely to denote the field decomposition in a straightforward manner. We are taking inspiration from \cite{Dicus2005, Buoninfante2016} in this section. \section{\uppercase{Proca and Stueckelberg theories} \label{sec:AppProca}} In this appendix, we illustrate the methods used in this paper in the context of the more familiar and much simpler Proca and Stueckelberg theories. 
Proca theory contains a massive vector field $B_\mu$ and has the free-field Lagrangian, \begin{equation} \mathcal{L}_{\text{Pr}} =-\tfrac{1}{4} \left(\de_\mu B_\nu - \de_\nu B_\mu \right) \left(\de^\mu B^\nu - \de^\nu B^\mu \right) + \tfrac{1}{2} m^2 B_\mu B^\mu, \end{equation} with $m>0$, which has no gauge freedoms. The corresponding SPOs are \begin{equation} \mathsf{P}(0^+) = \bordermatrix{ ~ & B_{\mu} \cr B^*_{\rho} & \Omega_{\mu\rho} }\;,\qquad \mathsf{P}(1^-) = \bordermatrix{ ~ & B_{\mu} \cr B^*_{\rho} & \Theta_{\mu\rho} }, \end{equation} where $\Omega^{\mu\rho}=k^{\mu} k^{\rho}/k^2$, and $\Theta^{\mu\rho}=\eta^{\mu\rho}-k^{\mu} k^{\rho}/k^2$. The $a$ matrices of the theory are \begin{equation} a(0^+) = \bordermatrix{ ~ & B_{\mu} \cr B^*_{\mu} & m^2 }\;,\;\; a(1^-) = \bordermatrix{ ~ & B_{\mu} \cr B^*_{\mu} & -k^2+m^2 }, \end{equation} which are identical to the $b$ matrices because there are no gauge invariances and source constraints. Therefore, the $0^+$ sector is nonpropagating and the $1^-$ sector corresponds to a $k^{-2}$ propagator. Thus, Proca theory satisfies the alternative PCR condition in \cite{Lin2019a}, and hence we classify it as PCR. Conversely, Proca theory clearly violates Sezgin's original PCR condition in \cite{Sezgin1980}. Indeed, Proca theory is generally considered to be non-PCR in the literature, because the propagator is \begin{equation} D(k)_{\mu\nu} = \frac{\eta_{\mu\nu} - \frac{k_\mu k_\nu}{m^2}}{k^2 - m^2}, \end{equation} so some components of it become $\sim k^0$ when $k^2 \rightarrow \infty$ and the offending term $k_\mu k_\nu$ cannot be eliminated by the renormalization procedure \cite{Ruegg2003}. Using the polarization basis method mentioned in the main text, however, we can integrate out the nonpropagating $0^+$ part. The free Lagrangian then becomes $\mathcal{L}_{\text{Pr}}$ with the condition $\de^\mu B_\mu=0$, and the resulting propagator goes as $k^{-2}$, so the theory is PCR. 
One may gain some insight into this apparent contradiction by noting that Proca theory may be considered as a gauge-fixed version of a gauge theory, namely the Stueckelberg theory, for which the Lagrangian is \cite{Stueckelberg1938,Stueckelberg1938a,VanHees2003} \begin{align*} \mathcal{L}_{\text{St}} =& -\tfrac{1}{4} \left(\de_\mu B_\nu - \de_\nu B_\mu \right) \left(\de^\mu B^\nu - \de^\nu B^\mu \right) + \tfrac{1}{2} m^2 B_\mu B^\mu \\ &+ \tfrac{1}{2}\de_\mu \phi \de^\mu \phi + m \phi \de_\mu B^\mu \label{eqn:LagStueck} \numberthis \end{align*} and which possesses the gauge invariance, \begin{equation} B'_\mu = B_\mu + \de_\mu \Lambda, \qquad \phi' = \phi + m \Lambda. \end{equation} The nonzero $a$ matrices are \begin{align} &a(0^+) = \bordermatrix{ ~ & \phi & B_{\mu} \cr \phi^* & k^2 & -i k m \cr B^*_{\mu} & i k m & m^2 },\\ &a(1^-) = \bordermatrix{ ~ & B_{\mu} \cr B^*_{\mu} & -k^2+m^2 }, \end{align} and the corresponding SPOs are \begin{equation} \mathsf{P}(0^+) = \bordermatrix{ ~ & \phi & B_{\mu} \cr \phi^* & 1 & \tilde{k}_\mu \cr B^*_{\rho} & \tilde{k}_\rho & \Omega_{\mu\rho} }\;,\qquad \mathsf{P}(1^-) = \bordermatrix{ ~ & B_{\mu} \cr B^*_{\rho} & \Theta_{\mu\rho} }, \end{equation} where $\tilde{k}\text{}_\mu=k\text{}_\mu/\sqrt{k^2}$. As might be expected, the matrix $a(0^+)$ is singular, with rank one, and so we can choose to keep either the $\phi$ column/row or the $B$ column/row. If we choose to keep $B$, then one recovers Proca's theory. If we instead choose to keep $\phi$, then the $b^{-1}$ matrices all go as $\sim k^{-2}$ in the high-energy limit and the theory thus satisfies the original PCR condition. Hence, Stueckelberg theory is PCR, and so Proca theory must also be PCR, since the two theories are physically equivalent. Thus, our alternative PCR criterion succeeds in identifying Proca theory as being PCR, whereas the theory violates the original PCR criterion. 
\bgroup \def\arraystretch{1.5} \setlength\tabcolsep{0.2cm} \begin{longtable*}[e]{@{\extracolsep{\fill}}rlll} \caption{Parameter conditions for the PC renormalizable critical cases that are ghost and tachyon free and have both massless and massive propagating modes. The parameters listed in ``Additional conditions'' must be nonzero to prevent the theory becoming a different critical case.} \label{tab:PGTUnitaryAndPCMlMv2} \\* \toprule \#&Critical condition&Additional conditions&No-ghost-and-tachyon condition\\* \colrule \noalign{\vspace{3pt} \endfirsthead \noalign{\nobreak\vspace{3pt} \botrule \endlastfoot \refrownumber{row:Case01} & \makecell[cl]{$r_1,\frac{r_3}{2}-r_4,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_3,2 r_3+r_5,r_3+2 r_5,t_2$} & $t_2>0, r_2<0, r_3 \left(2 r_3+r_5\right) \left(r_3+2 r_5\right)<0$ \\ \refrownumber{row:Case02} & \makecell[cl]{$r_1,\frac{r_3}{2}-r_4,t_1,\lambda =0$} &\makecell[cl]{$r_2,r_1-r_3,2 r_3+r_5,r_1+r_3+2 r_5,t_2,t_3$} & $t_2>0, r_2<0, r_3 \left(2 r_3+r_5\right) \left(r_3+2 r_5\right)<0$ \\ \refrownumber{row:Case03} & \makecell[cl]{$r_1,r_3,r_4,t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_1+r_5,2 r_1+r_5,t_1,t_2$} & $r_2<0, r_5<0, t_1<0$ \\ \refrownumber{row:Case04} & \makecell[cl]{$r_2,r_1-r_3,r_4,t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1+r_5,2 r_1+r_5,t_1,t_2$} & $t_1>0, r_1+r_5<0, r_1<0$ \\ \refrownumber{row:Case05} & \makecell[cl]{$r_2,r_1-r_3,r_4,t_2,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1+r_5,2 r_1+r_5,t_1,t_3$} & $r_5>0, 2 r_1+r_5>0, t_1>0, r_1<0$ \\ \refrownumber{row:Case06} & \makecell[cl]{$r_1,2 r_3-r_4,t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_1-r_3,r_1-2 r_3-r_5,2 r_3+r_5,t_1,t_2$} & $r_2<0, 2 r_3+r_5<0, t_1<0$ \\ \refrownumber{row:Case07} & \makecell[cl]{$r_2,2 r_1-2 r_3+r_4,t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1-r_3,r_1-2 r_3-r_5,2 r_3+r_5,t_1,t_2$} & $t_1>0, r_1<0, 2 r_3+r_5<r_1$ \\ \end{longtable*} \egroup \setcounter{magicrownumbers}{0} \bgroup \def\arraystretch{1.5} 
\setlength\tabcolsep{0.2cm} \begin{longtable*}[e]{@{\extracolsep{\fill}}rHccl} \caption{Particle content of the PC renormalizable critical cases that are ghost and tachyon free and have both massless and massive propagating modes. All of these cases have 2 massless d.o.f. in propagating modes, and also a massive mode. The column ``$b$ sectors'' describes the diagonal elements in the $b^{-1}$ matrix of each spin-parity sector in the sequence $\{0^-,0^+,1^-,1^+,2^-,2^+\}$. Here and in \Cref{tab:PGTUnitaryAndPCMl,tab:PGTUnitaryAndPCMv} it is notated as $\varphi^n_{v}$ or $\varphi^n_{l}$, where $\varphi$ is the field, $-n$ is the power of $k$ in the element in the $b^{-1}$ matrix when $k$ goes to infinity, $v$ means massive pole, and $l$ means massless pole. If $n=\infty$, it represents that the diagonal element is zero. If $n\leq0$, the field is not propagating. The ``$|$'' notation denotes the different form of the elements of the $b^{-1}$ matrices in different choices of gauge fixing, and the ``$\&$'' connects the diagonal elements in the same $b^{-1}$ matrix. 
The superscript ``N'' represents that there is nonzero off-diagonal term in the $b^{-1}$ matrix.} \label{tab:PGTUnitaryAndPCMlMv} \\ \toprule \#&Critical condition&\makecell[cl]{Massless\\ mode d.o.f.}&\makecell[cl]{Massive \\mode}&$b$ sectors\\* \colrule \noalign{\vspace{3pt} \endfirsthead \multicolumn{5}{l}{TABLE~\ref{tab:PGTUnitaryAndPCMlMv} (continued) \rule{0pt}{12pt}\\ \noalign{\vspace{1.5pt}} \colrule\rule{0pt}{12pt} \#&Critical condition&\makecell[cl]{Massless\\ mode d.o.f.}&\makecell[cl]{Massive \\mode}&$b$ sectors\\* \colrule \noalign{\vspace{3pt} \endhead \noalign{\nobreak\vspace{3pt} \colrule \endfoot \noalign{\nobreak\vspace{3pt} \botrule \endlastfoot \rownumber & \makecell[cl]{$r_1,\frac{r_3}{2}-r_4,t_1,t_3,\lambda =0$} &2 & $0^-$ & $\left\{A\text{}_{\text{v}}^{2},\times,A\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,\frac{r_3}{2}-r_4,t_1,\lambda =0$} &2 & $0^-$ & $\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,r_3,r_4,$\\$t_1+t_2,t_3,\lambda =0$} &2 & $0^-$ & 
$\left\{A\text{}_{\text{v}}^{2},\times,\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,$\\$t_1+t_2,t_3,\lambda =0$} &2 & $2^-$ & $\left\{A\text{}_{\text{}}^{0},\times,\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,$\\$t_2,t_1+t_3,\lambda =0$} &2 & $2^-$ & $\left\{\times,A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,2 r_3-r_4,$\\$t_1+t_2,t_3,\lambda =0$} &2 & $0^-$ & 
$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,2 r_1-2 r_3+r_4,$\\$t_1+t_2,t_3,\lambda =0$} &2 & $2^-$ & $\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \end{longtable*} \egroup \newcounter{tmp} \setcounter{tmp}{\value{magicrownumbers}} \bgroup \def\arraystretch{1.5} \begin{longtable*}[e]{@{\extracolsep{\fill}}rlll} \caption{Parameter conditions for the PC renormalizable critical cases that are ghost and tachyon free and have only massless propagating modes. 
The cases found previously in \cite{Lin2019a} are indicated with an asterisk followed by its original numbering.} \label{tab:PGTUnitaryAndPCMl2}\\ \toprule \#&Critical condition&Additional condition&No-ghost-and-tachyon condition\\* \colrule \noalign{\vspace{3pt} \endfirsthead \multicolumn{4}{l}{TABLE~\ref{tab:PGTUnitaryAndPCMl2} (continued) \rule{0pt}{12pt}\\ \noalign{\vspace{1.5pt}} \colrule\rule{0pt}{12pt} \#&Critical Condition&Additional Condition&No-ghost-and-tachyon Condition\\* \colrule \noalign{\vspace{3pt} \endhead \noalign{\nobreak\vspace{3pt} \colrule \endfoot \noalign{\nobreak\vspace{3pt} \botrule \endlastfoot \refrownumber{row:Case08} & \makecell[cl]{$r_2,r_1-r_3,r_4,t_1,t_2,\lambda =0$} &\makecell[cl]{$r_1,r_1+r_5,2 r_1+r_5,t_3$} & $r_1 \left(r_1+r_5\right) \left(2 r_1+r_5\right)<0$ \\ $^{\ast 1}$\refrownumber{row:Case09} & \makecell[cl]{$r_2,r_1-r_3,r_4,t_1,t_2,t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1+r_5,2 r_1+r_5$} & $r_1 \left(r_1+r_5\right) \left(2 r_1+r_5\right)<0$ \\ $^{\ast 3}$\refrownumber{row:Case10} & \makecell[cl]{$r_1,r_2,\frac{r_3}{2}-r_4,t_1,t_2,t_3,\lambda =0$} &\makecell[cl]{$r_3,2 r_3+r_5,r_3+2 r_5$} & $r_3 \left(2 r_3+r_5\right) \left(r_3+2 r_5\right)<0$ \\ $^{\ast 4}$\refrownumber{row:Case11} & \makecell[cl]{$r_1,\frac{r_3}{2}-r_4,t_1,t_2,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_3,2 r_3+r_5,r_3+2 r_5$} & $r_3 \left(2 r_3+r_5\right) \left(r_3+2 r_5\right)<0$ \\ \refrownumber{row:Case12} & \makecell[cl]{$r_1,r_2,\frac{r_3}{2}-r_4,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_3,2 r_3+r_5,r_3+2 r_5,t_2$} & $r_3 \left(2 r_3+r_5\right) \left(r_3+2 r_5\right)<0$ \\ $^{\ast 2}$\refrownumber{row:Case13} & \makecell[cl]{$r_2,2 r_1-2 r_3+r_4,t_1,t_2,t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1-r_3,r_1-2 r_3-r_5,2 r_3+r_5$} & $r_1 \left(r_1-2 r_3-r_5\right) \left(2 r_3+r_5\right)>0$ \\ \refrownumber{row:Case14} & \makecell[cl]{$r_1,r_2,\frac{r_3}{2}-r_4,t_1,t_2,\lambda =0$} &\makecell[cl]{$2 r_3-r_4,2 r_3+r_5,r_4+r_5,t_3$} & $r_3 \left(2 r_3+r_5\right) 
\left(r_3+2 r_5\right)<0$ \\ \refrownumber{row:Case15} & \makecell[cl]{$r_1,r_2,\frac{r_3}{2}-r_4,t_1,\lambda =0$} &\makecell[cl]{$r_3,2 r_3+r_5,r_3+2 r_5,t_2,t_3$} & $r_3 \left(2 r_3+r_5\right) \left(r_3+2 r_5\right)<0$ \\ \refrownumber{row:Case16} & \makecell[cl]{$r_1,\frac{r_3}{2}-r_4,t_1,t_2,\lambda =0$} &\makecell[cl]{$r_2,r_3,2 r_3+r_5,r_3+2 r_5,t_3$} & $r_3 \left(2 r_3+r_5\right) \left(r_3+2 r_5\right)<0$ \\ \refrownumber{row:Case17} & \makecell[cl]{$r_1,r_2,r_3,r_4,t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_1+r_5,2 r_1+r_5,t_1,t_2$} & $r_5<0, t_1\neq 0$ \\ \refrownumber{row:Case18} & \makecell[cl]{$r_1,r_2,r_3,r_4,t_2,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1+r_5,2 r_1+r_5,t_1,t_3$} & $r_5>0, t_1\neq 0$ \\ \refrownumber{row:Case19} & \makecell[cl]{$r_1,r_2,2 r_3-r_4,t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_1-r_3,r_1-2 r_3-r_5,2 r_3+r_5,t_1,t_2$} & $r_3<-\frac{r_5}{2}, t_1\neq 0$ \\ \end{longtable*} \egroup \setcounter{magicrownumbers}{\value{tmp}} \bgroup \def\arraystretch{1.5} \setlength\tabcolsep{0.2cm} \begin{longtable*}[e]{@{\extracolsep{\fill}}rHcl} \caption{Particle content of the PC renormalizable critical cases that are ghost and tachyon free and have only massless propagating modes. All of these cases have 2 massless d.o.f. of propagating mode. 
The cases found previously in \cite{Lin2019a} are indicated with an asterisk followed by its original numbering.} \label{tab:PGTUnitaryAndPCMl}\\ \toprule \#&Critical Condition&\makecell[cl]{Massless\\ mode d.o.f.}&$b$ sectors\\* \colrule \noalign{\vspace{3pt} \endfirsthead \multicolumn{4}{l}{TABLE~\ref{tab:PGTUnitaryAndPCMl} (continued) \rule{0pt}{12pt}\\ \noalign{\vspace{1.5pt}} \colrule\rule{0pt}{12pt} \#&Critical Condition&\makecell[cl]{Massless\\ mode d.o.f.}&$b$ sectors\\* \colrule \noalign{\vspace{3pt} \endhead \noalign{\nobreak\vspace{3pt} \colrule \endfoot \noalign{\nobreak\vspace{3pt} \botrule \endlastfoot \refrownumber{row:Case3bSec} & \makecell[cl]{$r_2=r_1-r_3=r_4=t_1=t_2=\lambda =0$} & 2 & $\left\{\times,A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},\times\right\}$ \\ $^{\ast 1}$\rownumber & \makecell[cl]{$r_2=r_1-r_3=r_4=t_1=t_2=t_3=\lambda =0$} & 2 & $\left\{\times,\times,A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},\times\right\}$ \\ $^{\ast 3}$\rownumber & \makecell[cl]{$r_1=r_2=\frac{r_3}{2}-r_4=t_1=t_2=t_3=\lambda =0$} & 2 & $\left\{\times,\times,A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ $^{\ast 4}$\rownumber & \makecell[cl]{$r_1=\frac{r_3}{2}-r_4=t_1=t_2=t_3=\lambda =0$} & 2 & $\left\{A\text{}_{\text{l}}^{2},\times,A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ \refrownumber{row:Case7bSec} & \makecell[cl]{$r_1=r_2=\frac{r_3}{2}-r_4=t_1=t_3=\lambda =0$} &2 & 
$\left\{A\text{}_{\text{}}^{0},\times,A\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ $^{\ast 2}$\rownumber & \makecell[cl]{$r_2=2 r_1-2 r_3+r_4=t_1=t_2=t_3=\lambda =0$} & 2 & $\left\{\times,A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},\times\right\}$ \\ \rownumber & \makecell[cl]{$r_1=r_2=\frac{r_3}{2}-r_4=t_1=t_2=\lambda =0$} & 2 & $\left\{\times,A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{l}}^{2},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1=r_2=\frac{r_3}{2}-r_4=t_1=\lambda =0$} & 2 & \makecell[cl]{$\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\right.$\\ $\left.\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,A\text{}_{\text{l}}^{2}\right\}$} \\ \rownumber & \makecell[cl]{$r_1=\frac{r_3}{2}-r_4=t_1=t_2=\lambda =0$} & 2 & 
$\left\{A\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{l}}^{2},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,r_2,r_3,r_4,t_1+t_2,t_3,\lambda =0$} &2 & $\left\{A\text{}_{\text{}}^{0},\times,\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,r_2,r_3,r_4,t_2,t_1+t_3,\lambda =0$} &2 & $\left\{\times,A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,r_2,2 r_3-r_4,t_1+t_2,t_3,\lambda =0$} &2 & 
$\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \end{longtable*} \egroup \setcounter{tmp}{\value{magicrownumbers}} \bgroup \def\arraystretch{1.5} \setlength\tabcolsep{0.2cm} \begin{longtable*}[e]{@{\extracolsep{\fill}}rlll} \caption{Parameter conditions for the PC renormalizable critical cases that are ghost and tachyon free and have only massive propagating modes. The cases found previously in \cite{Lin2019a} are indicated with an asterisk followed by its original numbering.} \label{tab:PGTUnitaryAndPCMv2}\\ \toprule \#&Critical condition&Additional conditions&No-ghost-and-tachyon condition\\* \colrule \noalign{\vspace{3pt} \endfirsthead \multicolumn{4}{l}{TABLE~\ref{tab:PGTUnitaryAndPCMv2} (continued) \rule{0pt}{12pt}\\ \noalign{\vspace{1.5pt}} \colrule\rule{0pt}{12pt} \#&Critical condition&Additional conditions&No-ghost-and-tachyon condition\\* \colrule \noalign{\vspace{3pt} \endhead \noalign{\nobreak\vspace{3pt} \colrule \endfoot \noalign{\nobreak\vspace{3pt} \botrule \endlastfoot \refrownumber{row:Case20} & \makecell[cl]{$r_1,r_3,r_4,r_5,\lambda =0$} &\makecell[cl]{$r_2,t_1,t_2,t_1+t_2,t_3,t_1+t_3$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case21} & \makecell[cl]{$r_1,r_3,r_4,r_5,t_1+t_2,\lambda =0$} &\makecell[cl]{$r_2,t_1,t_2,t_3,t_1+t_3$} & $r_2<0, t_1<0$ \\ \refrownumber{row:Case22} & \makecell[cl]{$r_1,r_3,r_4,r_5,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_2,t_1,t_2,t_1+t_2,t_3$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case23} & 
\makecell[cl]{$r_1,r_3,r_4,r_5,t_1+t_2,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_2,t_1,t_2,t_3$} & $r_2<0, t_1<0$ \\ \refrownumber{row:Case24} & \makecell[cl]{$r_1,r_3,r_4,t_1,\lambda =0$} &\makecell[cl]{$r_2,r_1+r_5,2 r_1+r_5,t_2,t_3$} & $t_2>0, r_2<0$ \\ $^{\ast 5}$\refrownumber{row:Case25} & \makecell[cl]{$r_1,r_3,r_4,r_5,t_1,\lambda =0$} &\makecell[cl]{$r_2,t_2,t_3$} & $t_2>0, r_2<0$ \\ $^{\ast 6}$\refrownumber{row:Case26} & \makecell[cl]{$r_1,r_3,r_4,r_5,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_2,t_2$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case27} & \makecell[cl]{$r_1,\frac{r_3}{2}-r_4,\frac{r_3}{2}+r_5,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_3,t_2$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case28} & \makecell[cl]{$r_1,r_3,r_4,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_5,t_2$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case29} & \makecell[cl]{$r_1-r_3,r_4,2 r_1+r_5,t_1,\lambda =0$} &\makecell[cl]{$r_1,r_2,r_1+r_5,t_2,t_3$} & $t_2>0, r_2<0$ \\ $^{\ast 7}$\refrownumber{row:Case30} & \makecell[cl]{$r_1-r_3,r_4,2 r_1+r_5,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_1,r_2,t_2$} & $t_2>0, r_2<0$ \\ $^{\ast 8}$\refrownumber{row:Case31} & \makecell[cl]{$r_1,2 r_3-r_4,2 r_3+r_5,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_3,t_2$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case32} & \makecell[cl]{$r_1,r_3,r_4,r_5,t_3,\lambda =0$} &\makecell[cl]{$r_2,t_1,t_2,t_1+t_2$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case33} & \makecell[cl]{$r_1,r_3,r_4,r_5,t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_2,t_1,t_2$} & $r_2<0, t_1<0$ \\ \refrownumber{row:Case34} & \makecell[cl]{$r_1,2 r_3-r_4,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_3,2 r_3+r_5,t_2$} & $t_2>0, r_2<0$ \\ $^{\ast 9}$\refrownumber{row:Case35} & \makecell[cl]{$r_1,\frac{r_3}{2}-r_4,2 r_3+r_5,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_3,t_2$} & $t_2>0, r_2<0$ \\ $^{\ast 10}$\refrownumber{row:Case36} & \makecell[cl]{$2 r_1-2 r_3+r_4,2 r_3+r_5,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_1,r_2,r_1-r_3,t_2$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case37} & 
\makecell[cl]{$r_1,\frac{r_3}{2}-r_4,2 r_3+r_5,t_1,\lambda =0$} &\makecell[cl]{$r_2,2 r_3-r_4,t_2,t_3$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case38} & \makecell[cl]{$r_1,2 r_3-r_4,2 r_3+r_5,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_1-r_3,t_1,t_2,t_1+t_2$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case39} & \makecell[cl]{$r_1,2 r_3-r_4,2 r_3+r_5,t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_1-r_3,t_1,t_2$} & $r_2<0, t_1<0$ \\ \refrownumber{row:Case40} & \makecell[cl]{$r_1,r_4+r_5,t_1,t_3,\lambda =0$} &\makecell[cl]{$r_2,r_3-2 r_4,2 r_3-r_4,t_2$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case41} & \makecell[cl]{$r_1,\frac{r_3}{2}-r_4,\frac{r_3}{2}+r_5,t_1,\lambda =0$} &\makecell[cl]{$r_2,2 r_3-r_4,t_2,t_3$} & $t_2>0, r_2<0$ \\ \refrownumber{row:Case42} & \makecell[cl]{$r_1,r_3,r_4,t_1+t_2,\lambda =0$} &\makecell[cl]{$r_2,r_1+r_5,2 r_1+r_5,t_1,t_2,t_3,$\\$t_1+t_3$} & \makecell[cl]{$t_3>0, r_2<0, r_5<0,$\\$ t_1<0, t_1+t_3<0$} \\ \refrownumber{row:Case43} & \makecell[cl]{$r_1,r_3,r_4,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_2,r_1+r_5,2 r_1+r_5,t_1,t_2,$\\$t_1+t_2,t_3$} & \makecell[cl]{$r_5>0, t_2>0, t_1+t_2>0,$\\$ r_2<0, t_1<0$} \\ \refrownumber{row:Case44} & \makecell[cl]{$r_2,r_1-r_3,r_4,t_1+t_2,\lambda =0$} &\makecell[cl]{$r_1,r_1+r_5,2 r_1+r_5,t_1,t_2,t_3,$\\$t_1+t_3$} & \makecell[cl]{$t_1>0, r_1<0, r_1+r_5<0,$\\$ t_3 \left(t_1+t_3\right)>0$} \\ \refrownumber{row:Case45} & \makecell[cl]{$r_2,r_1-r_3,r_4,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1+r_5,2 r_1+r_5,t_1,t_2,$\\$t_1+t_2,t_3$} & \makecell[cl]{$r_5>0, 2 r_1+r_5>0, t_1>0,$\\$ t_1+t_2>0, r_1<0, t_2<0$} \\ \refrownumber{row:Case46} & \makecell[cl]{$r_1-r_3,r_4,2 r_1+r_5,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1,r_2,r_1+r_5,t_1,t_2,t_1+t_2,t_3$} & $t_1>0, t_2>0, r_1<0, r_2<0$ \\ \refrownumber{row:Case47} & \makecell[cl]{$r_1,r_2,r_3,r_4,t_1+t_2,\lambda =0$} &\makecell[cl]{$r_1+r_5,2 r_1+r_5,t_1,t_2,t_3,t_1+t_3$} & $r_5<0, t_1 t_3 \left(t_1+t_3\right)>0$ \\ \refrownumber{row:Case48} & 
\makecell[cl]{$r_1,r_2,r_3,r_4,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1+r_5,2 r_1+r_5,t_1,t_2,t_1+t_2,t_3$} & $r_5>0, t_1 t_2 \left(t_1+t_2\right)<0$ \\ \refrownumber{row:Case49} & \makecell[cl]{$r_1,r_3,r_4,t_1+t_2,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_2,r_1+r_5,2 r_1+r_5,t_1,t_2,t_3$} & $r_2<0, t_1<0$ \\ \refrownumber{row:Case50} & \makecell[cl]{$r_2,r_1-r_3,r_4,r_1+r_5,t_1+t_2,\lambda =0$} &\makecell[cl]{$r_1,2 r_1+r_5,t_1,t_2,t_3,t_1+t_3$} & $t_1>0, r_1<0$ \\ \refrownumber{row:Case51} & \makecell[cl]{$r_2,r_1-r_3,r_4,2 r_1+r_5,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1+r_5,t_1,t_2,t_1+t_2,t_3$} & $t_1>0, r_1<0$ \\ \refrownumber{row:Case52} & \makecell[cl]{$r_2,r_1-r_3,r_4,t_1+t_2,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1+r_5,2 r_1+r_5,t_1,t_2,t_3$} & $t_1>0, r_1<0$ \\ \refrownumber{row:Case53} & \makecell[cl]{$r_2,r_1-r_3,r_4,r_1+r_5,t_1+t_2,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1,2 r_1+r_5,t_1,t_2,t_3$} & $t_1>0, r_1<0$ \\ \refrownumber{row:Case54} & \makecell[cl]{$r_2,r_1-r_3,r_4,2 r_1+r_5,t_1+t_2,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1+r_5,t_1,t_2,t_3$} & $t_1>0, r_1<0$ \\ \refrownumber{row:Case55} & \makecell[cl]{$r_2,r_1-r_3,r_4,r_1+r_5,t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_1,t_1,t_2$} & $t_1>0, r_1<0$ \\ \refrownumber{row:Case56} & \makecell[cl]{$r_2,r_1-r_3,r_4,2 r_1+r_5,t_2,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1,t_1,t_3$} & $t_1>0, r_1<0$ \\ \refrownumber{row:Case57} & \makecell[cl]{$r_1-r_3,r_4,2 r_1+r_5,t_2,t_1+t_3,\lambda =0$} &\makecell[cl]{$r_1,r_2,t_1,t_3$} & $t_1>0, r_1<0$ \\ \refrownumber{row:Case58} & \makecell[cl]{$r_2,2 r_1-2 r_3+r_4,r_1-2 r_3-r_5,$\\$t_1+t_2,t_3,\lambda =0$} &\makecell[cl]{$r_1,r_1-r_3,t_1,t_2$} & $t_1>0, r_1<0$ \\ \end{longtable*} \egroup \setcounter{magicrownumbers}{\value{tmp}} \bgroup \def\arraystretch{1.5} \setlength\tabcolsep{0.11cm} \begin{longtable*}[e]{@{\extracolsep{\fill}}rHcl} \caption{Particle content of the PC renormalizable critical cases that are ghost and tachyon free and have only massive 
propagating modes. The cases found previously in \cite{Lin2019a} are indicated with an asterisk followed by its original numbering. Note that there are typos of the $b$ sectors of Cases~\ref{row:Case30} and~\ref{row:Case31} (old numbers 7 and 8) in \cite{Lin2019a}.} \label{tab:PGTUnitaryAndPCMv}\\ \toprule \#&Critical Condition&\makecell[cl]{Massive \\mode}&$b$ sectors\\* \colrule \noalign{\vspace{3pt} \endfirsthead \multicolumn{4}{l}{TABLE~\ref{tab:PGTUnitaryAndPCMv} (continued) \rule{0pt}{12pt}\\ \noalign{\vspace{1.5pt}} \colrule\rule{0pt}{12pt} \#&Critical Condition&\makecell[cl]{Massive \\mode}&$b$ sectors\\* \colrule \noalign{\vspace{3pt} \endhead \noalign{\nobreak\vspace{3pt} \colrule \endfoot \noalign{\nobreak\vspace{3pt} \botrule \endlastfoot \rownumber & \makecell[cl]{$r_1=r_3=r_4=r_5=\lambda =0$} & $0^-$& \makecell[cl]{$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{0}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{0}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$} \\ \rownumber & \makecell[cl]{$r_1=r_3=r_4=r_5=t_1+t_2=\lambda =0$} &$0^-$& 
\makecell[cl]{$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{0}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$} \\ \rownumber & \makecell[cl]{$r_1=r_3=r_4=r_5=t_1+t_3=\lambda =0$} &$0^-$& \makecell[cl]{$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{0}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$} \\ \rownumber & \makecell[cl]{$r_1=r_3=r_4=r_5=t_1+t_2=t_1+t_3=\lambda =0$} &$0^-$& \makecell[cl]{$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$} \\ \rownumber & \makecell[cl]{$r_1=r_3=r_4=t_1=\lambda =0$} 
&$0^-$& \makecell[cl]{$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,\times\right\}$} \\ $^{\ast 5}$\rownumber & \makecell[cl]{$r_1=r_3=r_4=r_5=t_1=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}|\mathfrak{a}\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},\times,\times\right\}$ \\ $^{\ast 6}$\rownumber & \makecell[cl]{$r_1=r_3=r_4=r_5=t_1=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},\times,\times,A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},\times,\times\right\}$ \\ \rownumber & \makecell[cl]{$r_1=\frac{r_3}{2}-r_4=\frac{r_3}{2}+r_5=t_1=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},\times,\times,\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1=r_3=r_4=t_1=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},\times,A\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,\times\right\}$ \\ \rownumber & \makecell[cl]{$r_1-r_3=r_4=2 r_1+r_5=t_1=\lambda =0$} &$0^-$& 
$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},\times\right\}$ \\ $^{\ast 7}$\rownumber & \makecell[cl]{$r_1-r_3=r_4=2 r_1+r_5=t_1=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},\times,A\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},\times\right\}$ \\ $^{\ast 8}$\rownumber & \makecell[cl]{$r_1=2 r_3-r_4=2 r_3+r_5=t_1=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{l}}^{2},\times,A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},\times,\times\right\}$ \\ \rownumber & \makecell[cl]{$r_1=r_3=r_4=r_5=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},\times,A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}|\mathfrak{a}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{0}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1=r_3=r_4=r_5=t_1+t_2=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},\times,A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}|\mathfrak{a}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1=2 r_3-r_4=t_1=t_3=\lambda =0$} &$0^-$& 
$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,\times\right\}$ \\ $^{\ast 9}$\rownumber & \makecell[cl]{$r_1=\frac{r_3}{2}-r_4=2 r_3+r_5=t_1=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},\times,A\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ $^{\ast 10}$\rownumber & \makecell[cl]{$2 r_1-2 r_3+r_4=2 r_3+r_5=t_1=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},A\text{}_{\text{l}}^{2},\times\right\}$ \\ \rownumber & \makecell[cl]{$r_1=\frac{r_3}{2}-r_4=2 r_3+r_5=t_1=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1=2 r_3-r_4=2 r_3+r_5=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}|\mathfrak{a}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{0}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1=2 r_3-r_4=2 r_3+r_5=t_1+t_2=t_3=\lambda =0$} &$0^-$& 
$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}|\mathfrak{a}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1=r_4+r_5=t_1=t_3=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{l}}^{2},\times,\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1=\frac{r_3}{2}-r_4=\frac{r_3}{2}+r_5=t_1=\lambda =0$} &$0^-$& $\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}|\mathfrak{a}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{l}}^{2}\&A\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{l}}^{2}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\times,A\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,r_3,r_4,t_1+t_2,\lambda =0$} &$0^-,1^-$ & $\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{v}}^{2}\&A\text{}_{\text{v}}^{0}\right)^\text{N}|\left(A\text{}_{\text{v}}^{2}\&\mathfrak{s}\text{}_{\text{vl}}^{2}\right)^\text{N}|\left(A\text{}_{\text{v}}^{2}\&\mathfrak{a}\text{}_{\text{vl}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,r_3,r_4,t_1+t_3,\lambda =0$} &$0^-,1^+$ & 
$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{v}}^{2}\&A\text{}_{\text{v}}^{0}\right)^\text{N}|\left(A\text{}_{\text{v}}^{2}\&\mathfrak{a}\text{}_{\text{vl}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,t_1+t_2,\lambda =0$} &$1^-,2^-$ & $\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{v}}^{2}\&A\text{}_{\text{v}}^{0}\right)^\text{N}|\left(A\text{}_{\text{v}}^{2}\&\mathfrak{s}\text{}_{\text{vl}}^{2}\right)^\text{N}|\left(A\text{}_{\text{v}}^{2}\&\mathfrak{a}\text{}_{\text{vl}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,t_1+t_3,\lambda =0$} &$1^+,2^-$ & $\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{v}}^{2}\&A\text{}_{\text{v}}^{0}\right)^\text{N}|\left(A\text{}_{\text{v}}^{2}\&\mathfrak{a}\text{}_{\text{vl}}^{2}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1-r_3,r_4,2 r_1+r_5,t_1+t_3,\lambda =0$} &$0^-,2^-$ & 
$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{}}^{0}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,r_2,r_3,r_4,t_1+t_2,\lambda =0$} &$1^-$ & $\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{v}}^{2}\&A\text{}_{\text{v}}^{0}\right)^\text{N}|\left(A\text{}_{\text{v}}^{2}\&\mathfrak{s}\text{}_{\text{vl}}^{2}\right)^\text{N}|\left(A\text{}_{\text{v}}^{2}\&\mathfrak{a}\text{}_{\text{vl}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,r_2,r_3,r_4,t_1+t_3,\lambda =0$} &$1^+$ & $\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{v}}^{2}\&A\text{}_{\text{v}}^{0}\right)^\text{N}|\left(A\text{}_{\text{v}}^{2}\&\mathfrak{a}\text{}_{\text{vl}}^{2}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1,r_3,r_4,t_1+t_2,t_1+t_3,\lambda =0$} &$0^-$ & 
$\left\{A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,r_1+r_5,t_1+t_2,\lambda =0$} &$2^-$ & $\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{0}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,2 r_1+r_5,t_1+t_3,\lambda =0$} &$2^-$ & $\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{}}^{0}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{0}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,t_1+t_2,t_1+t_3,\lambda =0$} &$2^-$ & 
$\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,r_1+r_5,t_1+t_2,t_1+t_3,\lambda =0$} &$2^-$ & $\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,2 r_1+r_5,t_1+t_2,t_1+t_3,\lambda =0$} &$2^-$ & $\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{2}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & 
\makecell[cl]{$r_2,r_1-r_3,r_4,r_1+r_5,t_1+t_2,t_3,\lambda =0$} &$2^-$ & $\left\{A\text{}_{\text{}}^{0},\times,A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}|\mathfrak{a}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,r_1-r_3,r_4,2 r_1+r_5,t_2,t_1+t_3,\lambda =0$} &$2^-$ & $\left\{\times,A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_1-r_3,r_4,2 r_1+r_5,t_2,t_1+t_3,\lambda =0$} &$2^-$ & $\left\{A\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{s}\text{}_{\text{l}}^{0}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{}}^{0}|\mathfrak{a}\text{}_{\text{l}}^{2},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \rownumber & \makecell[cl]{$r_2,2 r_1-2 r_3+r_4,r_1-2 r_3-r_5,t_1+t_2,t_3,\lambda =0$} &$2^-$ & 
$\left\{A\text{}_{\text{}}^{0},A\text{}_{\text{l}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}|\mathfrak{a}\text{}_{\text{l}}^{2},\left(A\text{}_{\text{}}^{\infty}\&A\text{}_{\text{}}^{-2}\right)^\text{N}|\left(A\text{}_{\text{}}^{\infty}\&\mathfrak{a}\text{}_{\text{l}}^{0}\right)^\text{N},A\text{}_{\text{v}}^{2},A\text{}_{\text{}}^{0}|\mathfrak{s}\text{}_{\text{l}}^{2}\right\}$ \\ \end{longtable*} \egroup \FloatBarrier \bibliographystyle{apsrev4-2}
% (removed stray non-document artifact lines left over from text extraction)
\section{Other Loose Ends} \label{sec:other-loose-ends} \subsection{Conjugates over the Positive Orthant} \label{sec:conj-positive} The following lemma justifies why it is enough to consider only non-negative $\lambda$s for our setting. \begin{lemma}\label{lem:pos-lambda} Given a convex, non-decreasing, non-negative function $g: \R^d_+ \to \R_+$, $\forall z \in \R^d_+$, we have $g(z) = \max_{\lambda \in \R^d} (\ip{\lambda,z} - g^\star(\lambda)) = \max_{\lambda \in \R^d_+} (\ip{\lambda,z} - g^\star(\lambda))$. \end{lemma} \begin{proof} Let $\widehat{g}$ be the function that is the same as $g$ on the positive orthant and is $\infty$ everywhere else. Then for $\lambda\in \R^d$, \[ \widehat{g}^\star (\lambda) = \sup_{z \in \R^d} \{\ip{\lambda,z} - \widehat{g}(z)\} = \sup_{z\geq0} \{\ip{\lambda,z} - \widehat{g}(z)\} = g^\star(\max(\lambda,0)). \] Here, the first equality is by definition. The second is because $\widehat{g}(z) = \infty$ if $z$ is not non-negative. The third is because if some coordinate of $\lambda$ is negative, zeroing out the corresponding coordinate of $z$ increases $\ip{\lambda, z}$ and decreases $\widehat{g}(z)$, because $\widehat{g}$ is non-decreasing on the positive orthant. Here the vector $\max(\lambda,0)$ is the coordinate-wise maximum. Now for any $z\in \R^d_+$, $g(z) = \widehat{g}(z) = \max_{\lambda \in \R^d} (\ip{\lambda,z} - \widehat{g}^\star(\lambda)) = \max_{\lambda \in \R^d} (\ip{\lambda,z} - g^\star(\max(\lambda,0))) = \max_{\lambda \in \R_+^d} (\ip{\lambda,z} - g^\star(\lambda))$. \end{proof} \subsection{Removing Assumptions on the Elements} \label{sec:except} Let $\ensuremath{\mathsf{opt}\xspace}$ denote the profit of the optimal integer solution to the problem~(\ref{eq:2}). To discharge the conditions on elements in Assumption~\ref{asm:nice} we show the following reduction.
\begin{lemma} \label{lem:reduce-nice} Suppose ${\mathscr{A}}$ is an algorithm that works for instances that have no exceptional items, where each item has profit $\ensuremath{\pi}(e) \leq \ensuremath{\mathsf{opt}\xspace}/\eta d$, and where items are in general position, and that guarantees a profit of $\ensuremath{\mathsf{opt}\xspace}/\beta$. Then we can get another algorithm that requires none of these assumptions, and guarantees a profit of $\frac{\ensuremath{\mathsf{opt}\xspace}}{O(\beta + \eta d)}$. \end{lemma} \begin{proof} The general position argument is simplest: we essentially need that for some fixed $\lambda \in \CC$, there is at most one element such that $v(e) = \ip{\lambda, s(e)}$. This can be achieved by subtracting from each $v(e)$ some random noise picked uniformly from the interval $[0,(\delta/n) \ensuremath{\pi}(e)]$ for some tiny $\delta$; this can change the optimal profit by at most a $(1-\delta)$-factor. Recall that item $e$ is called exceptional if $\arg\max_{\theta \in [0,1]}\big\{ \theta\, v(e) - g(\theta\, s(e)) \big\}$ is achieved at $\theta \in (0,1)$: i.e., it is optimal to take a fraction of the item. E.g., in the $1$-dimensional case, $v(e) = s(e) = 1$, and $g(s) = 0.99s^2$. The following claim is a minor variation of~\cite[Lemma~5.1]{BUCM}: \begin{claim}[Few Exceptional Items] \label{clm:few-except} If $g$ is supermodular, then any optimal solution contains at most $d$ exceptional items. \end{claim} \begin{proof} Fix an optimal solution $O^*$, and for each $i = 1,\ldots, d$, let $o_i := \arg\max_{o \in O^*} s(o)_i$ be an item for which the $i^{th}$-coordinate of the size vector is maximized. Let $L$ denote the set of these ``large'' items. If $O^*$ contains strictly more than $d$ exceptional items, let $o^* \in O^*$ be any exceptional item not in $L$, and let $x' := \chi_{O^* \setminus \{o^*\}}$ be the characteristic vector for the elements in the optimal set without $o^*$. By construction, $s(o^*) \leq Sx'$ component-wise.
Since $o^*$ is exceptional, $v(o^*) < \ip{ \nabla g(s(o^*)), s(o^*) }$; moreover, the latter is at most $\ip{ \nabla g(Sx'), s(o^*) }$ due to $g$ having monotone gradients. But this implies that dropping $o^*$ would increase the profit, which contradicts our choice of $O^*$. \end{proof} Moreover, since the profit function is subadditive, there can be at most $\eta d$ high-valued items. Now the reduction procedure: with probability $\frac12$ run the single-item secretary problem (with competitive ratio $1/\mathrm{e}$), and with the remaining probability run algorithm ${\mathscr{A}}$. If the instance has a high-valued item then we get expected value at least $(1/\mathrm{e}) \cdot \ensuremath{\mathsf{opt}\xspace}/(\eta d)$. If not, divide the optimal solution $x^*$ into the solution restricted to the non-exceptional items $x^1$, and to the (at most $d$) exceptional items $x^2$. By the subadditivity of the profit, $\ensuremath{\pi}(x^*) \leq \ensuremath{\pi}(x^1) + \ensuremath{\pi}(x^2)$. Again the secretary algorithm gives a $1/(d\mathrm{e})$-approximation for the profit $\ensuremath{\pi}(x^2)$, so it suffices to get a good approximation for the non-exceptional items. \end{proof} \subsection{Probabilistic inequalities} \begin{fact}\label{fact:expectation} Consider a vector $x \in \{0,1\}^n$ and let $\mathbf{X}$ be the random vector obtained by setting each coordinate of $x$ to $0$ with probability $1/2$. If the cost function $g$ is supermodular then \begin{align*} {\mathbb{E}}[\ensuremath{\pi}(\mathbf{X})] \ge \frac{1}{2}\, \ensuremath{\pi}(x). \end{align*} \end{fact} \begin{proof} Function $g$ is superadditive due to Lemma~\ref{lem:superadd2}, and so $\pi$ is subadditive: $\ensuremath{\pi}(y + z) \le \ensuremath{\pi}(y) + \ensuremath{\pi}(z)$. Writing $x = \mathbf{X} + (x-\mathbf{X})$ and applying subadditivity, we get \begin{align*} \ensuremath{\pi}(x) \le \ensuremath{\pi}(\mathbf{X}) + \ensuremath{\pi}(x - \mathbf{X}).
\end{align*} But $\mathbf{X}$ and $x - \mathbf{X}$ have the same distribution, so taking expectations gives $2 {\mathbb{E}}[\ensuremath{\pi}(\mathbf{X})] \ge \ensuremath{\pi}(x)$. \end{proof} \begin{fact} \label{fact:submod-conc} Consider a submodular function $f : 2^\mathcal{U} \rightarrow \R$. Consider a set $Y \subseteq \mathcal{U}$ such that $f$ is non-negative over all of its subsets and we also have the following Lipschitz condition for some $M$: \begin{gather}\label{eq:lip} \textrm{For all $Y' \subseteq Y$ and element $e \in Y'$,~~~} |f(Y') - f(Y' - e)| \le M. \end{gather} Let $\mathbf{Y}$ be the random subset obtained from picking each element from $Y$ independently with some probability (which can be different for each item). Then % \begin{gather}\label{eq:conc2} \Pr(|f(\mathbf{Y}) - {\mathbb{E}}[f(\mathbf{Y})]| \ge t) \le \frac{2M\, {\mathbb{E}}[f(\mathbf{Y})]}{t^2} \end{gather} \end{fact} \begin{proof} Vondr\'ak showed that $M$-Lipschitz non-negative submodular functions are \emph{weakly (2M,0)-self-bounding}~\cite{Von}. By the Efron-Stein inequality, such functions have $\text{Var}(f(\mathbf{Y})) \leq 2M\, {\mathbb{E}}[f(\mathbf{Y})]$~\cite{concentration}. Now Chebychev's inequality gives the result. \end{proof} \section{The Offline Constrained Case} \label{sec:constr} Having built up tools and intuition in the unconstrained case, we turn to the case where there is a downwards-closed constraint ${\mathscr{F}} \subseteq \{0,1\}^n$, and the goal is to maximize the profit subject to $x \in {\mathscr{F}}$. We again work with Assumption~\ref{asm:nice}, but do not assume anything about items sizes. We discuss computational aspects at the end of this section. The general idea is again to use classifiers $\lambda \in \R^d_+$, and only consider items in $U_{\lambda}$, namely those with ``high-enough'' value $v_i \ge \ip{\lambda, S^i}$. However, because of the constraints ${\mathscr{F}}$ we may no longer be able to pick all these items. 
Thus, we need to consider the most profitable solution from ${\mathscr{F}}$ in this filtered feasible set $U_{\lambda}$ (whose quality is less clear how to analyze). Again we restrict to the 1-dimensional curve $\mathcal{C}$ defined in the previous section; however, it only satisfies slightly modified versions of properties (P1)-(P2), since we do not assume the item sizes to be infinitesimal anymore. To make this precise, define the ``open'' set $U^{\degree}_\lambda := \{ e \in U \mid v(e) > \ip{ \lambda, s(e) } \}$; note the strict inequality. Under the assumption of items being in general position, there is at most one ``threshold'' item with $v_i = \ip{\lambda, S^i}$, i.e., $|U_{\lambda} \setminus U^{\degree}_\lambda| \leq 1$. Now a ``good'' classifier is one that satisfies the following: \begin{enumerate} \item[(P1')] For all binary $x$ with $\textrm{support}(x) \subseteq U^{\degree}_\lambda$ and $x \in {\mathscr{F}}$, $\nabla g(Sx) \leq \lambda$. \item[(P2')] There exists a binary $x^{occ}$ with $\textrm{support}(x^{occ}) \subseteq U_{\lambda}$ and $x^{occ} \in {\mathscr{F}}$, and index $i^*$ such that $(\nabla g(Sx^{occ}))_{i^*} \geq \lambda_{i^*}.$ (Note that if $\textrm{support}(x^{occ}) \subseteq U^{\degree}_\lambda$, then by property~(P1') the above inequality holds at equality; else $x^{occ}$ contains the unique element in $U_{\lambda} \setminus U^{\degree}_{\lambda}$.) \item[(P3')] This is the same as before: $g^\star_{i}(\lambda_{i}) = g^\star_{j}(\lambda_{j}) ~~~\forall i, j \in [d]$. \end{enumerate} \newcommand{\ensuremath{x^{lin}}}{\ensuremath{x^{lin}}} The arguments of Lemma~\ref{lemma:existC} show the following. \begin{lemma} Given Assumption \ref{asm:nice}, the curve $\mathcal{C}$ defined in the previous section contains a $\lambda$ satisfying properties (P1')-(P3'). 
\end{lemma} Next, we show that for a good classifier $\lambda \in \mathcal{C}$, the maximum profit solution from ${\mathscr{F}}$ contained within $U^{\degree}_{\lambda}$ essentially gives an $O(\nicefrac{1}{d})$-approximation. \begin{theorem}[Offline Approach] \label{thm:constr-main} Suppose Assumption \ref{asm:nice} holds. Let $\lambda^*$ be a classifier in $\mathcal{C}$ satisfying properties~(P1')--(P3'). Then the better of the two solutions: (a)~the maximum profit solution in ${\mathscr{F}}$ containing elements only from $U^{\degree}_{\lambda^*}$, and (b)~the optimal single element in $U_{\lambda^*}$, has profit at least $\pi(x')/(2d+1)$ for any vector $x' \in \operatorname{Conv}({\mathscr{F}}) \subseteq [0,1]^n$. \end{theorem} \begin{proof} The idea is to follow the development in Theorem \ref{thm:unc}. There, the same solution $x^*$ satisfied the value lower bounds of Lemmas \ref{lemma:linGap} and \ref{lemma:profitDual}; to satisfy the first lemma, we needed the solution to be optimal for the linearization of $\pi$ using ``slope'' $\lambda^*$; to satisfy the second, we needed to satisfy (P2). Here, we construct two solutions in ${\mathscr{F}}$ contained in $U_{\lambda^*}$ to satisfy these lemmas separately: \ignore{To motivate these solutions, we go back to the analysis for the unconstrained case in Theorem~\ref{thm:unc}. There, we showed that solution $x^{unc} = \chi_{U_{\lambda^*}}$ had large profit using the following properties: \begin{enumerate} \item[(i)] $x^{unc}$ was the \emph{maximum profit solution for the linearized objective} $L(\cdot, \lambda^*) = \ip{v - S^\intercal \lambda^*,\cdot} + g^\star(\lambda^*)$, where the cost $g$ was linearized with the good classifier $\lambda^*$. This allowed us to use the dual $\lambda^*$ to lower bound the profit of the solution up to the linearization gap (Lemma~\ref{lemma:linGap}). \item[(ii)] $x^{unc}$ had \emph{large occupation} in the coordinate $i^*$: that is, $\nabla g(Sx^{unc})_{i^*} = \lambda^*_{i^*}$. 
This allowed us to pay for the linearization gap (Lemma~\ref{lemma:profitDual}). \end{enumerate} Without constraints, both optimality and occupation were achieved simultaneously by picking all items in $U_{\lambda^*}$. However, having constraints ${\mathscr{F}}$ means we now use a separate solution for each.} \begin{alignat*}{2} \ensuremath{x^{lin}} &:= \textrm{argmax}\{ \ip{v,y} - \ip{\lambda^*,Sy} \mid y \subseteq U^{\degree}_{\lambda^*}, y \in {\mathscr{F}}\}\\ x^{occ} &:= \textrm{the solution promised by property~(P2')}. \end{alignat*} Since properties~(P1') and~(P3') hold for $\ensuremath{x^{lin}}$, Lemmas~\ref{lemma:linGap} and~\ref{lemma:gsi} hold essentially unchanged, and thus for any vector $x' \in \operatorname{Conv}({\mathscr{F}})$ we have % \begin{align} \ensuremath{\pi}(\ensuremath{x^{lin}}) \ge \ensuremath{\pi}(x') - d \cdot g^\star_{i^*}(\lambda^*_{i^*}). \label{eq:4} \end{align} The solution $x^{occ}$ may not belong to the set $U^{\degree}_{\lambda^*}$, since it may contain the threshold item $e^{\circ}$ with $v(e^{\circ}) = \ip{\lambda^*, s(e^{\circ})}$, if it exists (let $x^\circ = \chi_{\{e^\circ\}}$ be its characteristic vector, the all-zeros vector if it does not exist). Let $x^{rest} = x^{occ} - x^\circ$. \begin{lemma} \label{lem:rest} These solutions satisfy $\ensuremath{\pi}(x^{rest}) + \ensuremath{\pi}(x^{\circ}) \geq g^\star_{i^*}(\lambda^*_{i^*}).$ \end{lemma} \begin{subproof} Property~(P1') gives $\nabla g(Sx^{rest}) \leq \lambda^*$, and Property~(P2') implies $\nabla g(S(x^{rest} + x^{\circ})) = \nabla g(Sx^{occ})$ is at least $\lambda^*$ at some coordinate $i^*$. Since $g$ is convex and differentiable, the gradients are continuous~\cite[Remark~D.6.2.6]{HUL}, so there is $\delta \in [0,1]$ where the vector $\widehat{x} := x^{rest} + \delta x^\circ$ satisfies $\nabla g(S\widehat{x}) \le \lambda^*$ and $\nabla g(S\widehat{x})_{i^*} = \lambda^*_{i^*}$ for some coordinate $i^*$. 
Due to these properties, the proof of Lemma~\ref{lemma:profitDual} holds for $\widehat{x}$ and shows $\ensuremath{\pi}(\widehat{x}) \ge g^\star_{i^*}(\lambda^*_{i^*})$. The assumption of no exceptional items gives $\ensuremath{\pi}(\delta x^\circ) \leq \ensuremath{\pi}(x^\circ)$. From subadditivity of profit~$\ensuremath{\pi}$, % $g^\star_{i^*}(\lambda^*_{i^*}) \le \ensuremath{\pi}(\widehat{x}) \le \pi(x^{rest}) + \pi(\delta x^{\circ}) \le \pi(x^{rest}) + \pi(x^{\circ}).$ This concludes the proof. \end{subproof} Combining Lemma~\ref{lem:rest} with inequality~\eqref{eq:4}, for any $x' \in {\mathscr{F}}$ we have \begin{gather*} \pi(x') \leq \ensuremath{\pi}(\ensuremath{x^{lin}}) + d\,\ensuremath{\pi}(x^{rest}) + d\,\ensuremath{\pi}(x^\circ). \end{gather*} Since $\ensuremath{x^{lin}}, x^{rest}$ are feasible for problem~(a) in the theorem statement, and $x^\circ$ is feasible for problem~(b), the best of them gives a $(2d+1)$-approximation. This proves Theorem~\ref{thm:constr-main}. \end{proof} Picking the most profitable singleton is trivial offline, and well-approximable online by the secretary algorithm~\cite{freeman-secretary}. Moreover, we need to approximately optimize the \emph{submodular} function $\ensuremath{\pi}$ (Fact~\ref{fct:supermod}) over ${\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$ (i.e., the sets in ${\mathscr{F}}$ with only elements of $U^{\degree}_{\lambda^*}$). For several constraint structures (e.g., matroids, $p$-systems), there are known algorithms for approximately optimizing \emph{non-negative} (and sometimes also monotone) submodular functions. Unfortunately, our profit function $\ensuremath{\pi}$ may take negative values, so we cannot directly use these algorithms. Simply considering the truncated function $\max\{\pi(z), 0\}$ does not work because it may be non-submodular. 
In the next section, when $g$ is \emph{separable}, we introduce a way of making our profit function non-negative everywhere, while maintaining submodularity and preserving the values at the region of interest ${\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$. \subsection{Making the Profit Function $\ensuremath{\pi}$ Non-negative} \label{sec:monoton} We first show that $\ensuremath{\pi}$ already satisfies the desired properties over the sets in ${\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$. \begin{lemma} \label{lemma:mono-feas} The profit function $\ensuremath{\pi}$ is non-negative monotone over ${\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$. \end{lemma} \begin{proof} Since $\ensuremath{\pi}(\emptyset) = 0$ it suffices to show monotonicity. Consider $x \in {\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$ and let $\chi_e$ be the indicator of an item in $x$. Comparing the costs with and without $e$, we have \begin{align*} g(Sx) \stackrel{\text{(convexity)}}{\leq} g(S(x-\chi_e)) + \ip{ \nabla g(Sx), S\chi_e } \stackrel{\text{(Property~(P1'))}}{\leq} g(S(x - \chi_e)) + \ip{ \lambda^*, s(e)}. \end{align*} Since $e \in U^{\degree}_{\lambda^*}$, we have $v(e) > \ip{\lambda^*, s(e)}$ and thus $\ensuremath{\pi}(x) > \ensuremath{\pi}(x - \chi_e)$, i.e., monotonicity. \end{proof} However, to run algorithms that approximately optimize $\ensuremath{\pi}$ over ${\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$ in a black-box fashion, non-negativity over the feasible sets ${\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$ is not enough, even if the algorithm only probes $\ensuremath{\pi}$ over these sets, since their \emph{proof of correctness} may require this property outside of feasible sets. Thus, we need to modify $\ensuremath{\pi}$ to ensure non-negativity outside of ${\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$. 
For that, the idea is to truncate the gradient of the cost $g$ so $\nabla g(Sx)$ becomes at most $\lambda^*$ for all subsets $x \subseteq U^{\degree}_{\lambda^*}$ (i.e., so Property~(P1') holds for all subsets); this was the crucial element for the monotonicity (and hence non-negativity) proof above. Notice that since Property~(P1') already guarantees $\nabla g(Sx) \le \lambda^*$ for all $x \in {\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$, this does not change the value of $\ensuremath{\pi}$ over these points. The proof of the lemma is given in Appendix \ref{app:proofs}. \begin{lemma}\label{lemma:piplus} If $g$ is separable, there is a submodular function $\ensuremath{\pi}^+$ satisfying the following: \begin{OneLiners} \item[i.] $\ensuremath{\pi}^+$ is non-negative and monotone over all subsets of $U^{\degree}_{\lambda^*}$, and \item[ii.] $\ensuremath{\pi}^+(x) = \ensuremath{\pi}(x)$ for every $x \in {\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$. \end{OneLiners} \end{lemma} \subsection{The Offline Algorithm: Wrap-up} \label{sec:off-endgame} Using this non-negativization procedure, we get an $O(d)$-approximation \emph{offline} algorithm for constrained profit maximization for \emph{separable} cost functions $g$; this is an offline analog of Theorem~\ref{thm:main2}. For the unconstrained case, Lemma~\ref{lemma:mono-feas} implies that the profit function $\ensuremath{\pi}$ is itself monotone, so we get an $O(d)$-approximation offline algorithm for the \emph{supermodular} case. In the next section we show how to convert these algorithms into online algorithms. One issue we have not discussed is the computational cost of finding $\lambda^*$ satisfying (P1')--(P3'). In the full version of the paper, we show that for any $\varepsilon > 0$ we can efficiently find a $\lambda^*$ satisfying (P1'), (P2'), and a slightly weaker condition: $| g^\star_{i}(\lambda^*_{i}) - g^\star_{j}(\lambda^*_{j})| \leq 2\varepsilon$ for all $i, j \in [d]$. 
Using this condition in Theorem~\ref{thm:constr-main} means we get a profit of at least $\frac{\ensuremath{\mathsf{opt}\xspace} - 2d\varepsilon}{2d+1} \geq [\nicefrac{\ensuremath{\mathsf{opt}\xspace}}{(2d+1)}] - \varepsilon$; the running time depends on $\log \varepsilon^{-1}$ so we can make this loss negligible. \ifstandalone \end{document} \fi \subsection{Convex Functions} \label{sec:convex} To avoid degenerate conditions, we assume that the convex cost functions $g$ we consider are closed, not identically $+\infty$ and there is an affine function minorizing $g$ on $\R^d$. \begin{definition}[Convex Dual] \label{def:dual} For any function $g : \R^d \rightarrow \R$, its \emph{convex dual} is the function $g^\star : \R^d \rightarrow \R$ given by $$g^\star(y) := \sup_x \big[\ip{y,x} - g(x) \big].$$ \end{definition} \begin{claim}[Linearization] \label{clm:linear} (\cite[Theorem~E.1.4.1]{HUL}) For every convex function $g: \R^d \to \R$, any (sub)gradient at the point $x$ gives the ``right linearization'': \begin{gather} g(x) = \ip{ x, u } - g^\star(u) \iff u \in \partial g(x). \end{gather} \end{claim} \begin{claim}[Double Dual] (\cite[Corollary~E.1.3.6]{HUL}) \label{clm:doubleDual} Let $g : \R^d \rightarrow \R$ be a convex function. If its epigraph $\{(x,r) \in \R^d \times \R : r \ge g(x) \}$ is closed, then $g^{\star \star} = g$. \end{claim} \begin{claim}[Fenchel-Young Inequality] \label{clm:fy-ineq} For every convex function $g: \R^d \to \R$, linearizing using any vector gives us an underestimate on the value of $g$: \begin{gather} g(x) \geq \ip{ x, u } - g^\star(u). \end{gather} \end{claim} \begin{claim}[Dual Function Facts] \label{clm:fenchel-props} Let $g: \R_+^d \to \R$ be a convex function on the positive orthant. \begin{OneLiners} \item[a.] If $g(0) = 0$, then $g^\star(\lambda) \geq 0$ for all $\lambda$. \item[b.] If $\lambda \geq \lambda'$ then $g^\star(\lambda) \geq g^\star(\lambda')$. \item[c.] $g^\star$ is a closed convex function. 
\end{OneLiners} \end{claim} \begin{proof} For property~(a), $g^\star(\lambda) \geq \ip{ \lambda, 0} - g(0) = 0$ using Claim~\ref{clm:fy-ineq}. For property~(b), take any $x$ in $\R^d_+$ (the domain of $g$) and observe $$\ip{\lambda,x} - g(x) \ge \ip{\lambda',x} - g(x).$$ Take the supremum over all such $x$'s in the left-hand side and use Definition~\ref{def:dual} to get $$g^\star(\lambda) \ge \ip{\lambda',x} - g(x)$$ for all $x \in \R^d_+$. To complete the argument, take the supremum on the right-hand side. For property~(c), see \cite[Theorem~E.1.1.2]{HUL}. \end{proof} \begin{claim}[Duals and Marginals Commute] \label{clm:dual-marginal} Given a monotone convex $g: \R^d \to \R$, $(g^\star)_i(z) = (g_i)^\star(z)$ for all $z \in \R$. I.e., the marginal of the dual is the same as the dual of the marginal. \end{claim} \begin{proof} $(g^\star)_i(z) = g^\star(z \mathbf{e}_i) = \max_x \left(\ip{z \mathbf{e}_i, x} - g(x)\right) \stackrel{g \textrm{ increas.}}{=} \max_{x_i} \left(z x_i - g(x_i \mathbf{e}_i)\right)$ $= \max_{x_i} \left(z x_i - g_i(x_i)\right)$ $= (g_i)^\star(z)$. This means there are no concerns of ambiguity when we write $g^\star_i(z)$. \end{proof} \begin{claim}[Subadditivity over Coordinates] \label{claim:subaddCoord} Given a superadditive convex function $g : \R^d \rightarrow \R$, \begin{align*} g^\star(\lambda) \le \sum_i g^\star_i(\lambda_i) ~~~~~\forall \lambda. \end{align*} \end{claim} \begin{proof} From the definition of convex dual, we have \begin{align} g^\star(\lambda) = \max_{x} (\ip{x,\lambda} - g(x)) \le \max_{x} \big(\ip{x,\lambda} - \sum_i g_i(x_i)\big) = \sum_i \max_{x_i} (x_i \, \lambda_i - g_i(x_i)) = \sum_i g^\star_i(\lambda_i).\notag \end{align} The inequality uses the superadditivity of $g$. \end{proof} \section{Introduction} \label{sec:introduction} The problem we consider is a basic convex optimization problem in the online setting: $n$ items appear one-by-one. 
Each item/element $e$ has a \emph{$d$-dimensional size} $s(e) \in \R_+^d$ and a \emph{value} $v(e) \in \R_+$, which are both revealed to us when the item arrives. We must either accept or reject an item when it arrives, before seeing the future items. If we accept a certain subset $A \subseteq [n]$ of the items, we get their total value $v(A) := \sum_{e \in A} v(e)$, but incur a production cost $g(s(A)) := g(\sum_{e \in A} s(e))$, where $g: \R_+^d \to \R_+$ is a non-decreasing \emph{convex cost} function with $g(0) = 0$. Optionally, we may also be given a downwards-closed family of subsets ${\mathscr{F}} \subseteq 2^{[n]}$, and now the accepted set of elements $A$ must lie in ${\mathscr{F}}$. More formally, we want to solve \begin{gather} \max_{A \in {\mathscr{F}}} \text{ profit } \ensuremath{\pi}(A) := \big[ v(A) - g(s(A)) \big]. \label{eq:main1} \end{gather} This question arises, e.g., when we are selling some service that depends on $d$ commodities, where the value is the amount of money customer $e$ is willing to pay for the service, and the size vector $s(e)$ is the amount of resources she will require. The cost function $g(\cdot)$ captures our operating expenses; its convexity models \emph{diseconomies of scale} that arise when dealing with scarce commodities. In particular, it can capture $d$-dimensional knapsack constraints, by setting $g(z) = 0$ until the knapsack size, and $\infty$ afterwards. When the cost function is linear $g(z) = \ip{a,z}$, we want to pick a max-weight subset from ${\mathscr{F}}$ using item weights $v(e) - \ip{a, s(e)}$, which is tractable/approximable for ${\mathscr{F}}$ being a matroid, $p$-system, etc. Blum et al.~\cite{BGMS11} defined this problem in the adversarial model, and gave posted-price algorithms for ``low-degree'' \emph{separable} cost functions $g$, that is, of the form $g(z) = \sum_{i=1}^d g_i(z_i)$ for 1-dimensional functions $g_i$'s. 
This result was tightened by Huang and Kim~\cite{HK15}, still for separable functions with additional growth control. More recently, Azar et al.~\cite{AzarBCCCGHKNNP16} studied this problem for more general \emph{supermodular} non-separable convex functions $g$ (see also~\cite{EF16}). A differentiable function $g$ is supermodular if for any vectors $x \le x'$ we have $\nabla g(x) \le \nabla g(x')$. Equivalently, if $g$ is twice-differentiable, it is supermodular if $\frac{\partial^2 g}{\partial x_i \partial x_j} \geq 0$ for all $i \neq j$, i.e., increasing the consumption of a resource cannot decrease the marginal cost for another. However, to handle the worst-case ordering, Azar et al.\ also require the cost functions to be of essentially low degree. Can we do better by going beyond the worst-case model? In this paper, we focus on the random-order or ``secretary'' setting, where the set of items is fixed by an adversary but they arrive in random order. In the single-dimensional case $d=1$, it is easy to see that a solution that learns a ``good'' threshold $\lambda$ and picks all further items with density $v(e)/s(e)$ at least $\lambda$ essentially gives a constant approximation, much like in the secretary and knapsack secretary problems~\cite{freeman-secretary,babaioff}. The multi-dimensional case is much more challenging. This was studied by Barman et al.~\cite{BUCM}, again assuming a separable cost function $g(z) = \sum_{i=1}^d g_i(z_i)$. They give an $O(d)$-competitive algorithm for the unconstrained case, and an $O(d^5 \alpha)$-competitive algorithm for the problem with a downward closed constraint set ${\mathscr{F}}$, where $\alpha$ is the competitive ratio for the ${\mathscr{F}}$-secretary problem. Their main idea is to perform a clever decomposition of the value of each item into ``subvalues'' $v_i(e)$ for each of the coordinate cost functions $g_i$'s; this effectively decomposes the problem into $d$ 1-dimensional problems with values $v_i$'s and costs $g_i$'s. 
Unfortunately, since their solution explicitly relies on the decomposability of the cost function, it is unclear how to extend it to general supermodular functions. We note that when the cost function is supermodular, the profit function is a \emph{submodular} set function (Section \ref{sec:superm-funct}). However, the profit can take \emph{negative values}, and then existing algorithms for submodular maximization break down.\footnote{For example, we can model set packing (which is $\Omega(\sqrt{\textrm{\# sets}})$-hard) as follows: for a subcollection $\mathcal{S}$ of sets, let $\pi(\mathcal{S}) = |\bigcup_{S \in \mathcal{S}} S| - \sum_{S \in \mathcal{S}} (|S| - 1)$. The function $\pi$ is submodular, and its maximizer is a largest set packing.} Our work is then motivated by trying to better understand the multi-dimensional nature of this problem, and provide a more principled algorithmic approach. \subsection{Our Results} \label{sec:our-results} We use techniques from convex duality to re-interpret, simplify, and improve the existing results. First, we obtain the first approximation for non-separable supermodular cost functions. (We omit some mild regularity conditions for brevity; see Section~\ref{sec:unconstr} for full details.) \begin{theorem}[Unconstrained \& Supermodular] \label{thm:main1} For the unconstrained problem with \emph{supermodular} convex cost functions $g$, we give an $O(d)$-competitive randomized algorithm in the random-order model. \end{theorem} This result generalizes the $O(d)$-approximation of Barman et al.~\cite{BUCM} to the non-separable case. The factor $d$ seems unavoidable, since our problem inherits the (offline) $\Omega(d^{1-\varepsilon})$ hardness of the $d$-dimensional knapsack, assuming $NP \neq ZPP$~\cite{DGV05}. Next, we consider the constrained case. 
For simplicity, we focus on the most interesting case where ${\mathscr{F}}$ is a matroid constraint; more general results can be obtained from the results and techniques in Section~\ref{sec:contr-online}. \begin{theorem}[Constrained \& Separable] \label{thm:main2} For the constrained problem with ${\mathscr{F}}$ being a matroid constraint, and the cost function $g$ being \emph{separable}, we get an $O(d^2 \log \log \text{rank})$-competitive randomized algorithm in the random-order model. \end{theorem} This improves by a factor of $d^3$ the $O(d^5 \log \log \text{rank})$-approximation given by~\cite{BUCM}. Finally, we give a general reduction that takes an algorithm for \emph{separable} functions and produces an algorithm for \emph{supermodular} functions, both with respect to a matroid constraint. This implies: \begin{theorem}[Constrained \& Supermodular] \label{thm:main2b} For the constrained problem with ${\mathscr{F}}$ being a matroid constraint, and the cost function $g$ being \emph{supermodular}, we get an $O(d^3 \log \log \text{rank})$-competitive randomized algorithm in the random-order model. \end{theorem} Our conceptual contributions are in bringing techniques from convex duality to obtain, in a principled way, \emph{threshold-based} algorithms for non-linear secretary problems. Since this is a classical and heavily used algorithmic strategy for secretary problems~\cite{freeman-secretary,babaioff,kleinberg,agrawal,mRavi}, we hope that the perspectives used here will find use in other contexts. \ignore{ \subsection{Our Techniques} \label{sec:our-techniques} One of our main contributions is to use the lens of convex duality to study this problem. This allows us to simplify and elucidate several of the arguments in prior work (especially that in~\cite{BUCM}). In turn, this allows us to tighten these results, and extend them to the more general setting of supermodular functions. To get a sense for the algorithm, look at the single-dimensional problem. 
Here the sizes are given by a vector $s$ in $\R^n$ and $g: \R_+ \to \R_+$ is a univariate convex function, and assume the items are allowed to be fractional. (The classical analog of this problem is the $1$-dimensional fractional knapsack problem, where $g(z) = 0$ until the knapsack size, and $\infty$ afterwards.) A little thought tells us that an optimal solution here is again to pick items in decreasing order of value density (i.e., value/size). Adding these items causes the total occupancy---and hence the incurred cost---to increase, so we stop when the value density of the current item becomes smaller than the derivative of the cost function at the current utilization, since the marginal increase in value-minus-cost becomes negative at this point. I.e., we find a threshold $\rho$ such that $g'(\text{\emph{total size of items having density}}\geq \rho) \approx \rho$, and take all these high-density items. In order to extend this analysis to the multi-dimensional convex case, it helps to derive the above algorithm in a more principled fashion. We can take the (Fenchel) dual of the cost function to get \begin{gather} \max_{x \in [0,1]^n} \big( \ip{v,x} - g(s^\intercal x) \big) \quad = \quad \max_{x \in [0,1]^n} \min_{\lambda \in \R_+} \bigg[\ip{v,x} - \bigg(\ip{\lambda,s^\intercal x} - g^\star(\lambda) \bigg) \bigg]. \end{gather} For those not used to this dualization, the idea is simple. Take all possible tangents lying below the curve $g$ (where a generic tangent is given by its slope $\lambda \in \R_+$ and its $y$-intercept $- g^\star(\lambda)$). The function value $g(z)$ is by definition greater of all these linear functions, and in fact equals their supremum. Consider an optimal $(x,\lambda)$ primal-dual pair of this max-min problem---i.e., a pair such that neither $x$ or $\lambda$ can be improved \emph{given the other}. 
If $(x^*, \lambda^*)$ is such a pair, then \begin{itemize} \item By optimality we have $x^* \in \arg\max_{x \in [0,1]^n} \{ \ip{v - \lambda^* s, x} \}$. Hence, for every item $i$ with $v(e) > \lambda^* s(e)$, we set $x_e$ to $1$, and for $v(e) < \lambda^* s(e)$, we set $x_e$ to $0$. \item Differentiating, the optimality of $\lambda$ gives $s^\intercal x^* = (g^\star)'(\lambda^*)$. Now since the derivatives $g'$ and $(g^\star)'$ are inverses of each other (as long as $g$ is reasonable), we get $\lambda^* = g'(s^\intercal x^*)$. \end{itemize} Not surprisingly, this gives us precisely what we obtained from first principles: that we would set a threshold such that the occupancy at this threshold is precisely the derivative of $g$ at this point, and choose all items with density at least as high. Of course, since the integral case $x \in \{0,1\}^n$ contains knapsack, we do not expect to solve it exactly. But taking all items as long as the gradients are low, or taking the item that causes the gradient to overshoot, is the classical $2$-approximation. And indeed, this kind of analysis conceptually goes over unchanged for the $d$-dimensional case. We would like to find $x^* \in \{0,1\}^n$ and $\lambda^* \in \R^d$ such that we can pick all items with $v(e) > \ip{ \lambda^*, s(e)}$ and some subset of the items $\{e \mid v(e) = \ip{ \lambda^*, s(e)}\}$, to get a $O(d)$-approximation. Moreover the ``classifier'' $\lambda^*$ would satisfy the gradient condition $\lambda^* = \nabla g(\sum_e s(e) x_e)$. But how do we find such an $(x^*, \lambda^*)$ pair? There is no monotonicity of classifiers in dimensions two or higher, so different $\lambda$s pick incomparable sets, and searching over the space of $\lambda$s is not easy. The natural idea we use (due to~\cite{BUCM}) is to now define a $1$-dimensional monotone curve $\mathcal{C}$ through $\R^d$. Very loosely speaking, for each classifier $\lambda$ on this curve, we are guaranteed a similar amount of profit from each coordinate. 
Moreover, now we have monotonicity: the set of items picked by some $\lambda$, i.e., $\{e \mid v(e) \geq \ip{ \lambda, s(e)} \}$ gets smaller as we move further along in the curve, so we have some hope of binary-searching for the right classifier. Now the convex duality view gives us the right tools to do so: restricting to this curve means we may not achieve the optimality condition $\lambda^* = \nabla g(Sx^*)$, but we give some approximate version of this, and can quantify the loss due to this approximation. Section~\ref{sec:unconstr} steps through this argument again for the unconstrained case, hopefully in an manner accessible also to non-experts. In Section~\ref{sec:constr} we give the small changes required to extend the arguments to the constrained case. Part of the complexity in the prior algorithms~\cite{BUCM} come from the fact that the profit function is not monotone; tools using duality allow us to get a monotone version of profit functions arising from separable cost functions in Section~\ref{sec:monoton}. Section~\ref{sec:contr-online} gives details of how estimate the desired $\lambda^*$ online. Section~\ref{sec:separ-vs-super} gives our procedure to convert an algorithm for separable functions into one for supermodular functions, both subject to matroid constraints. Since some basic familiarity with convex functions and conjugates will be useful, we give basic facts about convex functions and some probabilistic inequalities in Appendix~\ref{sec:app-facts} for completeness. The details of the procedures required to implement our algorithms efficiently are in Appendix~\ref{sec:implementation}, and we discharge some convenient assumptions in Appendix~\ref{sec:other-loose-ends}. } \subsection{Other Related Work} \label{sec:related-work} There is a vast literature on secretary problems~\cite{freeman-secretary}. 
Closest to our setting, Agrawal and Devanur study an online convex optimization problem in the random order model, and give a powerful result showing strong regret bounds in this setting~\cite{AD15}. They extend this result to give algorithms for online packing LPs with ``large'' right-hand sides. However, it is unclear how to use their algorithm to obtain results in our setting. Other algorithms solving packing LPs with large right-hand sides appear in~\cite{agrawal,Devanur11,mRavi,KRTV14,GM16,ESF14}. Feldman and Zenklusen~\cite{FZ15} show how to transform any algorithm for (linear) matroid secretary into one for \emph{submodular} matroid secretary. They give an $O(\log \log \text{rank})$-algorithm for the latter, based on results of~\cite{Lachish14,FSZ15}. All these algorithms critically assume the submodular function is non-negative everywhere, which is not the case for us, since picking too large a set may cause the profit function to go negative. Indeed, one technical contribution is a procedure for making the profit function non-negative while preserving submodularity (Section~\ref{sec:monoton}), which allows us to use these results as part of our solution. \subsection{Structure of the paper} Section~\ref{sec:unconstr} develops the convex duality perspective used in the paper for the offline version of the unconstrained case, hopefully in a manner accessible to non-experts. Section~\ref{sec:constr} gives the small changes required to extend this to the constrained case. Section~\ref{sec:contr-online} shows how to transform these into online algorithms. Section~\ref{sec:separ-vs-super} shows how to convert an algorithm for separable functions into one for supermodular functions, both subject to matroid constraints. To improve the presentation, we make convenient assumptions throughout, which are discharged in Appendix~\ref{sec:other-loose-ends}. 
Since some familiarity with convex functions and conjugates will be useful, we give basic facts about them and some probabilistic inequalities in Appendix~\ref{sec:app-facts}. \section{Facts about Convex Functions and Useful Inequalities} \label{sec:app-facts} \input{convex} \input{concen} \input{proofs} \input{appendix} \end{document} \section{Separability versus Supermodularity} \label{sec:separ-vs-super} In this section, we show that a $\beta$-approximation algorithm for the separable-cost case gives a $O(d\beta)$-approximation for a slight generalization of the supermodular-cost case. Consider the problem of picking a set $A$ to solve \[ \ensuremath{\pi}(A) := \max_{A \in {\mathscr{F}}} \bigg( v(A) - g\big(\sum_{e \in A} s(e)\big) \bigg), \] where $v(A)$ is a (discrete) submodular function over $\{0,1\}^n$ with $v(\emptyset) = 0$, $g$ is a convex, (continuous) supermodular function over $\R^d$, and ${\mathscr{F}}$ is some downward-closed constraint set. We show that for the case of matroid constraints, this problem can be reduced to the setting where the cost function is separable over its $d$ coordinates, suffering a loss of $O(d)$. \begin{theorem}[Reduction] \label{thm:reduction} Given a $\beta$-approximation algorithm for profit-maximization for \emph{separable} convex cost functions under matroid constraints, we can get a $d(\beta + 2\mathrm{e} d)$-approximation algorithm for the profit-maximization problem with \emph{supermodular costs} $g$, \emph{submodular values} $v$, and ${\mathscr{F}}$ being a \emph{matroid} constraint. \end{theorem} The reduction is the following: \begin{OneLiners} \item[1.] Define separable costs $\overline{g}(y) := \nicefrac1d \sum_{i = 1}^d g_i(d y_i)$, where $g_i$ are marginal functions for $g$. \item[2.] W.p.\ $p = \frac{\beta}{\beta + 2\mathrm{e} d}$, run single-secretary algorithm to return element with maximum profit. \item[3.]
W.p.\ $1-p = \frac{2\mathrm{e} d}{\beta + 2\mathrm{e} d}$, run algorithm for value function $v(\cdot)$ and separable cost fn.\ $\overline{g}(\cdot)$. \end{OneLiners} This reduction relies on the following simple but perhaps surprising observation that relates separability with supermodularity, which may find other applications. \begin{lemma} \label{lemma:dapprox} Given a monotone convex superadditive function $g$ with $g(0) = 0$, let $g_i$ be the marginal functions. Then for all $y \in \R^d_+$: \begin{enumerate} \item $g(y) \ge \sum_i g_i(y_i)$ \item $g(y) \le \frac{1}{d} \sum_i g_i(d y_i) = \overline{g}(y)$. \end{enumerate} \end{lemma} \begin{proof} The first property follows from the superadditivity of $g$, and the second follows from Jensen's inequality. \end{proof} While the full proof of Theorem \ref{thm:reduction} is deferred to Appendix \ref{app:proofs}, the main idea is clean. Given an optimal integer solution $x^*$ for the original problem (with the original cost function), we use Lemma~\ref{lemma:dapprox} and the Lov\'asz (convex) extension of submodular functions to show that $x^*/d$ is a good fractional solution for the separable cost function. Now using polyhedral properties of $d$-dimensional faces of the matroid polytope, and other properties of the Lov\'asz extension, we show the existence of a good integer solution to the separable problem. Combining this reduction with Theorem~\ref{thm:main2} proves Theorem~\ref{thm:main2b}. \section{The Online Algorithm} \label{sec:contr-online} \renewcommand{U^{\text{sample}}}{L} \renewcommand{U^{\oos}}{R} \newcommand{R^\circ}{R^\circ} \renewcommand{{\widehat{\lambda}}}{\mu} \renewcommand{{\color{red} UB}}{M_i^+} In the previous sections we were working offline: in particular, in computing the ``good'' classifier $\lambda \in \mathcal{C}$, we assumed knowledge of the entire element set. We now present the online framework for the setting where elements come in random order.
Recall the definition of the curve $\CC$ from \S\ref{sec:unconstr}, and the fact that there is a total order among all $\lambda \in \CC$. Recall that for simplicity we restrict the constraints ${\mathscr{F}}$ to be matroid constraints. For a subset of elements $A\subseteq U$, let $\ensuremath{\mathsf{opt}\xspace}(A)$ and $\ensuremath{\mathsf{fopt}\xspace}(A)$ denote the integer and fractional optimal profit for ${\mathscr{F}}|_A$, the feasible solutions restricted to elements in $A$. Note that in the fractional case this means the best solution in the convex hull $\operatorname{Conv}({\mathscr{F}}|_A)$. Clearly, $\ensuremath{\mathsf{fopt}\xspace}(A) \ge \ensuremath{\mathsf{opt}\xspace}(A)$. We use $\ensuremath{\mathsf{opt}\xspace}$ and $\ensuremath{\mathsf{fopt}\xspace}$ to denote $\ensuremath{\mathsf{opt}\xspace}(U)$ and $\ensuremath{\mathsf{fopt}\xspace}(U)$ for the entire instance~$U$. Again we work under Assumption~\ref{asm:nice}. We will also make use of any algorithm for maximizing submodular functions over ${\mathscr{F}}$ in the random-order model satisfying the following. \begin{assumption} \label{ass:submod-algo} Algorithm $\textsf{SubmodMS}$ takes a nonnegative monotone submodular function $f$ with $f(\emptyset) = 0$, and a number $N$. When run on a sequence $X$ of $N$ elements presented in random order, it returns a (random) subset $X_{alg} \in {\mathscr{F}}$ with expected value ${\mathbb{E}}[f(X_{alg})] \geq \frac{1}{\alpha} \max_{X' \in {\mathscr{F}}} f(X')$. Moreover, it only evaluates the function $f$ on \textbf{feasible} sets. \end{assumption} Our algorithm is very simple: \begin{algorithm}[H] \caption{Online Algorithm for Profit Maximization} \begin{algorithmic}[1] \small \State $U^{\text{sample}} \gets$ first $\text{Binomial}(n, \nicefrac12)$ items.
\State ${\widehat{\lambda}} \gets $ largest vector on curve $\mathcal{C}$ s.t.\ $\ensuremath{\mathsf{fopt}\xspace}(U^{\text{sample}}_{{\widehat{\lambda}}}) \ge \frac{1}{12 d}\, \ensuremath{\mathsf{fopt}\xspace}(U^{\text{sample}})$.\label{line:2} \State $U^{\oos} \gets $ remaining instance, namely the last $n - |L|$ items. \State $R^\circ_{{\widehat{\lambda}}} \gets \{ e \in U^{\oos} \mid v(e) > \ip{ {\widehat{\lambda}}, s(e)} \}$ be the (strictly) ``filtered'' remaining instance. \State {\bf Un-constrained:} Select items in $R^\circ_{{\widehat{\lambda}}}$ not decreasing the current value of the solution. \hspace{-0.7cm}{\bf Constrained:} Run algorithm $\textsf{SubmodMS}$ on $R^\circ_{{\widehat{\lambda}}}$ using the profit function $\ensuremath{\pi}$, selecting items according to this algorithm. However, do not add any items that decrease the current value of the solution. \end{algorithmic} \label{alg:online-constr} \end{algorithm} Note that $U^{\text{sample}}_{{\widehat{\lambda}}}$ denotes the set of items in the sample $L$ picked by ${\widehat{\lambda}}$ (Definition \ref{def:occ}). In Step~\ref{line:2}, we can use the Ellipsoid method to find $\ensuremath{\mathsf{fopt}\xspace}$, i.e., to maximize the concave profit function $\ensuremath{\pi}$ over the matroid polytopes $\operatorname{Conv}({\mathscr{F}}|_{U^{\text{sample}}_{{\widehat{\lambda}}}})$ and $\operatorname{Conv}({\mathscr{F}}|_{U^{\text{sample}}})$, within negligible error. Moreover, we must do this for several sets $U^{\text{sample}}_{{\widehat{\lambda}}}$ and pick the largest one on $\mathcal{C}$ using a binary-search procedure. We defer the technical details to the full version of the paper. \subsection{Analysis} To analyze the algorithm, we need to show that the classifier ${\widehat{\lambda}}$ learned in Step~\ref{line:2} is large enough that we do not waste space with useless items, but low enough that we admit enough useful items.
Along the way we frequently use the concentration bound from Fact~\ref{fact:submod-conc}. For this we need the profit function $\ensuremath{\pi}$ to satisfy a Lipschitz-type condition (\ref{eq:lip}) on the optimal solutions of any given sub-instance. To facilitate this, let us record a useful lemma, proved in Appendix \ref{app:proofs}. For a vector $y \in \R^n$, and a subset $A \subseteq U$, define $y_A$ to be the same as $y$ on $A$, and zero outside $A$. \begin{claim} \label{clm:lip} Consider any $U' \subseteq U$, and let $y$ be an optimal fractional solution on ${\mathscr{F}}|_{U'}$ (so $\ensuremath{\pi}(y) = \ensuremath{\mathsf{fopt}\xspace}(U')$). Then for any $B \subseteq A \subseteq U'$ with $|A \setminus B| = 1$, we have $| \ensuremath{\pi}(y_A) - \ensuremath{\pi}(y_B) | \leq M$, where $M$ is an upper bound on the profit from any single item. \end{claim} From Section~\ref{sec:constr}, recall $\lambda^* \in \R^d_+$ is a classifier that satisfies properties~(P1')--(P3'). \begin{lemma}[Goldilocks Lemma]\label{lem:goldilocks} \label{lemma:online-good-lambda} Given Assumption \ref{asm:nice}, the classifier ${\widehat{\lambda}}$ computed in Line~\ref{line:2} of Algorithm~\ref{alg:online-constr} satisfies: \begin{enumerate} \item[(a)] (Not too small) ${\widehat{\lambda}} \ge \lambda^*$, with probability at least $\nicefrac{19}{20}$. \item[(b)] (Not too big) $\ensuremath{\mathsf{fopt}\xspace}(U_{{\widehat{\lambda}}}) \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{48d}$ with probability at least $1-\nicefrac{1}{20d} \geq \nicefrac{19}{20}$. \end{enumerate} \end{lemma} \begin{proof}[Proof sketch] (See Appendix \ref{app:proofs} for full proof.) For the first part, we show that the classifier $\lambda^*$ satisfies the properties needed in Line~\ref{line:2} with probability $1 - \nicefrac{1}{20}$; since ${\widehat{\lambda}}$ is the largest such vector, we get ${\widehat{\lambda}} \geq \lambda^*$. 
Using Theorem~\ref{thm:constr-main} and the assumption that no item has large profit, we have $\ensuremath{\mathsf{fopt}\xspace}(U_{\lambda^*}) \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{3d}$. Moreover, the sample obtains at least half of this profit in expectation, i.e., ${\mathbb{E}}\,\ensuremath{\mathsf{fopt}\xspace}(L_{\lambda^*}) \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{6d}$. Then using concentration (Fact \ref{fact:submod-conc}) with the Lipschitz property of Claim \ref{clm:lip} and the no-high-profit-item assumption, we have $\ensuremath{\mathsf{fopt}\xspace}(L_{\lambda^*}) \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{12d}$ (which is at least $\frac{\ensuremath{\mathsf{fopt}\xspace}(L)}{12d}$) with probability at least $\nicefrac{19}{20}$. Thus, with this probability $\lambda^*$ satisfies the properties needed in Line~\ref{line:2} of the algorithm, as desired. For part (b) of the lemma, notice that for each scenario $\ensuremath{\mathsf{fopt}\xspace}(U_{{\widehat{\lambda}}}) \ge \ensuremath{\mathsf{fopt}\xspace}(U^{\text{sample}}_{{\widehat{\lambda}}})$, since feasible solutions for the sample are feasible for the whole instance. Next, by definition of ${\widehat{\lambda}}$, $\ensuremath{\mathsf{fopt}\xspace}(U^{\text{sample}}_{{\widehat{\lambda}}}) \geq \frac{\ensuremath{\mathsf{fopt}\xspace}(U^{\text{sample}})}{12d}$. Finally, if $x$ is the fractional optimal solution on $U$ with $\ensuremath{\pi}(x) = \ensuremath{\mathsf{fopt}\xspace}$, then ${\mathbb{E}}[\pi(x_{U^{\text{sample}}})] \ge \ensuremath{\mathsf{fopt}\xspace}/2$, since $g$ is super-additive. Again using the concentration bound Fact~\ref{fact:submod-conc}, the profit $\pi(x_{U^{\text{sample}}})$ is at least $\frac{\ensuremath{\mathsf{fopt}\xspace}}{4}$ with probability at least $(1-\nicefrac{1}{20d})$. Of course, $\ensuremath{\mathsf{fopt}\xspace}(L) \geq \ensuremath{\pi}(x_L)$.
Chaining these inequalities, $\ensuremath{\mathsf{fopt}\xspace}(U_{{\widehat{\lambda}}}) \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{48d}$ with this probability. \end{proof} In view of Theorem \ref{thm:constr-main}, we show the filtered out-of-sample instance $R^\circ_{{\widehat{\lambda}}}$ behaves like $U^{\degree}_{\lambda^*}$. \begin{lemma} \label{lem:constr-oos} The filtered out-of-sample instance $R^\circ_{{\widehat{\lambda}}}$ satisfies the following w.p.\ $\nicefrac{19}{20}$: \begin{OneLiners} \item[(a)] For all $e \in R^\circ_{{\widehat{\lambda}}}$, $v(e) \ge \langle {\lambda^*}, s(e) \rangle$. \item[(b)] For all $x$ with $\text{support}(x) \subseteq R^\circ_{{\widehat{\lambda}}}$ such that $x\in {\mathscr{F}}$, $\nabla g(Sx) \le {\lambda^*}$. \item[(c)] $\ensuremath{\mathsf{fopt}\xspace}(R^\circ_{{\widehat{\lambda}}}) \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{200d}$. \end{OneLiners} \end{lemma} \begin{proof} By Lemma~\ref{lem:goldilocks}(a), threshold ${\widehat{\lambda}} \ge {\lambda^*}$ with probability $\nicefrac{19}{20}$. When that happens, $U^{\degree}_{\widehat{\lambda}} \subseteq U^{\degree}_{\lambda^*}$. Since the first two properties hold for $U^{\degree}_{\lambda^*}$, they also hold for $U^{\degree}_{{\widehat{\lambda}}}$, and by downward-closedness, also for $R^\circ_{{\widehat{\lambda}}}$. For the third part, let $\lambda^+$ be the largest threshold in $\mathcal{C}$ such that $\ensuremath{\mathsf{fopt}\xspace}(U_{\lambda^+}) \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{48 d}$. From Lemma~\ref{lemma:online-good-lambda}(b), with good probability we have ${\widehat{\lambda}} \le \lambda^+$. Since ${\widehat{\lambda}}$ is a smaller threshold, the instance $U_{\lambda^{+}}$ is contained in the instance $U_{{\widehat{\lambda}}}$, which implies that for every scenario $\ensuremath{\mathsf{fopt}\xspace}(U^{\oos}_{{\widehat{\lambda}}}) \ge \ensuremath{\mathsf{fopt}\xspace}(U^{\oos}_{\lambda^+})$. 
Next we will show that with good probability $\ensuremath{\mathsf{fopt}\xspace}(U^{\oos}_{\lambda^+}) \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{200d}$, and hence get the same lower bound for $\ensuremath{\mathsf{fopt}\xspace}(U^{\oos}_{{\widehat{\lambda}}})$. If $y$ is the optimal fractional solution for $U_{\lambda^+}$, then $y_{U^{\oos}}$ is feasible for $U^{\oos}_{\lambda^+}$ with ${\mathbb{E}}[\ensuremath{\pi}(y_{U^{\oos}})] = \frac12 \ensuremath{\mathsf{fopt}\xspace}(U_{\lambda^+}) \geq \frac{\ensuremath{\mathsf{fopt}\xspace}}{96d}$. Moreover, using the concentration bound again, we get that $\ensuremath{\pi}(y_{U^{\oos}}) \geq \frac{\ensuremath{\mathsf{fopt}\xspace}}{192d}$ with probability at least $\nicefrac{19}{20}$. Finally, by the assumption of general position, there is at most one item in $U^{\oos}_{{\widehat{\lambda}}} \setminus R^\circ_{\widehat{\lambda}}$. Dropping this item from the solution $y$ to get $y^\circ$ reduces the value by at most $M \le \frac{\ensuremath{\mathsf{fopt}\xspace}}{10^4d}$; here we use subadditivity of the profit, and that there are no exceptional items. Hence, with probability at least $\nicefrac{19}{20}$: \begin{gather*} \ensuremath{\mathsf{fopt}\xspace}(R^\circ_{{\widehat{\lambda}}}) \ge \ensuremath{\mathsf{fopt}\xspace}(R^\circ_{\lambda^+}) \ge \ensuremath{\pi}(y^\circ_{U^{\oos}}) \geq \frac{\ensuremath{\mathsf{fopt}\xspace}}{192d} - M \geq \frac{\ensuremath{\mathsf{fopt}\xspace}}{200d}. \qedhere \end{gather*} \end{proof} Finally, we are ready to prove the main theorems in the online setting. \begin{theorem}[Unconstrained Case: Supermodular Cost Functions] \label{thm:online-unconstr} Algorithm \ref{alg:online-constr} gives an $O(d)$-approximation in expectation for the unconstrained case, if the cost function is supermodular. \end{theorem} \begin{proof} Define the event ${\mathcal{E}}$ that Lemmas~\ref{lemma:online-good-lambda} and~\ref{lem:constr-oos} hold; $\Pr({\mathcal{E}}) \geq \nicefrac{17}{20}$.
Now, by Lemma~\ref{lem:constr-oos}(c), the optimal fractional solution for $R^\circ_{{\widehat{\lambda}}}$ has profit at least $\ensuremath{\mathsf{fopt}\xspace}/200d$. Moreover, since there are no constraints, the profit function is monotone submodular over all of $U^{\circ}_{\lambda^*}$ by Lemma~\ref{lemma:mono-feas}. Conditioning on the good event ${\mathcal{E}}$, Lemma~\ref{lemma:online-good-lambda}(a) gives that $R^\circ_{{\widehat{\lambda}}} \subseteq U^{\circ}_{\lambda^*}$, so the algorithm to maximize the monotone submodular function (both integrally and fractionally) is to pick all elements. Hence, conditioned on ${\mathcal{E}}$, the profit we get is at least $\ensuremath{\mathsf{fopt}\xspace}/200d$. In the other case, we never pick an item that gives negative marginal value, so our solution is always non-negative. Hence our expected profit is at least $\Pr[{\mathcal{E}}] \cdot \frac{\ensuremath{\mathsf{fopt}\xspace}}{200d} = \Omega(\ensuremath{\mathsf{fopt}\xspace}/d)\ge \Omega(\ensuremath{\mathsf{opt}\xspace}/d)$. \end{proof} The analysis of the algorithm for the constrained separable-cost case is similar, only using the constrained offline guarantees of Theorem \ref{thm:constr-main}, and the non-negativization Lemma \ref{lemma:mono-feas} to argue that $\textsf{SubmodMS}$ maintains its guarantees. Details are provided in Appendix \ref{app:proofs}. \begin{theorem}[Constrained Case: Separable Cost Functions] \label{thm:online-constr} Suppose algorithm $\textsf{SubmodMS}$ satisfies Assumption~\ref{ass:submod-algo} and is $\alpha$-competitive in expectation. Then Algorithm \ref{alg:online-constr} gives an $O(\alpha d^2)$-approximation in expectation. \end{theorem} \ifstandalone \end{document} \fi \section{The Problems} \section{Preliminaries} \label{sec:preliminaries} \paragraph{Problem Formulation.} Elements from a universe $U$ of size $n$ are presented in random order. Each element $e$ has value $v(e) \in \R_+$ and size $s(e) \in \R_+^d$.
We are given a convex cost function $g: \R_+^d \to \R_+$. On seeing each element we must either accept or discard it. A downwards-closed collection ${\mathscr{F}} \subseteq 2^U$ of feasible sets is also given. When ${\mathscr{F}} = 2^U$, we call it the \emph{unconstrained} problem. The goal is to pick a subset $A \in {\mathscr{F}}$ to maximize the \emph{profit} \begin{gather} \ensuremath{\pi}(A) := \sum_{e \in A} v(e) - g\big( \sum_{e \in A} s(e) \big). \label{eq:1} \end{gather} We often use vectors in $\{0,1\}^n$ to denote subsets of $U$; $\chi_A$ denotes the indicator vector for set $A$. Hence, ${\mathscr{F}} \subseteq \{0,1\}^n$ is a down-ideal on the Boolean lattice, and we can succinctly write our problem as \begin{gather} \max_{x \in {\mathscr{F}}} ~~\ensuremath{\pi}(x) := \ip{ v, x } - g(Sx), \label{eq:2} \end{gather} where columns of $S \in \R^{d \times n}$ are the item sizes. Let $\ensuremath{\mathsf{opt}\xspace}$ denote the optimal value. For a subset $A\subseteq U$, $v(A)$ and $s(A)$ denote $\sum_{e \in A} v(e) = \ip{v, \chi_A}$ and $\sum_{e \in A} s(e) = S\chi_A$ respectively. \begin{definition}[Exceptional] Item $e \in U$ is \emph{exceptional} if $\arg\max_{\theta \in [0,1]}\big\{ \theta\, v(e) - g(\theta\, s(e)) \} \in (0,1)$. \end{definition} \begin{definition}[Marginal Function] \label{def:marg} Given $g: \R^d \to \R$, define the $i^{th}$ \emph{marginal function} $g_i: \R \to \R$ as $g_i(x) := g(x \mathbf{e}_i)$, where $\mathbf{e}_i$ is the $i^{th}$ standard unit vector. \end{definition} \ifsubmit \else \input{convex} \fi \subsection{Supermodular Functions} \label{sec:superm-funct} While supermodular functions defined over the Boolean lattice are widely considered, one can define supermodularity for all real-valued functions. Omitted proofs are presented in Appendix \ref{app:supermod} \begin{definition}[Supermodular] \label{def:supermod} Let $X \subseteq \R^d$ be a lattice. 
A function $f: X \to \R$ is \emph{supermodular} if for all $x, y \in X$, $f(x) + f(y) \leq f(x \land y) + f(x \lor y),$ where $x \land y$ and $x \lor y$ are the component-wise minimum and maximum operations. \end{definition} This corresponds to the usual definition of (discrete) supermodularity when $X = \{0,1\}^d$. For proof of the lemma below and other equivalent definitions, see, e.g.,~\cite{Top98}. \begin{lemma}[Supermodularity and Gradients] \label{lemma:supermod} A convex function $f : \R^d_+ \to \R$ is supermodular if and only if any of the following are true. \begin{OneLiners} \item $\nabla f$ is increasing in each coordinate, if $f$ is differentiable. \item $\smash{\frac{\partial^2 f(x)}{\partial x_i \partial x_j}} \geq 0$ for all $i, j$, if $f$ is twice-differentiable. \end{OneLiners} \end{lemma} \ifsubmit \else \begin{proof} \emph{(Supermodular $\equiv$ inc. gradient)} Suppose $f$ is differentiable. For $x'_i \ge x_i$, consider the difference $f(x_{-i},x'_i) - f(x_{-i},x_i)$, where $x_{-i}$ denotes a vector of values for all coordinates but the $i$th one. Theorem 2.6.1 and Corollary 2.6.1 of~\cite{Top98} show that $f$ is supermodular iff for all $x'_i \ge x_i$ this difference is increasing in each coordinate of $x_{-i}$. But this happens iff the partial derivative $\frac{\partial f}{\partial x_i}(x_{-i},x_i)$ is increasing in $x_{-i}$ (the ``if'' part follows by writing the difference $f(x_{-i},x'_i) - f(x_{-i},x_i)$ as an integral over the gradients, and the ``only if'' part follows by taking the limit of this difference with $x'_i \rightarrow x_i$). Moreover, since $f$ is convex its restriction to the line $\{(x_{-i},x_i) : x_i \ge 0\}$ is also convex, and hence its gradient, which is $\frac{\partial f}{\partial x_i}(x_{-i},x_i)$, is increasing in $x_i$. Thus, each partial derivative $\frac{\partial f}{\partial x_i}(x)$ is increasing in all coordinates of $x$. \medskip \emph{(Inc. gradient $\equiv$ non-neg.
second derivatives)} Suppose $f$ is twice-differentiable. The $i$th coordinate of the gradient $\nabla f(x)_i$ is increasing in all coordinates of $x$ iff its partial derivatives $\frac{\partial \nabla f(x)_i}{\partial x_j} = \frac{\partial^2 f}{\partial x_i \partial x_j}$ are non-negative for all $j$. The equivalence then follows. \end{proof} \fi \begin{lemma}[Superadditivity] \label{lem:superadd2} If $f: \R_+^d \to \R$ is differentiable, convex, and supermodular, then for $x, x', y \in \R_+^d$ such that $x' \leq x$, $f(x' + y) - f(x') \leq f(x + y) - f(x)$. In particular, if $f(0) = 0$, setting $x' = 0$ gives $f(x) + f(y) \leq f(x+y).$ \end{lemma} \begin{corollary}[Subadditivity of profit] The profit function $\ensuremath{\pi}$ is subadditive. \end{corollary} The next fact shows that the cost $g$ is also supermodular when seen in a discrete way. \begin{fact}[Continuous vs.\ Discrete Supermodularity] \label{fct:supermod} Given a convex supermodular function $g: \R^d \to \R$ and $n$ items with sizes $s_1, \ldots, s_n \in \R_+^d$, define the function $h: \{0,1\}^n \to \R$ as $h(v) = g(\sum_i s_iv_i) = g(Sv)$. Then $h(\cdot)$ is a (discrete) supermodular function. \end{fact} \ifsubmit \else \input{concen} \fi \section{Missing Proofs} \label{app:proofs} \subsection{Proofs for Section \ref{sec:superm-funct}} \label{app:supermod} \begin{proof}[Proof of Lemma \ref{lem:superadd2}] By integrating gradients we have \[ f(x'+y) - f(x') = \int_{t = 0}^1 \ip{ \nabla f(x' + t\, y), y } \, dt \leq \int_{t = 0}^1 \ip{ \nabla f(x + t\, y), y } \, dt = f(x+y) - f(x), \] where the inequality uses Lemma \ref{lemma:supermod}(1), the monotone gradients property. \end{proof} \begin{proof}[Proof of Fact \ref{fct:supermod}] Given $x' \leq x$ in the Boolean cube and $i$ not in $x$, \[ h(x' + \mathbf{e}_i) - h(x') = g(S(x' + \mathbf{e}_i)) - g(Sx') \leq g(S(x + \mathbf{e}_i)) - g(Sx) = h(x + \mathbf{e}_i) - h(x), \] where the inequality uses Lemma~\ref{lem:superadd2}, and $S\ge 0$. 
\end{proof} \subsection{Proof of Lemma \ref{lemma:piplus}} Since $g$ is separable, it has the form $g(z) = \sum_i g_i(z_i)$. Thus, it will suffice to perform the gradient truncation on each of the 1-dimensional functions $g_i$, which is accomplished by the following lemma. \begin{claim}[Gradient truncation] \label{clm:grad-trunc} Given a $1$-dimensional convex function $f: \R \to \R$ and a real value $\gamma \in \R_+$, there is another function $f^+ : \R \rightarrow \R$ satisfying the following: \begin{OneLiners} \item[i.] $f^+$ is convex, \item[ii.] For all $z$, all subgradients $u \in \partial f^+(z)$ satisfy $u \le \gamma$. \item[iii.] If $z$ is such that some subgradient $u \in \partial f(z)$ satisfies $u \le \gamma$, then $f^+(z) = f(z)$. \end{OneLiners} \end{claim} \begin{subproof} Define $f^+$ as follows: \begin{align*} f^+(z) = \sup_{u \le \gamma} \left[ u\cdot z - f^\star(u) \right]; \end{align*} notice the constraint $u \le \gamma$, and that the dual $f^\star$ is the usual Fenchel dual for $f$. Properties~(i) and~(ii) follow from the fact $f^+$ is the point-wise supremum of linear functions with slopes at most $\gamma$. For Property~(iii), by the double dual property in Claim~\ref{clm:doubleDual}, $f(z) = \sup_{u} [ u\cdot z - f^\star(u)],$ and hence $f \ge f^+$. Moreover, take a point $z$ such that some subgradient $\overline{u} \in \partial f(z)$ satisfies $\overline{u} \le \gamma$. Then we have \begin{gather*} f(z) \stackrel{\text{(Claim~\ref{clm:linear})}}{=} \overline{u} \cdot z - f^\star(\overline{u}) \le \sup_{u \le \gamma} \, [ u\cdot z - f^\star(u)] \stackrel{\text{(by defn.)}}{=} f^+(z) \leq f(z). \end{gather*} This concludes the proof. \end{subproof} Now define $g^+_i$ as the function obtained by applying the truncation lemma above with $f = g_i$ and $\gamma = \lambda^*_i$. Also define the truncated cost function $g^+$ as $g^+(z) = \sum_i g^+_i(z_i)$, and the truncated profit function $\ensuremath{\pi}^+$ as $\ensuremath{\pi}^+(x) = \ip{v,x} - g^+(Sx)$.
We claim that $\ensuremath{\pi}^+$ satisfies all properties required by the lemma. First, it is discrete submodular: $g^+$ is convex and supermodular, since it is a sum of $1$-dimensional convex functions, which are trivially supermodular; discrete submodularity of $\ensuremath{\pi}^+$ then follows from Fact~\ref{fct:supermod}. Moreover, we claim $\ensuremath{\pi}^+$ has the same value as $\ensuremath{\pi}$ over solutions in ${\mathscr{F}}|_{U^{\degree}_{\lambda^*}}$. This follows from the fact that every solution $x$ in this family has $\nabla g(Sx) \le \lambda^*$ (by Property~(P1')), and hence Claim~\ref{clm:grad-trunc}(iii) guarantees that $g(Sx) = g^+(Sx)$. Finally, we claim that $\ensuremath{\pi}^+$ is non-negative monotone. Since $\ensuremath{\pi}^+(\emptyset) = \ensuremath{\pi}(\emptyset) = 0$, it again suffices to show monotonicity. For that, Claim~\ref{clm:grad-trunc}(ii) guarantees that all solutions $x \subseteq U^{\degree}_{\lambda^*}$ now satisfy $\nabla g^+(Sx) \leq \lambda^*$. The proof of Lemma~\ref{lemma:mono-feas} then extends to show that $\ensuremath{\pi}^+$ is monotone. This concludes the proof of Lemma \ref{lemma:piplus}. \subsection{Proof of Claim \ref{clm:lip}} Say $A \setminus B = \{i\}$. Then by supermodularity of the cost function $g$ (Lemma~\ref{lem:superadd2}) and the absence of exceptional items, \[ \ensuremath{\pi}(y_A) - \ensuremath{\pi}(y_B) = v_iy_i - (g(Sy_A) - g(Sy_B)) \le v_iy_i - g(S^iy_i) \leq \ensuremath{\pi}(\mathbf{e}_i) \le M. \] For the lower bound, we also use the optimality of $y$. \[ \ensuremath{\pi}(y_A) - \ensuremath{\pi}(y_B) = v_iy_i - (g(Sy_A) - g(Sy_B)) \ge v_iy_i - (g(Sy_U) - g(Sy_{U \setminus \{i\}})) = \ensuremath{\pi}(y) - \ensuremath{\pi}(y_{U \setminus \{i\}})\ge 0, \] where the middle inequality uses supermodularity of $g$, and the last one uses the optimality of $y$. This concludes the proof. 
\subsection{Proof of Lemma \ref{lem:goldilocks}} Part (b) was already proved in detail, so we provide only a proof for part (a). Let $\ensuremath{\mathsf{fopt}\xspace}' = \ensuremath{\mathsf{fopt}\xspace}(U_{\lambda^*})$. Using Theorem~\ref{thm:constr-main} and the assumption that no item has profit more than $\nicefrac{\ensuremath{\mathsf{opt}\xspace}}{\eta d}$, we know $\ensuremath{\mathsf{fopt}\xspace}' \ge \ensuremath{\mathsf{opt}\xspace}(U^{\degree}_{\lambda^*}) \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{2d+1} \geq \frac{\ensuremath{\mathsf{fopt}\xspace}}{3d}$. Let $y$ be an optimal fractional solution for $U_{\lambda^*}$, so that $\ensuremath{\pi}(y) = \ensuremath{\mathsf{fopt}\xspace}' \ge \frac{\ensuremath{\mathsf{fopt}\xspace}}{3d}$. By downward-closedness of ${\mathscr{F}}$, $y_{U^{\text{sample}}}$ is a feasible fractional solution only containing items in $U^{\text{sample}}_{\lambda^*}$. Therefore, ${\mathbb{E}}[\ensuremath{\mathsf{fopt}\xspace}(U^{\text{sample}}_{\lambda^*})] \geq {\mathbb{E}}[\ensuremath{\pi}(y_{U^{\text{sample}}})] = \ensuremath{\mathsf{fopt}\xspace}'/2$. Finally, using the concentration inequality of Fact \ref{fact:submod-conc} with $t=\ensuremath{\mathsf{fopt}\xspace}/12d$ (where the Lipschitz-type condition is satisfied due to Claim~\ref{clm:lip}), we get \[ \Pr\left(\left|\ensuremath{\pi}(y_{U^{\text{sample}}}) - \frac{\ensuremath{\mathsf{fopt}\xspace}'}{2}\right| \le \frac{\ensuremath{\mathsf{fopt}\xspace}}{12d}\right) \ge 1- \frac{2 M\, (\ensuremath{\mathsf{fopt}\xspace}'/2)}{(\ensuremath{\mathsf{fopt}\xspace}/12d)^2} \ge \frac{19}{20}. \] The last inequality follows using $M = \frac{\ensuremath{\mathsf{opt}\xspace}}{\eta d} \le \frac{\ensuremath{\mathsf{fopt}\xspace}}{10^4 d}$.
Hence, w.p.\ at least $\nicefrac{19}{20}$, $\ensuremath{\mathsf{fopt}\xspace}(U^{\text{sample}}_{\lambda^*}) \ge \pi(y_{U^{\text{sample}}}) \geq \frac{\ensuremath{\mathsf{fopt}\xspace}'}{2} - \frac{\ensuremath{\mathsf{fopt}\xspace}}{12d} \geq \frac{\ensuremath{\mathsf{fopt}\xspace}}{12d}$. This is at least $\frac{1}{12d}\,\ensuremath{\mathsf{fopt}\xspace}(U^{\text{sample}})$ since $L\subset U$. Hence $\lambda^*$ is a candidate in the definition of ${\widehat{\lambda}}$, thus by maximality ${\widehat{\lambda}} \ge \lambda^*$, proving part (a) of the lemma. \subsection{Proof of Theorem \ref{thm:online-constr}} Recall the definition of the modified profit function $\ensuremath{\pi}^+$ from Lemma~\ref{lemma:piplus}. Again, let ${\mathcal{E}}$ be the event that Lemmas~\ref{lemma:online-good-lambda} and~\ref{lem:constr-oos} hold; by a union bound $\Pr[{\mathcal{E}}] \geq \nicefrac{17}{20}$. We claim that under this event, the functions $\ensuremath{\pi}^+$ and $\ensuremath{\pi}$ coincide over feasible sets of $R^\circ_{{\widehat{\lambda}}}$. Indeed, ${\widehat{\lambda}} \ge {\lambda^*}$, so the elements $R^\circ_{{\widehat{\lambda}}} \subseteq U_{{\lambda^*}}$, and then Lemma~\ref{lemma:piplus}(iii) tells us that $\ensuremath{\pi}^+$ and $\ensuremath{\pi}$ agree on all feasible subsets of $U_{\lambda^*}$. Using Assumption~\ref{ass:submod-algo}, the algorithm $\textsf{SubmodMS}$ applied over $R^\circ_{{\widehat{\lambda}}}$ cannot distinguish between $\ensuremath{\pi}$ and $\ensuremath{\pi}^+$. Thus, the solution $\mathbf{X} \in {\mathscr{F}}$ returned by our algorithm using profit function $\ensuremath{\pi}$ is the same as one returned by running algorithm $\textsf{SubmodMS}$ over instance $R^\circ_{{\widehat{\lambda}}}$ with the \emph{non-negative monotone submodular} function $\ensuremath{\pi}^+$.
This algorithm is $\alpha$-competitive, and moreover conditioning on the sample still leaves the out-of-sample items in {random order}, so the guarantee of the algorithm still holds. Hence, \begin{align*} {\mathbb{E}}[\ensuremath{\pi}(\mathbf{X}) \mid {\mathcal{E}}] ~~=~ {\mathbb{E}}[\ensuremath{\pi}^+(\mathbf{X}) \mid {\mathcal{E}}] ~~ \stackrel{\textrm{$\alpha$-approx}}{\ge} ~~ \frac{1}{\alpha}\,{\mathbb{E}}[\ensuremath{\mathsf{opt}\xspace}(U^{\oos}_{{\widehat{\lambda}}}) \mid {\mathcal{E}}], \end{align*} where the first equality follows from $\ensuremath{\pi}^+$ and $\ensuremath{\pi}$ agreeing on $U^{\oos}_{{\widehat{\lambda}}}$. Since we are assuming no item has large profit, by Theorem~\ref{thm:constr-main}, the integer optimum is at least a $\nicefrac{1}{(2d+1)}$-fraction of the fractional optimum, \begin{align*} \frac{1}{\alpha}\,{\mathbb{E}}[\ensuremath{\mathsf{opt}\xspace}(U^{\oos}_{{\widehat{\lambda}}}) \mid {\mathcal{E}}] ~~\ge~~ \frac{1}{\alpha(2d+1)}\,{\mathbb{E}}[\ensuremath{\mathsf{fopt}\xspace}(U^{\oos}_{{\widehat{\lambda}}}) \mid {\mathcal{E}}] ~~\ge~~ \frac{\ensuremath{\mathsf{fopt}\xspace}}{200 \alpha d(2d+1)}, \end{align*} the last inequality using that event ${\mathcal{E}}$ guarantees Lemma~\ref{lem:constr-oos}(c). Since the algorithm does not include items with negative marginals, it always produces solutions with non-negative values. Therefore, \begin{align*} {\mathbb{E}}[\ensuremath{\pi}(\mathbf{X})] \ge {\mathbb{E}}[\ensuremath{\pi}(\mathbf{X}) \mid {\mathcal{E}}]\, \Pr({\mathcal{E}}) \ge \frac{\ensuremath{\mathsf{opt}\xspace}}{O(\alpha d^2)}. \end{align*} The last inequality follows since $\Pr({\mathcal{E}})$ is a constant. This concludes the proof. \subsection{Proof of Theorem \ref{thm:reduction}} Let $x^*$ be the optimal solution with value $\ensuremath{\mathsf{opt}\xspace} := \ensuremath{\pi}(x^*)$ for the problem of maximizing profit with the supermodular cost function $g$. 
Since our proof deals with fractional allocations, define $\widehat{v}(\cdot)$ to be the convex extension (or Lov\'asz extension) of the submodular value function $v(\cdot)$~\cite{Schrijver-book}. Since $\widehat{v}$ is an extension of $v$, $v(x) = \widehat{v}(x)$ for all points $x$ in the domain of $v$, i.e., for $x \in \{0,1\}^n$. Define for all $x \in [0,1]^n$, \[ \overline{\ensuremath{\pi}}(x) := \widehat{v}(x) - \overline{g}(Sx). \] Now, \begin{gather} \overline{\ensuremath{\pi}}(x^*/d) = \widehat{v}(x^*/d) - \overline{g}(Sx^*/d) \geq \nicefrac1d \, ( v(x^*) - g(Sx^*) ) = \ensuremath{\pi}(x^*)/d = \ensuremath{\mathsf{opt}\xspace}/d. \label{eq:profit-good} \end{gather} The inequality uses that for the fractional point $x^*/d$, the Lov\'asz extension value is $\widehat{v}(x^*/d) = (1-\nicefrac1d)\cdot v(\emptyset) + \nicefrac1d \cdot v(x^*)$, and that by Lemma~\ref{lemma:dapprox}(1), $\overline{g}(Sx^*/d) \leq g(Sx^*)$. So the separable problem has a good \emph{fractional} solution $x^*/d$, and we want to ``round'' it to a near-integral solution. Indeed, take the matroid polytope $\mathcal{P}$ corresponding to the matroid constraint ${\mathscr{F}}$, and intersect $\mathcal{P}$ with the subspace $\{x \mid Sx = S(x^*/d)\}$. Clearly $x^*/d$ belongs to this intersection. Now consider maximizing the linear function $\ip{\nabla \widehat{v}(x^*/d), x - x^*/d}$ over this polytope, and let $\tilde{x}$ be a basic feasible solution to this linear optimization problem. Since at most $d$ of the tight constraints come from the subspace restriction, the point $\tilde{x}$ lies on some face of the matroid polytope of dimension at most $d$. By~\cite[Theorem~4.3]{GRSZ14}, $\tilde{x}$ has at most $2d$ fractional coordinates. Moreover, since $\tilde{x}$ is the maximizer of the linear function and $x^*/d$ is a feasible point, the inner product $\ip{\nabla \widehat{v}(x^*/d), \tilde{x} - x^*/d} \geq 0$. 
The convexity of the Lov\'asz extension now implies $\widehat{v}(\tilde{x}) \geq \widehat{v}(x^*/d)$. Because $S\tilde{x} = Sx^*/d$, the cost remains unchanged and we get \[ \overline{\ensuremath{\pi}}(\tilde{x}) \geq \overline{\ensuremath{\pi}}(x^*/d) \geq \ensuremath{\mathsf{opt}\xspace}/d. \] Let $x^{\text{int}}$ be the $\tilde{x}$ restricted to the integral coordinates, and let $F \subseteq [n]$ be the set of fractional coordinates in $\tilde{x}$. Then by subadditivity of the $\overline{\ensuremath{\pi}}$ function, we get the following, where $\chi_e$ is an indicator vector of element $e$. \begin{align*} \ensuremath{\mathsf{opt}\xspace}/d &\leq \overline{\ensuremath{\pi}}(\tilde{x}) \leq \overline{\ensuremath{\pi}}(x^{\text{int}}) + \sum_{e \in F} \overline{\ensuremath{\pi}}(\tilde{x}_e\, \chi_e) \tag{by subadditivity of $\overline{\ensuremath{\pi}}$} \\ &\leq \overline{\ensuremath{\pi}}(x^{\text{int}}) + \sum_{e \in F} \big( \widehat{v}(\tilde{x}_e \, \chi_e) - g(\tilde{x}_e \, S\chi_e) \big) . \tag{Definition of $\overline{\ensuremath{\pi}}$, and Lemma~\ref{lemma:dapprox}(2)} \end{align*} Moreover, for each individual item $e$, \[ \widehat{v}(\tilde{x}_e \, \chi_e) - g(\tilde{x}_e \, S\chi_e) = \tilde{x}_e \,\widehat{v}( \chi_e) - g(\tilde{x}_e \, S\chi_e) \leq \widehat{v}( \chi_e) - g(S\chi_e) = \ensuremath{\pi}(\chi_e). \] The first equality above uses that the Lov\'asz extension acts linearly on single items. The inequality follows since there are no exceptional items and $\tilde{x}_e \in (0,1)$. Hence, we get $\overline{\ensuremath{\pi}}(x^{\text{int}}) + \sum_{e \in F} \ensuremath{\pi}(\chi_e) \geq \ensuremath{\mathsf{opt}\xspace}/d$. We can use the algorithm for the separable problem (which is part of the theorem assumption) to find $x^{\text{sep}}$ with value $\overline{\ensuremath{\pi}}(x^{\text{sep}}) \geq (\nicefrac1{\beta}) \overline{\ensuremath{\pi}}(x^{\text{int}})$. 
Using Lemma~\ref{lemma:dapprox}(2) again, $\ensuremath{\pi}(x^{\text{sep}}) \geq \overline{\ensuremath{\pi}}(x^{\text{sep}})$. Also, using the well-known $1/\mathrm{e}$-approximation for finding the most profitable item returns an item $e^*$ with profit $\ensuremath{\pi}(\chi_{e^*}) \geq \frac1{\mathrm{e} \cdot 2d} \sum_{e \in F} \ensuremath{\pi}(\chi_e) $. Returning $x^{\text{sep}}$ with probability $\frac{\beta}{\beta + 2\mathrm{e} d}$ and the single element $e^*$ otherwise gives expected value at least \[ \frac{1}{\beta + 2\mathrm{e} d} \bigg( \beta \,\ensuremath{\pi}(x^{\text{sep}}) + 2\mathrm{e} d\, \ensuremath{\pi}(\chi_{e^*}) \bigg) \geq \frac{1}{\beta + 2\mathrm{e} d} \bigg( \ensuremath{\pi}(x^{\text{int}}) + \sum_{e \in F} \ensuremath{\pi}(\chi_e) \bigg) \geq \frac{\ensuremath{\mathsf{opt}\xspace}}{d(\beta + 2 \mathrm{e} d)}. \qedhere \] \section{The Offline Unconstrained Problem} \label{sec:unconstr} We first present an offline algorithm for supermodular functions in the \textbf{unconstrained} case (where ${\mathscr{F}} = \{0,1\}^n$). We focus on the main techniques and defer some technicalities and all computational aspects for now. Just for this section, we assume item sizes are ``infinitesimal''. We make the following assumptions on the cost function $g$ and the elements. \begin{assumption} \label{asm:nice} We assume that cost function $g$ is non-negative, strictly convex, closed, and differentiable. We assume $g(0) = 0$, $g$ is supermodular, and that gradients of $g$ go to $\infty$ along every positive direction. We assume elements are in general position\footnote{There are no non-trivial linear dependencies, see Lemma~\ref{lem:reduce-nice} for a formal definition}, and that there are no exceptional items. We also assume that every individual item has profit at most $M := \nicefrac{\ensuremath{\mathsf{opt}\xspace}}{\eta d}$ for $\eta \geq 10^4$. (See Section~\ref{sec:except} on how to remove these assumptions on elements.) 
\end{assumption} \paragraph{Classifiers.} The offline algorithm will be based on \emph{linear classifiers}, where a set of weights is used to aggregate the multidimensional size of an item into a scalar, and the algorithm picks all items that have high-enough value/aggregated-size ratio. \begin{definition}[Classifiers and Occupancy] \label{def:occ} Given a vector $\lambda \in \R_+^d$ (a ``classifier''), define the set of items picked by $\lambda$ as $U_\lambda := \{ e \in U \mid v(e) \ge \ip{\lambda,s(e)} \}$. Let $\occ{\lambda} := \sum_{e: v(e) \geq \ip{ \lambda, s(e) }} s(e)$ denote the multidimensional occupancy induced by choosing items in $U_\lambda$. \end{definition} To understand the importance of classifier-based solutions it is instructive to consider the problem with single-dimensional size. A little thought shows that an optimal solution is to pick items in decreasing order of value density $v(e)/s(e)$. Adding these items causes the total occupancy---and hence the incurred cost---to increase, so we stop when the value density of the current item becomes smaller than the derivative of the cost function at the current utilization. That is, we find a density threshold $\lambda$ such that $g'(\text{\emph{total size of items having $v(e) \ge \lambda \, s(e)$}}) \approx \lambda$, and take all these high-density items. Thus, the optimal solution is one based on the classifier $\lambda$. To see that this holds in the multi-dimensional case, express $g$ in terms of linearizations % \begin{align} g(z) = \max_{\lambda \in \R^d_+} (\ip{\lambda,z} - g^\star(\lambda)), \label{eq:dual} \end{align} where $g^\star$ is its Fenchel dual. (Note we are maximizing over positive classifiers; Lemma~\ref{lem:pos-lambda} shows this is WLOG.) Then our unconstrained problem~(\ref{eq:1}) becomes a minimax problem: \begin{align*} \max_{x \in \{0,1\}^n} \min_{\lambda \in \R^d_+} \bigg[\ip{v,x} - \bigg(\ip{\lambda,Sx} - g^\star(\lambda) \bigg) \bigg]. 
\end{align*} Consider an optimal pair $(x^*, \lambda^*)$; i.e., a pair that is a saddle-point solution, so neither $x^*$ nor $\lambda^*$ can be improved keeping the other one fixed. This saddle-point optimality implies: \begin{enumerate} \item[(a)] Since $\lambda^* = \argmax_{\lambda \in \R^d_+} (\ip{\lambda, Sx^*} - g^\star(\lambda))$, it is the right linearization of $g$ at $Sx^*$ and thus $\lambda^* = \nabla g(Sx^*)$ (see Claim~\ref{clm:doubleDual}). \item[(b)] $x^*$ is such that $x^*_i = 1$ if $v_i > \ip{\lambda^*, S^i}$ and $x^*_i = 0$ if $v_i < \ip{\lambda^*,S^i}$, with $S^i$ being the $i^{th}$ column of $S$ and the size of the $i^{th}$ item. \end{enumerate} From part (b) we see that the optimal solution $x^*$ is essentially the one picked by the classifier $\lambda^*$ (ignoring coordinates with the ``0 marginal value'' $v_i = \ip{\lambda^*,S^i}$). Moreover, the converse also holds. \begin{claim} \label{claim:optimality} For a classifier $\lambda \in \R^d_+$, let $x$ be the items picked by it. If we have $\lambda = \nabla g(Sx) \stackrel{\text{~def~}}{=} \nabla g(\occ{\lambda})$, then $x$ is an optimal solution. \end{claim} \begin{proof} For any solution $x'$, \begin{align*} \ensuremath{\pi}(x') &= \ip{v,x'} - g(Sx') \leq \ip{v,x'} - \ip{\lambda, Sx'} + g^\star(\lambda) \\ &\leq \ip{v,x} - \ip{\lambda, Sx} + g^\star(\lambda) \stackrel{(\lambda = \nabla g(Sx))}{=} \ip{v,x} - g(Sx) = \ensuremath{\pi}(x), \end{align*} where the second inequality holds since, by definition, $x$ maximizes $\ip{v,x} - \ip{\lambda, Sx}$. \end{proof} \paragraph{Restricting the Set of Classifiers.} The existence of such good classifiers is not enough, since we need to \emph{find} them online. This is difficult not only because of $d$ degrees of freedom and no control over the magnitude of the values/sizes (to be exploited in concentration inequalities), but also because picking too few or too many items could lead to low profits. 
So we restrict the set of candidate classifiers to be a \emph{monotone}\footnote{A curve $\mathcal{C}$ is \emph{monotone} if for every pair $\lambda, \lambda' \in \mathcal{C}$, one is coordinate-wise smaller than the other.} \emph{1-dimensional} curve $\mathcal{C} \subseteq \R^d_+$, satisfying additional properties given below. The main motivation is that it imposes a total ordering on the set of items picked by the classifiers: given $\lambda \le \mu$ on such a curve $\mathcal{C}$, the sets of items picked satisfy the inclusion $U_{\lambda} \supseteq U_{\mu}$. This allows us to select a ``minimally good'' classifier in $\mathcal{C}$ in a robust way, avoiding classifiers that select too many items. To design the curve $\mathcal{C}$ so it contains a classifier with profit $\approx \frac{\ensuremath{\mathsf{opt}\xspace}}{d}$, we relax the condition $\nabla g(\occ{\lambda}) = \lambda$ from Claim~\ref{claim:optimality} (too much to ask) and require the existence of $\lambda \in \mathcal{C}$ satisfying: \begin{enumerate} \item[(P1)] (don't pick too many items) $\nabla g(\occ{\lambda}) \le \lambda$. \item[(P2)] (partial gradient equality) There is a coordinate $i^*$ where $(\nabla g(\occ{\lambda}))_{i^*} = \lambda_{i^*}$. \item[(P3)] (balanced curve) $g^\star_{i}(\lambda_{i}) = g^\star_{j}(\lambda_{j}) ~~~\forall i, j \in [d]$ (see also Claim~\ref{clm:dual-marginal}). \end{enumerate} Property (P1) enforces half of the equality in Claim~\ref{claim:optimality}, and (P2) guarantees that equality holds for \emph{some} coordinate. Now for property (P3). Since $\lambda \neq \nabla g(\occ{\lambda})$ the optimality proof of Claim \ref{claim:optimality} does not go through, since $g(\occ{\lambda}) \neq \ip{\lambda,\occ{\lambda}} - g^\star(\lambda)$. 
As we prove later, the difference between these terms can be at most $g^\star(\lambda)$ (see Figure \ref{fig:offshoot} for an illustration), and the superadditivity of $g$ gives us $g^\star(\lambda) \leq \sum_i g^\star_i(\lambda_i)$ (see Claim~\ref{claim:subaddCoord}). Property~(P3) is used to control this sum, by charging it to the coordinate $i^*$ where we know we have ``the right linearization'' (by property (P2)). Reinterpreting the construction of~\cite{BUCM} in our setting, we then define $\mathcal{C}$ as any monotone curve where every $\lambda \in \mathcal{C}$ satisfies (P3). \begin{lemma} \label{lemma:existC} The curve $\mathcal{C}$ exists and contains a $\lambda$ satisfying properties (P1)-(P3). \end{lemma} \begin{proof} We first show existence, that is, the set $\{ \lambda \in \R^d_+ \mid g^\star_i(\lambda_i) = g^\star_j(\lambda_j) ~~\forall i, j \}$ contains a monotone curve. Notice that this set is the union of the box $\{\lambda \in \R^d_+ \mid g^\star_i(\lambda_i) = 0~~\forall i\} = \prod_i [0, g'_i(0)]$ (range of slopes where we can swivel around $g_i(0) = 0$) and a monotone curve $\{\lambda(\tau) \mid \tau > 0\}$, where $\lambda(\tau)$ is the unique vector satisfying $g^\star_i(\lambda_i(\tau)) = \tau$; uniqueness follows from the fact $g^\star_i$ stays at value zero in the interval $[0, g'_i(0)]$, but after that is strictly increasing due to its convexity, and monotonicity of this curve also follows from monotonicity of the $g^\star_i$'s. Thus, $\CC$ is this curve plus any monotone curve extending it to the origin. To see that $\mathcal{C}$ satisfies properties (P1) and (P2), we note that since the $g^\star_i$'s are increasing and not identically 0, $\mathcal{C}$ is unbounded in all coordinates. 
Thus, a sufficiently large $\lambda \in \mathcal{C}$ satisfies (P1), and we can start with such $\lambda$ and move down the curve (decreasing in each coordinate) until we obtain $\lambda' \in \mathcal{C}$ with $\lambda' = \nabla g(\occ{\lambda'})$, since $g$ has increasing gradients. (The equality in this final step uses the assumption that item sizes are infinitesimal, which we made for simplicity in this section). \end{proof} Making the above discussion formal, we show that $\mathcal{C}$ has a high-value classifier. Recall that $U_\lambda$ is the set of items picked by $\lambda$ (Definition \ref{def:occ}). \begin{theorem} \label{thm:unc} Given Assumption~\ref{asm:nice}, let $\lambda^*$ be a classifier in $\mathcal{C}$ satisfying properties~(P1)-(P3). Then for all $x' \in [0,1]^n$ we have $\pi(U_{\lambda^*}) \ge \frac{1}{d+1}\cdot \pi(x')$. \end{theorem} \begin{proof} Let $x^* = \chi_{U_{\lambda^*}}$ be the solution picked by the classifier $\lambda^*$, and note that $\occ{\lambda^*} = Sx^*$. Let $L(y,\mu) := \ip{v,y} - [\ip{\mu, Sy} - g^\star(\mu)]$ be the linearization of $\pi(y)$ at some slope $\mu$. From \eqref{eq:dual} we know $\ensuremath{\pi}(y) \le L(y,\mu)$ for all $\mu \ge 0$. Since $x^* $ is optimal for the linearization $L(y,\lambda^*)$ (because $x^*_i = 1$ iff $v_i - \ip{\lambda^*, S^i} \ge 0$), we have \begin{align} L(x^*, \lambda^*) \ge L(x', \lambda^*) \ge \ensuremath{\pi}(x')~~~~~~~\textrm{for all $x' \in [0,1]^n$.} \label{eq:thmU1} \end{align} Now we relate the true profit $\ensuremath{\pi}(x^*)$ to this linearized value. Observe that \begin{align} \ensuremath{\pi}(x^*) = \ip{v,x^*} - g(Sx^*) &= \ip{v,x^*} - [\ip{\nabla g(Sx^*), Sx^*} - g^\star(\nabla g(Sx^*))] \tag{by Claim~\ref{clm:linear}} \\ & \geq \underbrace{\ip{v,x^*} - \ip{\lambda^*, Sx^*}}_{\geq 0} + \underbrace{g^\star(\nabla g(Sx^*))}_{\geq 0}, \label{eq:7} \end{align} where the inequality uses that $\lambda^* \ge \nabla g(Sx^*)$ by property~(P1) and $Sx^* \ge 0$. 
The first term is non-negative because we only pick items for which $v_i - \ip{\lambda^*, S^i} \ge 0$. The second term is non-negative due to Claim~\ref{clm:fenchel-props}(a). We can now prove three lemmas that imply the theorem. \begin{lemma} \label{lemma:linGap} For any $x' \in [0,1]^n$, $\ensuremath{\pi}(x^*) \ge L(x^*, \lambda^*) - g^\star(\lambda^*) \geq \ensuremath{\pi}(x') - g^\star(\lambda^*).$ \end{lemma} \begin{subproof} Drop the second term from~(\ref{eq:7}), then use the definition of $L(\cdot,\cdot)$ and~(\ref{eq:thmU1}). \end{subproof} \begin{lemma}\label{lemma:gsi} $g^\star(\lambda^*) \le d \cdot g^\star_{i^*}(\lambda^*_{i^*}).$ \end{lemma} \begin{subproof} Using the superadditivity of $g$ and Claim~\ref{claim:subaddCoord} we get $g^\star(\lambda^*) \le \sum_i g^\star_i(\lambda^*_i)$. Now from property (P3) of the classifier $\lambda^*$, all the terms in the sum are equal. \end{subproof} \begin{lemma} \label{lemma:profitDual} $\ensuremath{\pi}(x^*) \ge g^\star_{i^*}(\lambda^*_{i^*}).$ \end{lemma} \begin{subproof} We claim that $g^\star(\nabla g(Sx^*)) \ge g^\star_{i^*}(\lambda^*_{i^*})$; plugging this into~(\ref{eq:7}) proves the lemma. For the claim, define $\lambda' = \nabla g(Sx^*)$. By Property~(P2), $\lambda'_{i^*} = \lambda^*_{i^*}$, so we want to show $g^\star(\lambda') \geq g^\star_{i^*}(\lambda'_{i^*}) = g^\star(\lambda'_{i^*} \mathbf{e}_{i^*})$. This follows because $g^\star$ is monotone (Claim~\ref{clm:fenchel-props}(b)). \end{subproof} This completes the proof of Theorem~\ref{thm:unc}. \end{proof} \ifstandalone \end{document} \fi
% stray non-LaTeX artifact lines (dataset metadata), commented out:
% 1,108,101,565,837
% arxiv
\section{Introduction} \subsection{Motivation} The classical Plateau problem concerns the existence and properties of a disc of smallest area bounded by a given Jordan curve. In a Riemannian manifold $X$, a solution of the Plateau problem is obtained by a disc of minimal energy, where one minimizes over the set $\Lambda (\Gamma ,X)$ of all maps $u$ in the Sobolev space $W^{1,2} (D,X)$, whose boundary $tr(u):S^1\to X$ is a reparametrization of the given Jordan curve $\Gamma$. This approach has the additional useful feature that the area minimizer obtained in this way is automatically conformally parametrized. Recently, the authors of the present article generalized the classical Plateau problem to the setting of arbitrary proper metric spaces in \cite{LW}. In particular, they proved existence of area minimizing discs with prescribed boundary in any proper metric space and with respect to any quasi-convex definition of area (in the sense of convex geometry). It should be noted that the classical approach (described above) to the Plateau problem cannot work literally in the generality of metric spaces. This is due to the fact that there are many natural but different definitions of area and of energy. Moreover, different definitions of area may give rise to different minimizers as was shown in \cite{LW}. Finally, the presence of normed spaces destroys any hope of obtaining a conformal area minimizer and the inevitable lack of conformality is the source of difficulties when trying to compare or identify minimizers of different energies and areas. One of the principal aims of the present article is to show that the classical approach (of minimization of the area via the simpler minimization of the energy) does in fact work for some definitions of energy and area. As a byproduct we obtain new definitions of area which are quasi-convex (topologically semi-elliptic in the language of \cite{Iva09}), which might be of some independent interest in convex geometry. 
\subsection{Energy and area minimizers} For a metric space $X$, the Sobolev space $W^{1,2} (D,X)$ consists of all measurable, essentially separably valued maps $u:D\to X$ which admit some function $g\in L^2 (D)$ with the following property (cf. \cite{Res97}, see also \cite{HKST15}): For any $1$-Lipschitz function $f:X\to \mathbb{R}$ the composition $f\circ u$ lies in the classical Sobolev space $W^{1,2} (D )$, and the norm of the gradient of $f\circ u$ is bounded from above by $g$ at almost every point of $D$. In $L^2(D)$ there exists a unique minimal function $g$ as above, called the \emph{generalized gradient} of $u$. This generalized gradient $g_u$ coincides with the \emph{minimal weak upper gradient} of a representative of $u$ in the sense of \cite{HKST15}. The square of the $L^2$-norm of this \emph{generalized gradient} $g_u$ is the Reshetnyak energy of $u$, which we denote by $E_+^2 (u)$. A different but equivalent definition of the Sobolev space $W^{1,2} (D,X)$ is due to Korevaar-Schoen (\cite{KS93}) and comes along with another definition of energy $E^2(u)$ generalizing the classical Dirichlet energy. If $X$ is a Riemannian manifold then $g_u (z)$ is just the point-wise sup-norm of the weak differential $Du (z)$ for almost all $z\in D$. The Dirichlet-Korevaar-Schoen energy $E^2 (u)$ is obtained in this case by integrating over $D$ the sum of squares of eigenvalues of $Du(z)$. It is the heart of the classical approach to Plateau's problem by Douglas and Rado, extended by Morrey to Riemannian manifolds, that any minimizer of the Dirichlet energy $E^2$ in $\Lambda (\Gamma , X)$ is conformal and minimizes the area in $\Lambda (\Gamma, X)$. Turning to general proper metric spaces $X$, we recall from \cite{LW} that for any Jordan curve $\Gamma$ in $X$ one can find minimizers of $E^2$ and $E^2_+$ in the set $\Lambda (\Gamma, X)$, whenever $\Lambda (\Gamma, X)$ is not empty. 
The first special case of our main result \tref{thmmain} identifies any minimizer of the Reshetnyak energy $E_+^2$ in $\Lambda (\Gamma, X)$ as a minimizer of the inscribed Riemannian area $\mu^i$ investigated by Ivanov in \cite{Iva09}, see also Subsections~\ref{subsecarea} -- \ref{vergleich} below. \begin{thm} \label{thm2} Let $\Gamma$ be any Jordan curve in a proper metric space $X$. Then every map $u\in \Lambda (\Gamma, X)$ which minimizes the Reshetnyak energy $E_+ ^2$ in $\Lambda (\Gamma ,X)$ also minimizes the $\mu^i$-area in $\Lambda (\Gamma ,X)$. \end{thm} Any minimizer $u$ of the Reshetnyak energy as in \tref{thm2} is $\sqrt 2$-quasiconformal. This means, roughly speaking, that $u$ maps infinitesimal balls to ellipses of aspect ratio at most $\sqrt{2}$, see \cite{LW} and Subsection \ref{subsecconst} below. We emphasize that our notion of quasiconformal map is different from the notion of quasiconformal homeomorphism studied in the field of quasiconformal mappings. For any map $v\in W^{1,2} (D,X)$ there is an energy-area inequality $E^2_+ (v)\geq \operatorname{Area} _{\mu ^i} (v)$; and for any $u$ as in \tref{thm2} equality holds. We find a similar phenomenon in the case of the more classical Korevaar-Schoen energy $E^2$, which generalizes the Dirichlet energy from the Riemannian to the metric setting. However, the corresponding \emph{Dirichlet definition of area} $\mu^D$ seems to be new, see Subsection~\ref{subsec+}. \begin{thm} \label{thm1} There exists a quasi-convex definition of area $\mu ^{D}$ such that the following holds true. For any Jordan curve $\Gamma$ in a proper metric space $X$, and for any map $u\in \Lambda (\Gamma, X)$ with minimal Korevaar-Schoen energy $E ^2(u)$ in $ \Lambda (\Gamma ,X)$, this map $u$ minimizes the $\mu ^D$-area in $\Lambda (\Gamma ,X)$. 
\end{thm} Recall that quasi-convexity of the definition of area is a very important feature in the present context, since it is equivalent to the lower semi-continuity of the corresponding area functional in all Sobolev spaces \cite{LW}, Theorem 5.4, and therefore, closely related to the question of the existence of area minimizers. In order to describe the definition of area $\mu ^{D}$, we just need to fix the values of the $\mu ^D$-areas of one subset in every normed plane $V$. Considering the subset to be the ellipse arising as the image $L(D)$ of a linear map $L: \mathbb{R}^2\to V$ (see Section \ref{sec2} below), this value $\operatorname{Area} _{\mu ^D} (L) $ equals \begin{equation} \label{equation1} \operatorname{Area}_{\mu ^D} (L)= \frac 1 2 \inf \{ E^2 (L\circ g)\; | \; g\in {\rm SL}_2 \}. \end{equation} For any Sobolev map $v\in W^{1,2} (D,X)$ the energy-area inequality $E^2(v) \geq 2\cdot \operatorname{Area}_{\mu^D} (v)$ holds true, with equality for any minimizer $u$ as in \tref{thm1}. The minimizers in \tref{thm1} are $Q$-quasiconformal with the non-optimal constant $Q= 2\sqrt 2 + \sqrt 6$ (\cite{LW} and Subsection \ref{subsec+} below). An answer to the following question would shed light on the structure of energy minimizers from \tref{thm1}, cf. \cite{Milman}, p.723 for the ``dual'' question. \begin{quest} For which $g\in {\rm SL}_2$ is the infimum in \eqref{equation1} attained? Is it possible to describe the measure $\mu ^{D}$ appearing in \tref{thm1} in a geometric way? What is the optimal quasiconformality constant of the minimizers of the Korevaar-Schoen energy? \end{quest} All definitions of area of Sobolev maps agree with the parametrized Hausdorff area if $X$ is a Riemannian manifold, a space with one-sided curvature bound or, more generally, any space with the property (ET) from \cite{LW}, Section 11. In this case, \tref{thm1} directly generalizes the classical result of Douglas-Rado-Morrey. 
Our results apply to all other \emph{quasi-convex definitions of energy}, see \tref{quasiarea}. We refer to Section \ref{sec2} for the exact definitions and mention as a particular example linear combinations $a\cdot E^2+ b\cdot E_+ ^2+ c\cdot \operatorname{Area} _{\mu}$, where $a,b,c\geq 0$ with $a^2+b^2>0$ and where $\mu$ is some quasi-convex definition of area. For any such energy $E$ there exists a quasi-convex definition of area $\mu ^E$ such that a minimizer of $E$ automatically provides a quasiconformal minimizer of $\mu ^E$ as in \tref{thm2} and \tref{thm1}. The definition of area $\mu ^E$ is given similarly to \eqref{equation1}. \begin{rem} We would like to mention a related method of obtaining an area-minimizer for any quasi-convex definition of area $\mu$. In the Riemannian case this idea can be found in \cite{HM}, cf. \cite{Dierkes-et-al10}, Section 4.10: Consider the energy $E_{\epsilon} =\epsilon E_+^2 + (1-\epsilon ) \operatorname{Area} _{\mu}$. Then a minimizer $u_{\epsilon}$ of $E_{\epsilon}$ in $\Lambda (\Gamma ,X)$ can be found in the same way as the minimizer of $E_+^2$. This minimizer is automatically $\sqrt 2$-quasiconformal and minimizes the area functional $(1-\epsilon ) \operatorname{Area} _{\mu}+ \epsilon \operatorname{Area} _{\mu ^i}$ in $\Lambda (\Gamma ,X)$. Due to the quasiconformality these minimizers have uniformly bounded energy. Therefore one can go to the limit (fixing three points in the boundary circle) and obtain a minimizer of $\operatorname{Area} _{\mu}$. \end{rem} This remark also shows that the set of quasi-convex areas obtained via the minimization of energies as in \eqref{equation1} is a dense convex subset in the set of all quasi-convex definitions of area. It seems to be a natural question which definitions of area correspond in this way to some energies. In particular, if it is the case for the most famous Hausdorff, Holmes-Thompson and Benson definitions of area. 
\begin{rem} \label{remark} From \tref{quasiarea} and \tref{thm2} one can deduce the quasi-convexity of the inscribed Riemannian area $\mu ^i$. However, a much stronger convexity property of this area has been shown in \cite{Iva09}. \end{rem} \subsection{Regularity of energy minimizers} In the presence of quadratic isoperimetric inequalities the regularity results for area minimizers obtained in \cite{LW} imply regularity of energy minimizers, once we have identified energy minimizers as area minimizers in \tref{thmmain}. Recall that a complete metric space $X$ is said to admit a $(C,l_0)$-quadratic isoperimetric inequality with respect to a definition of area $\mu$ if for every Lipschitz curve $c:S^1\to X$ of length $l\leq l_0$ there exists some $u\in W^{1,2} (D,X)$ with $$\operatorname{Area} _{\mu} (u) \leq C\cdot l^2$$ and such that the trace $tr (u)$ coincides with $c$. We refer to \cite{LW} for a discussion of this property satisfied by many interesting classes of metric spaces. If $\mu$ is replaced by another definition of area $\mu '$ then in the definition above only the constant $C$ will change and it will be changed by at most the factor $2$. If the assumption is satisfied for some triple $(C,l_0,\mu)$ we say that $X$ satisfies a \emph{uniformly local quadratic isoperimetric inequality}. As far as qualitative statements are concerned the constants and the choice of the area do not play any role. As a consequence of \tref{thm1} and the regularity results for area minimizers in \cite{LW} we easily deduce continuity up to the boundary and local Hoelder continuity in the interior for all energy minimizers in $\Lambda (\Gamma ,X)$ for any quasi-convex definition of energy. We refer to \tref{thmgenreg} for the precise statement. \subsection{Improved regularity of $\mu$-minimal discs} We can use \tref{thm2} to slightly improve the regularity results for solutions of the Plateau problem obtained in \cite{LW}. 
Assume again that $\Gamma$ is a Jordan curve in a proper metric space $X$ and let $\mu$ be a definition of area. We introduce the following \begin{defn} We say that a map $u\in \Lambda (\Gamma ,X)$ is $\mu$-minimal if it minimizes the $\mu$-area in $\Lambda (\Gamma ,X)$, and if it has minimal Reshetnyak energy $E_+^2$ among all such minimizers of the $\mu$-area. \end{defn} Due to \tref{thm2}, for the inscribed Riemannian definition of area $\mu=\mu ^i$, a $\mu$-minimal disc is just a minimizer of the Reshetnyak energy $E_+^2$ in $\Lambda (\Gamma, X)$. It follows from \cite{LW} that, for any quasi-convex $\mu$, one finds some $\mu$-minimal disc in any non-empty $\Lambda (\Gamma , X)$. Moreover, any such $\mu$-minimal map is $\sqrt 2$-quasiconformal. Assume further that $X$ satisfies the $(C,l_0,\mu )$-quadratic isoperimetric inequality. In \cite{LW}, we used the quasiconformality to deduce that any such map has a locally $\alpha$-Hoelder continuous representative with $\alpha=\frac 1 {8 \pi C}$. However, $\mu$-minimal maps satisfy a stronger infinitesimal condition than $\sqrt 2$-quasiconformality, and this can be used to improve $\alpha$ by a factor of $2\cdot q(\mu) \in [1,2]$ depending on the definition of area $\mu$. The number $q(\mu )$ equals $1$ for the maximal definition of area $\mu=\mu^i$. For other definitions of area $\mu$, the number $q(\mu)$ is smaller than $1$ and measures the maximal possible deviation of $\mu$ from $\mu ^i$, see \eqref{eq-def-qmu}. For instance, $q(\mu ^b ) =\frac{\pi}{4}$ for the Hausdorff area $\mu ^b$. Thus the following result improves the above Hoelder exponent by $2$ in the case of the inscribed Riemannian definition of area $\mu =\mu ^i$ and by $\frac \pi 2$ in the case of the Hausdorff area $\mu =\mu ^b$: \begin{thm} \label{optregul} Let $\Gamma$ be a Jordan curve in a proper metric space $X$. 
Assume that $X$ satisfies the $(C,l_0,\mu )$-quadratic isoperimetric inequality and let $u$ be a $\mu$-minimal disc in $\Lambda (\Gamma ,X)$. Then $u$ has a locally $\alpha$-Hoelder continuous representative with $\alpha = q(\mu)\cdot\frac{1}{4 \pi C}$. \end{thm} For $\mu =\mu ^i$ we get the optimal Hoelder exponent $\alpha =\frac 1 {4\pi C}$ as examples of cones over small circles show (see \cite{MR02} and \cite{LW}, Example 8.3). \subsection{Some additional comments} The basic ingredient in the proof of \tref{thm2} and its generalization \tref{thmmain} is the localized version of the classical conformality of energy minimizers. This was already used in \cite{LW}. This idea shows that almost all (approximate metric) derivatives of any minimizer $u$ of the energy $E$ in $\Lambda (\Gamma ,X)$ have to minimize the energy in their corresponding ${\rm SL}_2$-orbits, as in \eqref{equation1}. The proof of the quasi-convexity of $\mu ^D$, generalized by \tref{quasiarea}, is achieved by applying an idea from \cite{Jost}. We obtain a special parametrization of arbitrary Finsler discs by minimizing the energy under additional topological constraints. This idea might be of independent interest as it provides canonical parametrizations of any sufficiently regular surface. \bigskip {\bf Acknowledgements:} We would like to thank Frank Morgan for useful comments. \section{Preliminaries} \label{sec2} \subsection{Notation} By $(\mathbb{R}^2, s)$ we denote the plane equipped with a semi-norm $s$. If $s$ is not specified, $\mathbb{R}^2$ is always considered with its canonical Euclidean norm, always denoted by $s_0$. By $D$ we denote the open unit disc in the Euclidean plane $\mathbb{R}^2$ and by $S^1$ its boundary, the unit circle. Integration on open subsets of $\mathbb{R}^2$ is always performed with respect to the Lebesgue measure, unless otherwise stated. By $d$ we denote distances in metric spaces. Metric spaces appearing in this note will be assumed complete. 
A metric space is called proper if its closed bounded subsets are compact. \subsection{Seminorms and convex bodies}\label{subsecseminorm} By $\mathfrak S_2$ we denote the proper metric space of seminorms on $\mathbb{R}^2$ with the distance given by $d_{\mathfrak S_2} (s,s')= \max _{v\in S^1} \{|s(v)-s'(v)| \}$. A seminorm $s \in \mathfrak S_2$ is \emph{$Q$-quasiconformal} if for all $v,w\in S^1$ the inequality $s(v) \leq Q\cdot s(w)$ holds true. A convex body $C$ in $\mathbb{R}^2$ is a compact convex subset with non-empty interior. Convex, centrally symmetric bodies are in one-to-one correspondence with unit balls of norms on $\mathbb{R}^2$. Any convex body $C$ contains a unique ellipse of largest area, called the \emph{Loewner ellipse} of $C$. The convex body is called \emph{isotropic} if its Loewner ellipse is a Euclidean ball (cf. \cite{Milman}). We call a seminorm $s\in \mathfrak S_2$ isotropic if it is the $0$ seminorm, or if $s$ is a norm and its unit ball $B$ is isotropic. In the last case the Loewner ellipse of $B$ is a multiple $t\cdot \bar D$ of the closed unit disc. By John's theorem (cf. \cite{AlvT04}) $B$ is contained in $\sqrt 2 \cdot t \cdot \bar D$. Therefore, every isotropic seminorm is $\sqrt 2$-quasiconformal. \subsection{Definitions of area} \label{subsecarea} While there is an essentially unique natural way to measure areas of Riemannian surfaces, there are many different ways to measure areas of Finsler surfaces, some of them more appropriate for different questions. We refer the reader to \cite{Iva09}, \cite{Ber14}, \cite{AlvT04} and the literature therein for more information. A definition of area $\mu$ assigns a multiple $\mu _V$ of the Lebesgue measure on any $2$-dimensional normed space $V$, such that natural assumptions are fulfilled. In particular, it assigns the number $\mathbf{J}^{\mu} (s)$, \emph{the $\mu$-Jacobian} or \emph{$\mu$-area-distortion}, to any seminorm $s$ on $\mathbb{R}^2$ in the following way. 
By definition, $\mathbf{J} ^{\mu} (s)=0$ if the seminorm is not a norm. If $s$ is a norm then $\mathbf{J} ^{\mu} (s)$ equals the $\mu _{(\mathbb{R}^2,s)}$-area $\mu _{(\mathbb{R}^2,s)} (A)$ of the unit Euclidean square $A\subset \mathbb{R}^2$. Indeed, the choice of the definition of area is equivalent to a choice of the \emph{Jacobian} in the following sense. \begin{defn} A (2-dimensional definition of) Jacobian is a map $\mathbf{J}: \mathfrak S_2 \to [0,\infty )$ with the following properties: \begin{enumerate} \item Monotonicity: $\mathbf{J}(s)\geq \mathbf{J}(s')$ whenever $s\geq s'$; \item Homogeneity: $\mathbf{J}(\lambda \cdot s)= \lambda ^2 \cdot \mathbf{J}(s)$ for all $\lambda \in [0,\infty)$; \item ${\rm SL}_2$-invariance $\mathbf{J}(s\circ T)=\mathbf{J}(s)$ for any $T\in {\rm SL}_2$; \item Normalization: $\mathbf{J} (s_0)= 1$. \end{enumerate} \end{defn} The properties (2) and (3) can be joined to the usual transformation rule for the area: $\mathbf{J}(s\circ T)= |\det (T)| \cdot \mathbf{J}(s)$. It follows that $\mathbf{J}(s)= 0$ if and only if the seminorm $s$ is not a norm. Moreover, properties (1)-(3) imply that $\mathbf{J}$ is continuous. This is due to the following crucial fact: If norms $s_i$ converge to a norm $s$ in $\mathfrak S_2$ then, for any $\epsilon >0$ and all large $i$, the inequalities $(1-\epsilon) \cdot s_i \leq s \leq (1+\epsilon ) \cdot s_i$ hold true. A definition of area $\mu$ gives rise to a Jacobian $\mathbf{J}^{\mu}$ described above. On the other hand, any Jacobian $\mathbf{J}:\mathfrak S_2\to [0,\infty )$ provides a unique definition of area $\mu ^{\mathbf{J}}$ in the following way. On any $(\mathbb{R}^2 ,s)$ the definition of area $\mu ^{\mathbf{J}}$ assigns the $\mathbf{J}(s)$-multiple of the Lebesgue area of $\mathbb{R}^2$. For any normed plane $V$, we choose a linear isometry to some $(\mathbb{R}^2, s)$ and pull back the corresponding measure from $(\mathbb{R}^2, s)$ to $V$. 
By construction, the assignments $\mu \to \mathbf{J} ^{\mu}$ and $\mathbf{J}\to \mu ^{\mathbf{J}}$ are inverses of each other. \begin{rem} We refer to another similar geometric interpretation of a definition of area discussed in \cite{Ber14}. \end{rem} There are many non-equivalent definitions of area/Jacobian. Any two of them differ at most by a factor of $2$, due to John's theorem, \cite{AlvT04}. The most prominent examples are the Busemann (or Hausdorff) definition $\mu ^b$, the Holmes-Thompson definition $\mu ^{ht}$, the Benson (or Gromov $mass ^*$) definition $m^{\ast}$ and the inscribed Riemannian (or Ivanov) definition $\mu ^i$. We refer to \cite{AlvT04} for a thorough discussion of these examples and of the whole subject; and to \cite{Iva09}, \cite{BI13}, \cite{Ber14} for recent developments. Here, we just mention the Jacobians of these four examples (cf. \cite{Ber14}). In the subsequent examples, $B$ will always denote the unit ball of the normed plane $(\mathbb{R}^2,s)$. \begin{enumerate} \item The Jacobian $\mathbf{J}^b$ corresponding to the Hausdorff (Busemann) area $\mu^b$ equals $\mathbf{J} ^b(s)=\frac {\pi} {|B|}$, where $|B|$ is the Lebesgue area of $B$. \item The Jacobian $\mathbf{J}^{ht}$ corresponding to the Holmes-Thompson area $\mu ^{ht}$ equals $\mathbf{J}^{ht}(s)= \frac {|B^*|} {\pi}$, where $|B^*|$ is the Lebesgue area of the unit ball $B^*$ of the dual norm $s^*$ of $s$. \item The Jacobian $\mathbf{J}^*$ corresponding to the Benson (Gromov mass$^{\ast}$) definition of area $m^*$ equals $\mathbf{J} ^*(s)= \frac 4 {|P|}$, where $|P|$ is the Lebesgue area of a parallelogram $P$ of smallest area which contains $B$. \item The Jacobian $\mathbf{J}^i$ corresponding to the inscribed Riemannian definition of area $\mu ^i$ equals $\mathbf{J}^i(s) =\frac {\pi} {|L|} $, where $|L|$ is the Lebesgue area of the Loewner ellipse of $B$. 
\end{enumerate} \subsection{Comparison of the definitions of area} \label{vergleich} Below we denote by $|C|$ the Lebesgue area of a subset $C\subset \mathbb{R}^2$. Let $s$ be a norm on $\mathbb{R}^2$, let $B$ be its unit ball and let $L\subset B$ denote the Loewner ellipse of $B$. If $s_L$ denotes the norm whose unit ball is $L$, then $s\leq s_L$ and $s_L$ is Euclidean. Thus, for any definition of area $\mu $ with Jacobian $\mathbf{J} ^{\mu}$ we have $\mathbf{J}^{\mu} (s_L) = \frac {\pi} {|L|}$ and $\mathbf{J} ^{\mu}(s) \leq \mathbf{J}^{\mu}(s_L)$. For the inscribed Riemannian area $\mu ^i$ and its Jacobian $\mathbf{J}^i$ we have equality $\mathbf{J}^{i} (s) =\mathbf{J}^{i} (s_L)$ in the above inequality. Hence, for any other definition of area $\mu$ we must have $ \mathbf{J}^{i} \geq \mathbf{J}^{\mu}$. In particular, the inscribed Riemannian area is the largest definition of area. On the other hand, by John's theorem, $\mathbf{J}^i \leq 2\mathbf{J} ^{\mu}$. We set \begin{equation}\label{eq-def-qmu} q(\mu) := \inf \frac{\mathbf{J}^{\mu } (s)}{\mathbf{J}^i (s)}, \end{equation} where $s$ runs over all norms on $\mathbb{R}^2$. As we have just observed, $q(\mu^i )=1$ and $1/2\leq q(\mu) < 1$ for any other definition of area $\mu$. \begin{lem} For the Hausdorff area $\mu ^b$ we have $q(\mu ^b) = \frac{\pi}{4}$. \end{lem} \begin{proof} Let $B$ be the unit ball of the norm $s$ on $\mathbb{R}^2$. In order to compare $\mathbf{J}^i (s)$ and $\mathbf{J}^b (s)$ we just need to evaluate $\mu^i$ and $\mu ^b $ on $B$. For the Busemann definition of area we have $\mu ^b (B)=\pi$. On the other hand, $\mu^i (B)= \pi \cdot \frac {|B| } {|L|}$, where $L$ is the Loewner ellipse of $B$. The \emph{volume ratio} $\frac {|B|} {|L|}$ is maximal when $B$ is a square, see \cite{Bal97}, Theorem 6.2, in which case it is equal to $\frac 4 {\pi}$. \end{proof} Since we will not need further statements about the function $q$ we just summarize here some properties without proofs. 
For any definition of area $\mu$, there exists a norm $s$ with $q(\mu ) \cdot \mathbf{J}^{i} (s)= \mathbf{J}^{\mu} (s)$. Moreover, using John's theorem one can show that this norm $s$ can be chosen to have a square or a hexagon as its unit ball. One can show that $q(\mu ^{ht} ) = \frac{2}{\pi}$, where again on the supremum norm $s_{\infty}$ the difference between $\mathbf{J}^i$ and $\mathbf{J}^{ht}$ is maximal. Finally, for Gromov's definition of area $m^{\ast}$ one can show that $q(m^{\ast} )=\frac{\sqrt 3}{2}$. Here the maximal deviation of $\mathbf{J}^i$ from $\mathbf{J}^{\ast}$ is achieved for the norm whose unit ball is a regular hexagon. \subsection{Definitions of energy} \label{subsecdef} An assignment of a definition of area or Jacobian is essentially equivalent to the assignment of an area functional on all Lipschitz and Sobolev maps defined on domains in $\mathbb{R}^2$, see below. Similarly, the choice of an energy functional is essentially equivalent to the following choice of a \emph{definition of energy}: \begin{defn} A ($2$-dimensional conformally invariant) \emph{definition of energy} is a continuous map $\mathcal{I}:\mathfrak S_2\to [0,\infty )$ which has the following properties: \begin{enumerate} \item Monotonicity: $\mathcal I(s)\geq \mathcal I(s')$ whenever $s\geq s'$; \item Homogeneity: $\mathcal I(\lambda \cdot s)= \lambda ^2 \cdot \mathcal{I}(s)$ for all $\lambda \in [0,\infty)$; \item ${\rm SO}_2$-invariance: $\mathcal{I}(s\circ T)=\mathcal{I}(s)$ for any $T\in {\rm SO}_2$; \item Properness: The set $\mathcal{I}^{-1} ([0,1])$ is compact in $\mathfrak S_2$. \end{enumerate} \end{defn} Due to properness and homogeneity, we have $\mathcal{I}(s)=0$ only for $s=0$. The properness of $\mathcal{I}$ implies that a definition of energy is \emph{never} ${\rm SL}_2$-invariant, in contrast to a definition of area. The set of all definitions of energy is a convex cone. 
Moreover, for any Jacobian $\mathbf{J}$, any definition of energy $\mathcal{I}$ and any $\epsilon >0$ the map $I_{\epsilon} := \mathbf{J}+ \epsilon \cdot \mathcal{I}$ is a definition of energy. Thus the closure (in the topology of locally uniform convergence) of the set of definitions of energy contains all definitions of area. The following two definitions of energy are most prominent: the Korevaar-Schoen-Dirichlet energy $I^2$ given by $$I^2(s)=\frac 1 {\pi} \int _{S^1} s(v)^2 dv$$ and the Reshetnyak energy $$I_+^2 (s)= \sup \{ s(v) ^2 |v\in S^1 \}. $$ Due to properness and homogeneity any two definitions of energy are comparable: For any definition of energy $\mathcal{I}$ there is a constant $k_{\mathcal{I}} \geq 1$, such that \begin{equation} \label{2ener} \frac 1 {k_{\mathcal{I}}} \cdot {\mathcal{I}} \leq I_+ ^2 \leq k_{\mathcal{I}} \cdot \mathcal{I}. \end{equation} \subsection{Energy and area of Sobolev maps} \label{subsecsob} We assume some experience with Sobolev maps and refer to \cite{LW} and the literature therein. In this note we consider only Sobolev maps defined on bounded open domains $\Omega \subset \mathbb{R}^2$. Let $\Omega$ be such a domain and let $u\in W^{1,2} (\Omega,X)$ be a Sobolev map with values in $X$. Then $u$ has an approximate metric derivative at almost every point $z\in \Omega$ (\cite{Kar07},\cite{LW}), which is a seminorm on $\mathbb{R}^2$ denoted by $\ap\md u_z$. When $\ap\md u_z$ exists, it is the unique seminorm $s$ for which the following approximate limit is $0$: $$\operatorname{ap} \lim _{y\to z} \frac {d(u(z),u(y))- s(y-z)} {|y-z|} =0.$$ We refer the reader to \cite{LW}, \cite{Kar07} and mention here only that in the case of locally Lipschitz maps $u$, the approximate metric derivative is just the metric derivative defined by Kirchheim (\cite{Kir94}, cf. also \cite{AK00}, \cite{Iva09}). 
If the target space $X$ is a Finsler manifold then the approximate metric derivative at almost all points $z$ is equal to $|D_z u|$, where $D_z u$ is the usual (weak) derivative and $|\cdot |$ is the given norm on the tangent space $T_{u(z)} X$. A map $u\in W^{1,2} (\Omega,X) $ is called \emph{$Q$-quasiconformal} if the seminorms $\ap\md u_z \in \mathfrak S_2$ are $Q$-quasiconformal for almost all $z\in \Omega$. For a definition of energy $\mathcal{I}$, the $\mathcal{I}$-energy of a map $u\in W^{1,2} (\Omega ,X)$ is given by $$E_{\mathcal{I}}(u):= \int _{\Omega} \mathcal{I}( \ap\md u_z) dz$$ This value is well-defined and finite for any $u \in W^{1,2} (\Omega ,X)$, due to \eqref{2ener}. If $\mathcal{I}$ is the Korevaar-Schoen definition of energy $I^2$, respectively the Reshetnyak definition of energy $I_+^2$ then $E_{\mathcal{I}}(u)$ is the Korevaar-Schoen respectively the Reshetnyak energy of $u$ described in \cite{KS93}, \cite{Res97} and in the introduction. We will denote $E_{\mathcal{I}}$ in these cases as before by $E^2$ and $E_+^2 $, respectively. Similarly, given a definition of area $\mu$ and the corresponding Jacobian $\mathbf{J}^ {\mu} $ one obtains the $\mu$-area of $u$ by integrating $\mathbf{J} ^{\mu} (\ap\md u_z)$ over $\Omega$. We will denote it by $$\operatorname{Area} _{\mu} (u) :=\int _{\Omega} \mathbf{J}^{\mu} ( \ap\md u_z) dz$$ Pointwise comparison of $\mu$ with the inscribed Riemannian definition of area $\mu^i$ discussed in Subsection \ref{vergleich} gives us for any Sobolev map $u$: \begin{equation} \label{areafunct} q(\mu)^{-1} \cdot \operatorname{Area} _{\mu } (u) \geq \operatorname{Area} _{\mu^i} (u) \geq \operatorname{Area} _{\mu} (u). \end{equation} \subsection{Quasi-convexity} \label{subsecquas} A definition of energy $\mathcal{I}: \mathfrak{S}_2 \to[0,\infty)$ is called \emph{quasi-convex} if linear $2$-dimensional subspaces of normed vector spaces have minimal $\mathcal{I}$-energy. 
More precisely, if for every finite dimensional normed space $Y$ and every linear map $L: \mathbb{R}^2\to Y$ we have \begin{equation}\label{eq1} E_ {\mathcal{I}} (L|_D) \leq E_{\mathcal{I}} (\psi) \end{equation} for every smooth immersion $\psi: \bar D\to Y$ with $\psi|_{\partial D} = L|_{\partial D}$. Similarly, one defines the quasi-convexity of a definition of area with corresponding functional $\mathbf{J}:\mathfrak S _2 \to [0,\infty )$, see Section 5 in \cite{LW}. As has been shown in \cite{LW}, in extension of the classical results (cf. \cite{AF84}), a definition of energy is quasi-convex if and only if the map $u\mapsto E_{\mathcal{I}}(u)$ is semi-continuous on any Sobolev space $W^{1,2} (\Omega, X)$ (with respect to $L^2$-convergence). Similarly, the quasi-convexity of a definition of area $\mu$ is equivalent to the semi-continuity property of the $\mu$-area on all Sobolev spaces $W^{1,2} (\Omega, X)$. Recall that the Reshetnyak and Korevaar-Schoen definitions of energy are quasi-convex (\cite{KS93}, \cite{Res97}, \cite{LW}). The four definitions of area mentioned in Subsection \ref{subsecarea} are quasi-convex as well (\cite{Iva09}, \cite{BI13}, \cite{AlvT04}, \cite{LW}). We dwell a bit on the properties of a definition of area $\mu$ which is not quasi-convex (cf.~\cite{Mor52}). Let $L:\mathbb{R}^2\to Y$ be a linear map to a finite-dimensional normed vector space and let $\psi :\bar D\to Y$ be a smooth map which coincides with $L$ on $S^1$ and satisfies $$\operatorname{Area} _{\mu} (\psi) < \operatorname{Area} _{\mu} (L|_D).$$ By enlarging $Y$ if needed and by using a general position argument we can assume that $\psi$ is a diffeomorphism onto its image. Now we can obtain a special sequence of maps $\psi _m:\bar D\to Y$ converging to $L:\bar D\to Y$ and violating the semi-continuity property in the following way. 
The map $\psi _m$ differs from $L$ on $\delta \cdot m ^2$ disjoint balls of radius $m^{-1}$, where $\delta>0$ is a sufficiently small, fixed constant. The difference between $\psi _m$ and $L$ on any of these balls is given by the corresponding translate of $\psi$, rescaled by the factor $m^{-1}$. Then there is a number $K>0$, such that any of the maps $\psi _m$ is biLipschitz with the same biLipschitz constant $K$. The maps $\psi _m$ converge uniformly to the linear map $L$. Finally, for $\epsilon = \operatorname{Area} _{\mu } (L) - \operatorname{Area} _{\mu} (\psi )$, we deduce $\operatorname{Area} _{\mu } (\psi _m) =\operatorname{Area} _{\mu } (L) - \delta \cdot \epsilon $ for all $m$. In particular, $\operatorname{Area}_ {\mu} (L)>\lim _{m\to \infty} (\operatorname{Area} _{\mu } (\psi _m))$. \section{Area definition corresponding to an energy} \label{sec3} \subsection{General construction} Let now $\mathcal{I}$ be any definition of energy. Consider the function $\hat J: \mathfrak S_2\to [0,\infty )$: $$\hat J(s):= \inf \{ \mathcal{I}(s\circ T) | T\in {\rm SL}_2 \}$$ given by the infimum of $\mathcal{I}$ on the ${\rm SL}_2$-orbit of $s$. Due to the properness of $\mathcal{I}$, the infimum in the above equation is indeed a minimum, unless the seminorm is not a norm. On the other hand, if $s$ is not a norm then the ${\rm SL}_2$-orbit of $s$ contains the $0$ seminorm in its closure, and we get $\hat J(s)=0$. By construction, the function $\hat J:\mathfrak S_2\to [0,\infty )$ is ${\rm SL}_2$-invariant. Since $\mathcal{I}$ is monotone and homogeneous, so is $\hat J$. Finally, $\hat J(s_0)$ is different from $0$. Thus, setting the constant $\lambda _{\mathcal{I}}$ to be $\frac 1 {\hat J (s_0)}$, we see that $\mathbf{J} ^{\mathcal{I}} (s) := \lambda _{\mathcal{I}} \cdot \hat J (s) $ is a definition of a Jacobian in the sense of the previous section. 
The definition of area which corresponds to the Jacobian $\mathbf{J}^{\mathcal{I}}$ will be denoted by $\mu ^{\mathcal{I}}$. By construction, \begin{equation} \label{generalineq} \mathbf{J} ^{\mathcal{I}} (s) \leq \lambda _{\mathcal{I}} \cdot \mathcal{I}(s) \end{equation} with equality if and only if $\mathcal{I}$ assumes the minimum on the ${\rm SL}_2 $-orbit of $s$ at the seminorm $s$. \begin{defn} We will call a seminorm $s$ minimal for the definition of energy $\mathcal{I}$, or just $\mathcal{I}$-minimal, if $\mathcal{I}(s) \leq \mathcal{I}(s\circ T)$ for all $T\in {\rm SL}_2$. \end{defn} Thus a seminorm $s$ is $\mathcal{I}$-minimal if and only if we have equality in the inequality \eqref{generalineq}. By homogeneity and continuity, the set of all $\mathcal{I}$-minimal seminorms is a closed cone. Any $\mathcal{I}$-minimal seminorm is either a norm or the trivial seminorm $s=0$. We therefore deduce by a limiting argument: \begin{lem} \label{quasicon} There is a number $Q_{\mathcal{I}} >0$ such that any $\mathcal{I}$-minimal seminorm $s$ is $Q_{\mathcal{I}}$-quasiconformal. \end{lem} \subsection{The Reshetnyak energy and the inscribed Riemannian area} \label{subsecconst} We are going to discuss the application of the above construction to the main examples. In order to describe the Jacobian $\mathbf{J}^{\mathcal{I}}$, the normalization and the quasiconformality constants $\lambda _{\mathcal{I}}, Q_{\mathcal{I}}$ induced by a definition of energy $\mathcal{I}$, it is crucial to understand $\mathcal{I}$-minimal norms. By general symmetry reasons one might expect that $\mathcal{I}$-minimal norms are particularly round. Our first result, essentially contained in \cite{LW}, confirms this expectation for the Reshetnyak energy: \begin{lem} \label{resiso} Let $\mathcal{I}=I_+^2$ be the Reshetnyak definition of energy. A seminorm $s\in \mathfrak S_2$ is $I^2 _+$-minimal if and only if $s$ is isotropic in the sense of Subsection \ref{subsecseminorm}. 
\end{lem} \begin{proof} For seminorms which are not norms the statement is clear. Thus we may assume that $s$ is a norm. After rescaling, we may assume $I^2_+(s)=1$. Hence $1=\sup \{ s(v), v\in S^1\}$, and $\bar D$ is the largest Euclidean disc contained in the unit ball $ B$ of the norm $s$. Assume that $s$ is $I_+ ^2$-minimal and $\bar D$ is not the Loewner ellipse of $B$. Then there exists an area increasing linear map $A:\mathbb{R}^2\to \mathbb{R}^2$ such that $B$ still contains the ellipse $A(D)$, hence $I_+ ^2(s\circ A) \leq 1$. Consider the map $T=\det (A) ^{-\frac 1 2} \cdot A \in {\rm SL}_2$. Then $I^2_+(s\circ T) <1$ since $\det (A) >1$. This contradicts the assumption that $s$ is $I^2_+$-minimal. On the other hand, if $s$ is isotropic then $\bar D$ is the Loewner ellipse of $B$. Consider an $I^2_+$-minimal norm $s'=s\circ T$ in the ${\rm SL}_2$-orbit of $s$. Then the Loewner ellipse $T(\bar{D})$ of $s'$ must be a multiple of $\bar D$, as we have seen above. Hence $T\in {\rm SO}_2$. Since $I^2_+$ is conformally invariant, we get $I^2_+(s)=I^2_+(s')$, and $s$ is $I^2_+$-minimal. \end{proof} Now we can easily deduce: \begin{cor} \label{corivanov} For the Reshetnyak definition of energy $\mathcal{I}=I_+^2$ the normalization constant $\lambda _{\mathcal{I}}$ equals $1$, the optimal quasiconformality constant $Q_{\mathcal{I}}$ equals $\sqrt 2$, and the induced definition of area $\mu ^{\mathcal{I}}$ is the inscribed Riemannian area $\mu ^i$. \end{cor} \begin{proof} We have $\lambda _{\mathcal I} =\frac 1 {\hat J( s_0)}= \frac 1 {\mathcal{I}(s_0)}=1$ since $s_0$ is $I_+^2$-minimal. Isotropic seminorms are $\sqrt 2$-quasiconformal by John's theorem. The supremum norm $s_{\infty} \in \mathfrak S_2$ is isotropic, hence $I^2_+$-minimal. For $s_{\infty}$ the quasiconformality constant $\sqrt 2$ is optimal. 
In order to prove that the induced definition of area coincides with the inscribed Riemannian area $\mu ^i$, it suffices to evaluate the Jacobians on any $I^2_+$-minimal norm $s$. By homogeneity we may assume again that the Loewner ellipse of the unit ball $B$ of $s$ is the unit disc $\bar D$. Then $\mathbf{J} ^{\mathcal I} (s)= 1 =\mathbf{J}^i (s)$. \end{proof} \subsection{The Korevaar-Schoen energy and the Dirichlet area} \label{subsec+} Unfortunately, in the classical case of the Korevaar-Schoen energy $\mathcal{I}=I^2$ we do not know much about the induced definition of area/Jacobian. We call this the Dirichlet definition of area/Jacobian and denote it by $\mu ^D$ and $\mathbf{J} ^D$, respectively. Only the normalization constant in this case is easy to determine. \begin{lem} For the Korevaar-Schoen energy $\mathcal{I}=I^2$, the canonical Euclidean norm $s_0$ is $I^2$-minimal. The normalization constant $\lambda _{\mathcal I}$ equals $\frac 1 2$. \end{lem} \begin{proof} We have $I^2(s_0)=\frac 1 {\pi} \cdot 2\pi =2$. Therefore, it suffices to prove the $I^2$-minimality of $s_0$. Since $I^2$ and $s_0$ are ${\rm SO}_2$-invariant, it suffices to prove $I^2(s_0\circ T) \geq I^2(s_0)$ for any symmetric matrix $T \in {\rm SL}_2 $. In this case, one easily computes $I^2 (s_0\circ T) =\lambda _1 ^2 +\lambda _2 ^2$, where $\lambda _{1,2}$ are the eigenvalues of $T$. Under the assumption $\lambda _1\cdot \lambda _2=\det (T)=1$ the minimum is achieved for $\lambda _1=\lambda _2=1$. Hence $s_0$ is $I^2$-minimal. \end{proof} From the corresponding property of $\mathcal{I}=I^2$, it is easy to deduce that for norms $s\neq s'$ the inequality $s\geq s'$ implies the strict inequality $\mathbf{J}^D(s) >\mathbf{J}^D (s')$, in contrast to the cases of inscribed Riemannian and Benson definitions of areas $\mu ^i$ and $m^{\ast}$. 
In \cite{LW} it is shown that for $\mathcal{I}=I^2$ the quasiconformality constant $Q_{\mathcal{I}}$ in \lref{quasicon} can be chosen to be $2\sqrt2 +\sqrt 6$. However, the computation of $Q_{\mathcal{I}}$ in \cite{LW} and the above strict monotonicity statement show that this constant is not optimal. Computing $\mathbf{J}^D$ on the supremum norm $s_{\infty}$ it is possible to see that $\mu ^D$ is different from the Busemann and Holmes-Thompson definitions of area. We leave the lengthy computation to the interested reader. \section{Main lemma and main theorems} \label{sec4} \subsection{Basic observations} Let $\mathcal{I}$ be a definition of energy and let $\mu ^{\mathcal{I}}$ and $\mathbf{J}^{\mathcal{I}}$ be the corresponding definitions of area and Jacobian. Let $\lambda _{\mathcal{I}}$ be the normalization constant from the previous section. Let $X$ be a metric space, $\Omega \subset \mathbb{R}^2$ a domain and let $u\in W^{1,2} (\Omega ,X)$ be a Sobolev map. Integrating the point-wise inequality \eqref{generalineq} we deduce: \begin{equation} \label{genineq} \operatorname{Area}_{\mu ^{\mathcal{I}}} (u) \leq \lambda _{\mathcal{I}} \cdot E_{\mathcal{I}} (u) \end{equation} Moreover, equality holds if and only if the approximate metric derivative $ \ap\md u_z$ is $\mathcal{I}$-minimal for almost all $z\in \Omega$. In case of equality, \lref{quasicon} implies that the map $u$ is $Q_{\mathcal{I}}$-quasiconformal. \subsection{Main Lemma} Conformal invariance of $\mathcal{I}$ together with the usual transformation rule (\cite{LW}, Lemma 4.9) has the following direct consequence: For any conformal diffeomorphism $\phi:\Omega '\to \Omega$ which is biLipschitz and for any map $u\in W^{1,2} (\Omega ,X)$, the composition $u\circ \phi$ is contained in $ W^{1,2} (\Omega ', X)$, and it has the same $\mathcal{I}$-energy as $u$. 
The general transformation formula shows that for any definition of area $\mu$, any biLipschitz homeomorphism $\phi:\Omega '\to \Omega$, and any $u\in W^{1,2} (\Omega ,X)$ the map $u\circ \phi \in W^{1,2} (\Omega ', X)$ has the same $\mu$-area as $u$. Now we can state the main technical lemma, which appears implicitly in \cite{LW}: \begin{lem} \label{mainlem} Let $\mathcal{I}, \mu ^{\mathcal{I}} , \lambda _{\mathcal{I}}$ be as above. Let $X$ be a metric space and let $u\in W^{1,2} (D,X)$ be arbitrary. Then the following conditions are equivalent: \begin{enumerate} \item $\operatorname{Area} _{\mu ^{\mathcal{I}} }(u) =\lambda _{\mathcal{I}} \cdot E_{\mathcal{I}}(u)$. \item For almost every $z\in D$ the approximate metric derivative $\ap\md u_z$ is an $\mathcal{I}$-minimal seminorm. \item For every biLipschitz homeomorphism $\psi : D\to D$ we have $E_{\mathcal{I}}(u\circ \psi ) \geq E_{\mathcal{I}}(u)$. \end{enumerate} \end{lem} \begin{proof} We have already proven the equivalence of (1) and (2). If (1) holds, then (3) follows directly from the general inequality \eqref{genineq} and invariance of the $\operatorname{Area} _{\mu}$ under diffeomorphisms. It remains to prove the main part, namely that (3) implies (2). Thus assume (3) holds. The conformal invariance of $\mathcal{I}$ and the Riemann mapping theorem imply that for any other domain $\Omega \subset \mathbb{R}^2$ with smooth boundary and any biLipschitz homeomorphism $\psi: \Omega \to D$ the inequality $E_{\mathcal{I}}(u\circ \psi )\geq E_{\mathcal{I}}(u)$ holds true. Indeed, we only need to compose $\psi$ with a conformal diffeomorphism $F:D \to \Omega $, which is biLipschitz since the boundary of $\Omega$ is smooth. Assume now that (2) does not hold. Then it is possible to construct a biLipschitz map $\psi$ from a domain $\Omega $ to $D$ such that $E_{\mathcal{I}}(u\circ \psi) <E_{\mathcal{I}}(u)$ in the same way as in the proof of Theorem 6.2 in \cite{LW}, to which we refer for some technical details. 
Here we just explain the major steps. First, we find a compact set $K \subset D$ of positive measure such that at no point $z\in K$ the approximate metric derivative $\ap\md u_z$ is $\mathcal{I}$-minimal. Making $K$ smaller we may assume that the map $z\mapsto \ap\md u_z$ is continuous on $K$. By continuity, we find a Lebesgue point $z$ of $K$, a map $T\in {\rm SL}_2 $ and some $\epsilon >0$ such that $\mathcal{I}(s\circ T) \leq \mathcal{I}(s)-\epsilon$ for any seminorm $s$ which arises as the approximate metric derivative $\ap\md u_y$ at some point $y\in K\cap B_{\epsilon} (z)$. We may assume without loss of generality that $z$ is the origin $0$ and that $T$ is a diagonal matrix with two different eigenvalues $\lambda _1 >\lambda _2 =\frac 1 {\lambda _1} >0$. Then (here comes the trick!) we define a family of biLipschitz homeomorphisms $\psi _r :\mathbb{R}^2\to \mathbb{R}^2$ as follows. The map $\psi _r$ coincides with $T$ on the closed $r$-ball around $0$. On the complement of this $r$-ball, the map $\psi _r$ is the restriction of the holomorphic (hence conformal) map $f_r:\mathbb{C}^{\ast} \to \mathbb{C}$, defined by $f_r(z)= c \cdot z + r^2 \cdot d \cdot z ^{-1}$, where the constants $c,d \in \mathbb{C}$ are given by $c=\frac 1 2 (\lambda _1+ \lambda _2)$ and $d =\frac 1 2 (\lambda _1 - \lambda _2)$. Then the map $f_r$ coincides with $T$ on the $r$-circle around $0$. This map $\psi _r$ is biLipschitz on $\mathbb{R}^2$ (and smooth outside of the $r$-circle around $0$). Moreover, the map $\psi _r$ preserves the $\mathcal{I}$-energy of the map $u$ on the complement of the $r$-ball, due to the conformality of $f_r$ and the conformal invariance of $\mathcal{I}$. Finally, by construction of $T$, the map $\psi _r$ decreases the $\mathcal{I}$-energy of $u$ by some positive amount (at least $\frac 1 2 \epsilon \pi r^2$), if $r$ is small enough. Thus $E_{\mathcal{I}}(u\circ \psi _r) <E_{\mathcal{I}}(u)$ for $r$ small enough. 
This provides a contradiction and finishes the proof of the lemma. \end{proof} \subsection{Formulation of the main theorems} The proof of the following theorem is postponed to the next section. \begin{thm} \label{quasiarea} Let $\mathcal{I}$ be a quasi-convex definition of energy. Then the corresponding definition of area $\mu ^{\mathcal{I}}$ is quasi-convex as well. \end{thm} \tref{quasiarea} generalizes the first statement of \tref{thm1}. Together with \cref{corivanov} it shows that $\mu ^i$ is quasi-convex, cf. Remark \ref{remark}. Before turning to the main theorem stating the connection of energy and area minimizers, we recall an important step in the solution of the Plateau problem (\cite{LW}, Proposition 7.5, \cite{KS93}): Let $\Gamma$ be a Jordan curve in a proper metric space $X$. Assume that the sequence of maps $w_i \in \Lambda (\Gamma , X)$ has uniformly bounded Reshetnyak energy $E^2 _+(w_i)$. Then there exist conformal diffeomorphisms $\phi_i:D\to D$ such that the sequence $w_i' =w_i\circ \phi _i \in \Lambda (\Gamma ,X)$ converges in $L^2$ to a map $\bar w\in \Lambda (\Gamma, X)$. Note that for any quasi-convex definition of area $\mu$ or energy $\mathcal{I}$, we have in this case (\cite{LW}, Theorem 5.4): \begin{equation} \label{limit} \operatorname{Area} _{\mu} (\bar w) \leq \liminf \operatorname{Area} _{\mu} (w_i)\text{ and } E_{\mathcal{I}} (\bar w) \leq \liminf E_{\mathcal{I}} (w_i). \end{equation} The proof of the following theorem will rely on \tref{quasiarea}. \begin{thm} \label{thmmain} Let $\mathcal{I}$ be a quasi-convex definition of energy. Let $\Gamma$ be a Jordan curve in a proper metric space $X$. Any map $u\in \Lambda (\Gamma, X)$ with minimal $\mathcal{I}$-energy in $\Lambda (\Gamma ,X)$ has minimal $\mu^{\mathcal{I}}$-area in $\Lambda (\Gamma ,X)$. Moreover, $u$ is $Q_{\mathcal{I}}$-quasiconformal. \end{thm} \begin{proof} Let $u\in \Lambda (\Gamma, X)$ with minimal $\mathcal{I}$-energy among all maps $v\in \Lambda (\Gamma ,X)$ be given. 
Then $E_{\mathcal{I}}(u) \leq E_{\mathcal{I}}(u\circ \phi)$ for any biLipschitz homeomorphism $\phi:D\to D$. Due to \lref{mainlem}, $\operatorname{Area} _{\mu ^{\mathcal{I}} }(u) =\lambda _{\mathcal{I}} \cdot E_{\mathcal{I}}(u)$. Moreover, by \lref{quasicon}, almost all approximate derivatives of $u$ are $Q_{\mathcal{I}}$-quasiconformal. This proves the last statement. Assume that $u$ does not minimize the $\mu^{\mathcal{I}}$-area and take another element $v\in \Lambda (\Gamma ,X)$ with $\operatorname{Area} _{\mu ^{\mathcal{I}}} (v) <\operatorname{Area} _{\mu ^{\mathcal{I}}}(u)$. Consider the set $\Lambda _0$ of elements $w\in \Lambda (\Gamma ,X)$ with $\operatorname{Area} _{\mu ^\mathcal{I}} (w) \leq \operatorname{Area} _{\mu ^\mathcal{I}} (v)$. We take a sequence $w_n \in \Lambda _0$ such that $E_{\mathcal{I}} (w_n)$ converges to the infimum of the $\mathcal{I}$-energy on $\Lambda _0$. Due to \eqref{2ener}, the Reshetnyak energy of all maps $w_n$ is bounded from above by a uniform constant. Using the observation preceding \tref{thmmain}, we find some $\bar w \in \Lambda (\Gamma ,X)$ which satisfies \eqref{limit}. Here we have used the quasi-convexity of $\mu^{\mathcal{I}}$, given by \tref{quasiarea}. Thus, $\bar w$ is contained in $\Lambda _0$ and minimizes the $\mathcal I$-energy in $\Lambda _0$. In particular, $E_{\mathcal{I}}(\bar w \circ \phi) \geq E_{\mathcal{I}}(\bar w)$, for any biLipschitz homeomorphism $\phi : D\to D$. Applying \lref{mainlem} to the map $\bar w$ we deduce $$\lambda _{\mathcal{I}} \cdot E_{\mathcal{I}}(u)= \operatorname{Area} _{\mu^{\mathcal{I}}} (u) >\operatorname{Area} _{\mu ^{\mathcal{I}}}(v)\geq \operatorname{Area} _{\mu ^{\mathcal{I}}}(\bar w) =\lambda _{\mathcal{I}} \cdot E_{\mathcal{I}}(\bar w).$$ This contradicts the minimality of $E_{\mathcal{I}}(u)$. \end{proof} \subsection{Regularity of energy minimizers} The regularity of energy minimizers is now a direct consequence of \cite{LW}. 
Recall that a Jordan curve $\Gamma\subset X$ is a \emph{chord-arc curve} if the restriction of the metric to $\Gamma$ is biLipschitz equivalent to the induced intrinsic metric. A map $u:D\to X$ is said to satisfy \emph{Lusin's property $(N)$} if for any subset $S$ of $D$ with area $0$ the image $u(S)$ has zero two-dimensional Hausdorff measure. \begin{thm} \label{thmgenreg} Let $X$ be a proper metric space which satisfies a uniformly local quadratic isoperimetric inequality. Let $\mathcal{I}$ be a quasi-convex definition of energy and let $\Gamma $ be a Jordan curve in $X$ such that the set $\Lambda (\Gamma ,X)$ is not empty. Then there exists a minimizer $u$ of the $\mathcal{I}$-energy in $\Lambda (\Gamma ,X)$. Any such minimizer has a unique locally Hoelder continuous representative which extends to a continuous map on $\bar D$. Moreover, $u$ is contained in the Sobolev space $W^{1,p} _{loc} (D,X)$ for some $p>2$ and satisfies Lusin's property $(N)$. If the curve $\Gamma$ is a chord-arc curve then $u$ is Hoelder continuous on $\bar D$. \end{thm} \begin{proof} The existence of a minimizer $u$ of the $\mathcal{I}$-energy in $\Lambda (\Gamma ,X)$ is a consequence of \cite{LW}, Theorem~5.4 and Proposition~7.5, see also Theorem~7.6. Any map $u$ minimizing the $\mathcal{I}$-energy in $\Lambda (\Gamma, X)$ is quasiconformal and minimizes the $\mu ^{\mathcal{I}}$-area in $\Lambda (\Gamma, X)$, by \tref{thmmain}. The result now follows from \cite{LW}, Theorems 8.1, 9.2, and 9.3. \end{proof} \subsection{Optimal regularity} We are going to provide the proof of \tref{optregul}. Thus, let $u\in W^{1,2}(D, X)$ be as in \tref{optregul}. Then for any biLipschitz homeomorphism $\psi: D\to D$ we have $\operatorname{Area}_\mu(u\circ \psi)= \operatorname{Area}_\mu(u)$ and therefore $E_+^2(u\circ\psi) \geq E_+^2(u)$. Applying \lref{mainlem} and \lref{resiso} we see that $u$ is infinitesimally isotropic in the following sense. 
\begin{defn} A map $u\in W^{1,2}(D, X)$ is infinitesimally isotropic if for almost every $z\in D$ the approximate metric derivative of $u$ at $z$ is an isotropic seminorm. \end{defn} \tref{optregul} is thus an immediate consequence of the following theorem. \begin{thm} Let $\Gamma$ be a Jordan curve in a metric space $X$. Assume that $X$ satisfies the $(C,l_0,\mu )$-quadratic isoperimetric inequality and let $u\in\Lambda (\Gamma ,X)$ be an infinitesimally isotropic map having minimal $\mu$-area in $\Lambda (\Gamma ,X)$. Then $u$ has a locally $\alpha$-Hoelder continuous representative with $\alpha = q(\mu)\cdot\frac{1}{4 \pi C}$. \end{thm} \begin{proof} Due to \cite{LW}, $u$ has a unique continuous representative. For any subdomain $\Omega $ of $D$ we have \begin{equation} \label{verylast} E_+^2 (u|_{\Omega }) =\operatorname{Area} _{\mu ^i} (u|_{\Omega} ) \end{equation} by \lref{mainlem}. Looking into the proof of the Hoelder continuity of $u$ in \cite{LW}, Proposition 8.7, we see that the quasiconformality factor $Q$ of $u$ (which, as we know, is bounded by $\sqrt 2$) comes into the game only once. Namely, this happens in the estimate (40) in Lemma 8.8, where the inequality $E_+^2 (u|_{\Omega} ) \leq Q^2\cdot \operatorname{Area} _{\mu} (u|_{\Omega }) $ appears for open balls $\Omega\subset D$. Using \eqref{verylast} together with \eqref{areafunct} we can replace this estimate (40) by $$E_+^2 (u|_{\Omega})=\operatorname{Area} _{\mu ^i} (u|_{\Omega} ) \leq q(\mu)^{-1} \cdot \operatorname{Area} _{\mu} (u|_{\Omega}). $$ Hence we can replace the factor $Q^2$ in the proof of \cite{LW}, Proposition 8.7 by the factor $q(\mu )^{-1}$. Leaving the rest of that proof unchanged, we get $\alpha = q(\mu)\cdot \frac {1} {4\pi C}$ as a bound for the Hoelder exponent of $u$. 
\end{proof} \section{Quasi-convexity of $\mu ^{\mathcal{I}}$} \label{sec5} This section is devoted to the \begin{proof}[Proof of \tref{quasiarea}] Assume on the contrary, that the definition of energy $\mathcal{I}$ is quasi-convex, but that $\mu ^{\mathcal{I}}$ is not quasi-convex. Consider a finite-dimensional normed vector space $Y$, a linear map $L: \mathbb{R}^2 \to Y$ and a sequence of smooth embeddings $\psi _m: \bar D\to Y$ as in Subsection \ref{subsecquas}, such that the following holds true. The maps $\psi _m$ coincide with $L$ on the boundary circle $S^1$, they are $K$-biLipschitz with a fixed constant $K$, and they converge uniformly to the restriction of $L$ to $\bar D$. Finally, for some $\epsilon >0$ and all $m>0$, we have $$\operatorname{Area}_{\mu^{\mathcal{I}}} (L|_D) \geq \operatorname{Area} _{\mu ^{\mathcal{I}}} (\psi _m) +\epsilon.$$ We will use this sequence to obtain a contradiction to the semi-continuity of $E_{\mathcal{I}}$. The idea is to modify $\psi _m$ by (almost) homeomorphisms, so that the new maps satisfy equality in the main area-energy inequality \eqref{genineq}. We explain this modification in a slightly more abstract context of general biLipschitz discs. The first observation is a direct consequence of the fact that the diameter of a simple closed curve in $\mathbb{R}^2$ equals the diameter of the corresponding Jordan domain. \begin{lem} \label{pseudo} Let $Z$ be a metric space which is $K$-biLipschitz to the disc $\bar D$ and let $u:\bar D\to Z$ be any homeomorphism. Then for any Jordan curve $\gamma \subset \bar D$ and the corresponding Jordan domain $J\subset \bar D$ we have $\mathrm{diam} (u(\gamma) ) \geq K^2 \cdot \mathrm{diam} (u(J))$. \end{lem} By continuity, the same inequality holds true for any uniform limit of homeomorphisms from $\bar D$ to $Z$, the class of maps we are going to consider now more closely. 
Let again the space $Z$ be $K$-biLipschitz to the unit disc, let us fix three distinct points $p_1,p_2,p_3$ on $S^1$ and three distinct points $x_1,x_2,x_3$ on the boundary circle $\Gamma$ of $Z$. Let $\Lambda _0 (Z)$ denote the set of all continuous maps $u:\bar D\to Z$, which send $p_i$ to $x_i$, which are uniform limits of homeomorphisms from $\bar D$ to $Z$, and whose restrictions to $D$ are contained in the Sobolev space $W^{1,2} (D,Z)$. As uniform limits of homeomorphisms, any map $u\in \Lambda _0 (Z)$ has the whole set $Z$ as its image. When applied to all circles $\gamma $ contained in $D$, the conclusion of \lref{pseudo} shows that any $u\in \Lambda _0(Z)$ is $K^2$-pseudomonotone in the sense of \cite{MM}. Fixing a biLipschitz homeomorphism $\psi:\bar D \to Z$, we see that $\psi ^{-1} \circ u:\bar D\to \bar D$ is pseudomonotone as well. Using \cite{MM}, we deduce that $\psi ^{-1} \circ u$ satisfies Lusin's property (N), for any $u\in \Lambda _0 (Z)$. Hence, any $u\in \Lambda _0 (Z)$ satisfies Lusin's property (N) as well. See also \cite{Kar07}, Theorem 2.4. \begin{lem} \label{samemes} For all elements $u\in\Lambda _0 (Z)$ the value $\operatorname{Area}_{\mu ^{\mathcal{I}}} (u)$ is independent of the choice of $u$. \end{lem} \begin{proof} Fix again the biLipschitz homeomorphism $\psi:\bar D\to Z$ and consider $v=\psi ^{-1} \circ u \in \Lambda _0 (\bar D)$. Since $v$ is a uniform limit of homeomorphisms, any fiber of $v$ is a cell-like set (\cite{HNV04}, p.97), in particular, any such fiber is connected. Applying the area formula to the continuous Sobolev map $v: D\to \bar D$ which satisfies Lusin's property (N) (cf. \cite{Kar07}), we see that for almost all $z\in D$ the preimage $v^{-1} (z)$ has only finitely many points. By the connectedness of the fibers, we see that almost every fiber $v^{-1} (z)$ has exactly one point. 
Now we see: $$\operatorname{Area}_{\mu ^{\mathcal{I}}} (u) = \int_D \mathbf{J}^{\mathcal{I}} (\ap\md u_z)dz =\int_D |\det(d_zv)| \mathbf{J}^{\mathcal{I}} (\operatorname{md}\psi_{v(z)})dz.$$ The area formula for the Sobolev map $v:D\to D$ (\cite{Kar07}) gives us: $$\operatorname{Area} _{\mu^{\mathcal{I}}} (u)= \mathbf{J}^{\mathcal{I}} (\operatorname{md}\psi_y)dy = \operatorname{Area} _{\mu^{\mathcal{I}}}(\psi).$$ \end{proof} The next lemma is essentially taken from \cite{Jost}: \begin{lem} \label{equicon} For any $C>0$, the set $\Lambda _0 ^C (Z)$ of all elements $u$ in $\Lambda_0 (Z)$ with $E^2 _+ (u) \leq C$ is equi-continuous. \end{lem} \begin{proof} The equi-continuity of the restrictions of $u$ to the boundary circle $S^1$ is part of the classical solution of the Plateau problem, see \cite{LW}, Propostion 7.4. By the Courant-Lebesgue lemma (\cite{LW}, Lemma 7.3), for any $\epsilon>0$ there is some $\delta =\delta (\epsilon, C)$ such that for any $x\in \bar D$ and any $u\in \Lambda ^{C} _0 (Z)$ there is some $\sqrt {\delta }> r>\delta$ such that $\partial B_r (x) \cap \bar D$ is mapped by $u$ to a curve of diameter $\leq \epsilon$. If $B_{\delta} (x)$ does not intersect the boundary circle $S^1$ then $u(B_{\delta} (x))$ has diameter $\leq K^2\cdot \epsilon$ by \lref{pseudo}. On the other hand, if $B_{\delta} (x)$ intersects $S^1$, then we see that the image of the intersection of $B_{\delta} (x)$ with $S^1$ has diameter bounded as well by some $\epsilon ' >0$ depending only on $\delta $ and going to $0$ with $\delta $, due to the equi-continuity of the restrictions $u|_{S^1}$. We may assume $\epsilon=\epsilon '$. Then the Jordan curve consisting of the corresponding parts of $\partial B_{\delta} (x)$ and boundary $S^1$ has as its image a curve of diameter at most $2 \epsilon$. Thus using the biLipschitz property of $Z$ as in \lref{pseudo}, we see that the ball $B_{\delta} (x)$ is mapped onto a set of diameter $\leq 2K^2 \cdot \epsilon$. 
\end{proof} The proof above shows that the modulus of continuity of any $u\in \Lambda _0 ^C (Z)$ depends only on the constants $C,K$, the boundary circle $\Gamma \subset Z$ and the choice of the fixed points $x_i \in \Gamma$. \begin{cor} \label{corfin} There is a map $u\in \Lambda _0 (Z)$ with minimal $\mathcal{I}$-energy in $\Lambda _0 (Z)$. This element $u$ satisfies $\operatorname{Area} _{\mu ^{\mathcal{I}}} (u) =\lambda _{\mathcal{I}} \cdot E_{\mathcal{I}}(u)$. \end{cor} \begin{proof} Take a sequence $u_n \in \Lambda _0 (Z)$ whose $\mathcal{I}$-energies converge to the infimum of $\mathcal{I}$ on $ \Lambda _0 (Z)$. By \eqref{2ener}, $E^2_+$ is bounded by a multiple of $\mathcal{I}$. Therefore, we can apply \lref{equicon} and deduce that the sequence $u_n$ is equi-continuous. By Arzela-Ascoli, we find a map $u:\bar D\to Z$ as a uniform limit of a subsequence of the $u_n$. This map $u$ is a uniform limit of uniform limits of homeomorphisms, hence $u$ itself is a uniform limit of homeomorphisms. Moreover, $u (p_i)=x_i$ for $i=1,2,3$. Finally, the map is contained in $W^{1,2} (D,X)$ as an $L^2$-limit of Sobolev maps with uniformly bounded energy, hence $u\in \Lambda _0 (Z)$. Since $\mathcal{I}$ is quasi-convex, we have $E_{\mathcal{I}} (u) \leq \lim _{n\to \infty} E_{\mathcal{I}}(u_n)$, see \cite{LW}, Theorem 5.4. Therefore, $u$ has minimal $\mathcal I$-energy in $\Lambda _0 (Z)$. If $\phi:\bar D\to \bar D$ were a biLipschitz homeomorphism with $E_{\mathcal{I}}(u\circ \phi )< E_{\mathcal{I}}(u)$ we would consider a M\"obius map $\phi _0:\bar D\to \bar D$, such that $\phi \circ \phi _0$ fixes the points $p_i$. Then the map $u':= u\circ \phi \circ \phi _0$ is in $\Lambda _0 (Z)$ and has the same $\mathcal{I}$-energy as $u\circ \phi$, due to the conformal invariance of $\mathcal{I}$. This would contradict the minimality of $E_{\mathcal{I}}(u)$ in $\Lambda _0 (Z)$. 
Hence such a homeomorphism $\phi$ cannot exist and we may apply \lref{mainlem}, to obtain the equality $\operatorname{Area} _{\mu^{\mathcal{I}}} (u) = \lambda _{\mathcal{I}} \cdot E_{\mathcal{I}}(u)$. \end{proof} Now it is easy to use $\psi _n$ to obtain a contradiction to the quasi-convexity of $\mathcal{I}$. Denote by $Z_n$ the image $\psi _n (\bar D)$ and by $Z$ the ellipse $L(\bar D)$. By construction, all $Z_n$ and $Z$ are $K$-biLipschitz to $\bar D$ and share the same boundary circle. We denote it by $\Gamma$ and fix the same triple $x_1,x_2,x_3$ in $\Gamma$ for all $Z_n$ and $Z$. Consider a map $v_n \in \Lambda _0 (Z_n)$ with minimal $\mathcal{I}$-energy in $\Lambda_0 (Z_n)$. By \cref{corfin}, such $v_n$ exists and satisfies $\operatorname{Area}_{\mu ^{\mathcal{I}}}(v_n) =\lambda _{\mathcal{I}} \cdot E_{\mathcal{I}}(v_n)$. Moreover, by \lref{equicon} and the subsequent observation, the maps $v_n$ are equi-continuous. Finally, by \lref{samemes}, we have $\operatorname{Area} _{\mu ^{\mathcal{I}}} (v_n)= \operatorname{Area} _{\mu ^{\mathcal{I}}} (\psi _n)$. The images of the maps $v_n:\bar D\to Z_n\to Y$ are contained in a compact set. Hence, by Arzela-Ascoli after choosing a subsequence, the maps $v_n$ uniformly converge to a map $v:\bar D\to Z$. Moreover, identifying $Z_n$ with $Z$ by some uniformly biLipschitz homeomorphisms point-wise converging to the identity of $Z$, we see that the limiting map $v$ can be represented as a uniform limit of homeomorphisms from $\bar D$ to $Z$. Since the $v_n$ have uniformly bounded energies, the limit map $v$ lies in the Sobolev class $W^{1,2} (D,Z)$. Thus, by construction, $v \in \Lambda _0 (Z)$. Finally, by the semi-continuity of $\mathcal{I}$, we must have $E_{\mathcal{I}}(v) \leq \liminf _{n\to \infty} E_{\mathcal{I}}(v_n)$. 
Taking all inequalities together we get for large $n$: \begin{equation*} \begin{split} \operatorname{Area} _{\mu ^{\mathcal{I}}} (v)&=\operatorname{Area}_ {\mu ^{\mathcal{I}}} (L|_D )\geq \operatorname{Area} _{\mu ^{\mathcal{I}}}(\psi _n) +\epsilon = \lambda _{\mathcal{I}} \cdot E_{\mathcal{I}}(v_n) +\epsilon\\ & \geq \lambda _{\mathcal{I}} \cdot E_{\mathcal{I}}(v) + \frac 1 2 \epsilon. \end{split} \end{equation*} But this contradicts the main inequality \eqref{genineq} and finishes the proof of \tref{quasiarea}. \end{proof} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR } \providecommand{\MRhref}[2]{% \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2}
1,108,101,565,838
arxiv
\section{Introduction}\label{section:introduction} This paper studies two new facility location problems relevant to questions of Internet traffic monitoring and content distribution. These problems differ from their more standard predecessors in that each customer must be served by two facilities rather than one. In addition, the service routes must be shortest paths and vertex-disjoint. More specifically, suppose we are given a network modeled as an arc-weighted, strongly connected directed graph $G=(V,A)$, together with sets $C \subseteq F \subseteq V$, where $C$ is the set of customer locations and $F$ is the set of potential facility locations. For each pair $(c,f)$, $c\in C$ and $f \in F$, let $P(c,f)$ be the set of all shortest paths from $c$ to $f$ in $G$, which is nonempty by our strong connectivity assumption (and would be so for typical computer networks). \begin{Def} Suppose $c \in C$ is a customer location and $\{f_1,f_2\}$ is a pair of potential facility locations in $F - \{c\}$. Then $\{f_1,f_2\}$ {\em covers $c$ in a pathwise-disjoint fashion} if there exist paths $p_1 \in P(c,f_1)$ and $p_2 \in P(c,f_2)$ that have no common vertex except $c$. Such a pair {\em covers $c$ in a setwise-disjoint fashion} if {\em no} path in $P(c,f_1)$ shares a vertex (other than $c$) with {\em any} path in $P(c,f_2)$. \end{Def} \begin{Def} A subset $F' \subseteq F$ is called a {\em pathwise-disjoint} (respectively, {\em setwise-disjoint}) {\em cover} for $C$ if, for every $c\in C-F'$, there is a pair $\{f_1,f_2\} \subseteq F'-\{c\}$ such that $\{f_1,f_2\}$ covers $c$ in a pathwise-disjoint (respectively, setwise-disjoint) fashion. (Note that if $c \in F'$, we are assuming that $c$ covers itself, and hence the set $C$ is a valid cover for itself.) 
\end{Def} The two problems we study are defined as follows: \begin{Def} In {\sc Pathwise-Disjoint Facility Location} {\sc (PDFL)}, we are given $G$, $C$, $F$, and asked to find a pathwise-disjoint cover of minimum size for $C$. {\sc Setwise-Disjoint Facility Location} {\sc (SDFL)} is the same problem except that the cover must be setwise-disjoint. \end{Def} The {\sc Pathwise-} and {\sc Setwise-Disjoint Facility Location} problems arise in a variety of networking contexts. Our primary motivation for studying them comes from a scheme proposed in \citet{GBDS-loss08} for active monitoring of end-to-end network performance, which we shall describe in Section \ref{section:applications}. However, both variants have a simple alternative motivation in terms of an idealized content distribution problem, which we shall use to help motivate the definitions. Suppose we wish to distribute data, such as video-on-demand, over a network that connects our service hubs but does not provide a rapid method for repairing link or vertex failures. Suppose further that the service interruptions caused by such failures would be costly to us, and that we want our distribution process to be relatively robust against them. A common standard of robustness is immunity to any single vertex or link failure (as for instance might result from an accidental cable cut). To guarantee such resilience, we would need to place multiple copies of our data source in the network, but because of the costs of hosting such copies, we would like to minimize the number of such hosting sites that we deploy, rather than placing a copy of the data at each service hub. {\sc Pathwise-Disjoint Facility Location} models this application as follows. The network $G = (V,A)$ is the underlying fiber network linking various service hubs, with the directions of the arcs all reversed, so that $P(c,f)$ is the set of all shortest paths from $f$ to $c$ in the original network, rather than those from $c$ to $f$. 
The set $C$ of customer locations is the set of service hubs that need access to the data. The set $F$ of facility locations is the set of potential sites where the data can be hosted, which we assume includes the service hubs and possibly other network vertices. If we assume the standard Internet default that shortest paths should be used for routing, the sets $P(c,f)$ now correspond to the paths in the fiber network over which we can route content from facility $f$ to customer $c \neq f$. If we further assume that link capacity is not an issue, then the pathwise-disjoint cover of minimum size for $C$ represents the minimum-cost choice of hosting locations for our data, subject to the constraint that no single vertex or link failure can disconnect a (nonfailed) service hub from all the data sources. {\sc Setwise-Disjoint Facility Location} models the variant of this application in which we do not have control over the routing, but instead must rely on the network to do our routing for us. Many Internet Service Providers (ISP's) route packets within their networks using a shortest-path protocol such as OSPF or IS-IS. In such protocols, packets must be routed along shortest paths, where the weight (length) of an arc is set by the network managers so as to balance traffic and optimize other performance metrics. If there is more than one shortest path leaving a given router for a given destination, then the traffic is split evenly between the alternatives. This can be of further help in balancing traffic, and so traffic engineers may specifically set weights that yield multiple shortest paths between key routers. The actual splitting is performed based on computing hash functions of entries in a packet's header (such as the source and destination IP addresses). These functions are themselves randomly chosen, are subject to change at short notice, and are typically not available to us. 
Thus when there are multiple shortest paths, although contemporaneous packets from a given router to the same destination are likely to follow the same path, the actual route chosen may not be readily predictable. All we know is that it must be a member of the set $P(c,f)$ of all shortest paths from $c$ to $f$. This means that the only way to guarantee vertex-disjoint paths to a customer $c$ from two facility locations $f$ and $f'$ is to restrict attention to pairs $(f,f')$ such that the corresponding shortest path sets intersect only in $c$, and consequently our problem becomes a {\sc Setwise-Disjoint Facility Location} problem. \medskip In this paper, we analyze the complexity of the PDFL and SDFL problems and propose and test algorithms for them. A first observation is that both problems can be viewed as special cases of {\sc Set Cover By Pairs} (SCP), first described in \citet{HS05}. \medskip \noindent {\sc Set Cover By Pairs} (SCP): Given a ground set $U$ of elements, a set $S$ of {\em cover objects}, and a set $T$ of triples $(u,s,t)$, where $u \in U$ and $s,t \in S$, find a minimum-cardinality covering subset $S' \subseteq S$ for $U$, where $S'$ {\em covers} $U$ if for each $u \in U$, there are $s,t \in S'$ such that $(u,s,t) \in T$. \medskip PDFL and SDFL can be formulated as SCP by taking $U = C$, $S = F$, and $$ T = \{(c,c,c): c \in C \} \ \cup\ \mbox{\huge \{} \begin{array}{ll} (c,f_1,f_2): & c \notin \{f_1,f_2\} \mbox{ and } \{f_1,f_2\} \mbox{ covers } c \mbox{ in a }\\ & \mbox{ pathwise-disjoint (setwise-disjoint) fashion} \end{array} \mbox{\huge \}.} $$ We prove, subject to a complexity assumption, that no polynomial-time algorithm can approximate SCP to within a factor which is $2^{\log^{1-\epsilon} n}$ for any $\epsilon>0$. The best previous hardness bound for SCP was just {\sc Set Cover}-hardness \citep{HS05}, which implies that no $o(\log n)$ approximation algorithm can exist unless P = NP \citep{RS97}. 
We then show that SDFL is just as hard to approximate as SCP, and that PDFL is at least as hard to approximate as {\sc Set Cover}. These complexity results (assuming their widely-believed hypotheses) rule out both polynomial-time heuristics that are guaranteed to find good covers and the existence of good lower bounds on the optimal cover size that can be computed in polynomial time. Nevertheless, there still may exist algorithms and lower bounds that are useful ``in practice.'' In this paper we describe and experimentally evaluate our candidates for both. We test four main heuristics. Each uses as a subroutine a standard randomized greedy heuristic (\textsc{Greedy}) that actually solves the general SCP problem. The first of our main algorithms, {\textsc{Greedy(400)}}, is the variant of {\textsc{Greedy}} that performs 400 randomized runs and returns the best solution found. The second is a genetic algorithm ({\textsc{Genetic}}) that uses {\textsc{Greedy}} as a subroutine. The third and fourth, {\em Single Hitting Set} (\textsc{SHS}) and {\em Double Hitting Set} (DHS), apply only in the Setwise-Disjoint case, exploiting the graph structure in ways that are unavailable to us in the path-disjoint case. The quality of the solutions that the heuristics produce can, for small instances, be evaluated by applying a commercial optimization package (CPLEX\texttrademark\, Version 11 in our case) to an integer programming formulation of the derived instances of {\sc SCP}. This is usually feasible when $|F|\le 150$, although running times grow dramatically with graph size. For the set-disjoint case we introduce a new lower bound that exploits the graphical nature of our problem to create an instance of the {\sc Hitting Set} problem (a dual to {\sc Set Cover}) whose optimal solution value can be no greater than the optimal solution value for our problem. 
Although this lower bound can underestimate the optimal by a linear factor in the worst case, and its computation requires solving an NP-hard problem, it turns out to be easily computed using CPLEX and to give surprisingly good bounds on all our test instances. In fact, it yielded the optimal solution value for all of them, since for each instance at least one of our heuristics produced a solution whose value matched the bound. Moreover, when restricted to our test instances with optimized OSPF weights, the Hitting Set solution itself was a feasible (and hence optimal) solution to our original problem. For the Path-Disjoint case, our only lower bound is the comparatively weak one of considering the variant of the problem where we drop the requirement that the paths be shortest paths. The optimum for the resulting problem can be computed in linear time, but is substantially below the optimal PDFL solution, where the latter can be computed. However, for these instances, it at least shows us the penalty imposed by our restriction to shortest paths. For larger instances, we can use our {\textsc{Genetic}} algorithm to provide some idea of how much we might be able to improve on {\textsc{Greedy(400)}} if time were not an issue. A final algorithmic challenge was that of constructing the derived {\sc SCP} instances needed by all our heuristics. This involves exploiting shortest path graphs to determine the (often quite large) sets of relevant triples. Significant algorithmic ingenuity is needed to prevent this computation from being a major bottleneck, and we will describe the mathematical observations and algorithmic techniques that make this possible. \subsection{Outline} The remainder of the paper is organized as follows: In Section \ref{section:applications} we describe the network monitoring application that motivated our study. In Section \ref{section:complexity} we present our complexity results. 
In Section \ref{section:lbs} we present the lower bounds we use for evaluating our cover-creating heuristics, and our algorithms for computing them. This includes the integer programming formulations for computing the true optima for our problems from their Cover-by-Pairs formulations. In Section \ref{section:heuristics} we describe the heuristics we have devised, as well as the (nontrivial) algorithms we use to convert our problems to their Cover-by-Pairs formulations. Our test instances are described in Section \ref{section:instances}, and our experiments and their results are summarized in Section \ref{section:experiments}. We conclude in Section \ref{section:further} with a discussion of further research directions, including preliminary results for significant variants on our problems, such as the cases where not all members of $C$ need be in $F$, and where facility locations have costs and we wish to find a minimum-cost cover. \subsection{Related Work} This paper is, in part, the journal version of a conference paper \citep{BDD11}. However, it contains substantial amounts of new material, including the new SDFL lower bound and related algorithms, a faster version of the Genetic algorithm, details of the omitted proofs, key implementation details of our algorithms, and more detailed experimental results and analysis, including an expansion of the experimental testbed to contain significantly larger synthetic and real-world instances. The only previous work on {\sc Set Cover by Pairs}, as far as we know, is that of \citet{HS05}, which is theoretical rather than experimental. That paper considers two applications that were significantly different from those introduced here, and, from a worst-case point of view, much easier to approximate. The paper also introduces a variant of the Greedy algorithm studied here for the general {\sc SCP} problem and analyzes its (poor) worst-case behavior. 
\section{Our Motivating Application: Host Placement for End-to-End Monitoring}\label{section:applications} In this section, we describe the monitoring application of \citet{GBDS-loss08} that motivated this paper, an application that is more realistic than the content distribution application mentioned in the previous section, but also more complicated. We describe it in some detail here to better motivate our study, and also to present a key new lemma that is actually needed to guarantee that the proposed monitoring scheme provides valid results. Suppose we are an Internet Service Provider (ISP) and provide ``virtual private network'' (VPN) service to some of our customers. In such a service, we agree to send traffic between various locations specified by the customer, promising to provide a certain level of service on the connections, but not specifying the actual route the packets will take. (The actual routing will be done so as to optimize the utilization of our network, subject to the promised levels of service.) Our network is a digraph $G = (V,A)$, in which the vertices correspond to routers and the arcs to the links between routers. A key service quality metric is packet loss rate (the fraction of packets on a path that fail to reach their destination). Let $p(r_1,r_2)$ denote the probability that a packet sent from router $r_1$ to router $r_2$ will successfully arrive. Our goal is to obtain estimates for $p(r_i,r_j)$ for a collection of customer paths $P_{r_i,r_j}$. Note that, in contrast to our content distribution application, we here do not worry about links' failing (which would cause re-routing), but merely about their underperforming. One way to measure the loss rate on the path in our network from router $r_1$ to router $r_2$ is to attach extra equipment to the routers, use the equipment at $r_1$ to send special measurement packets to $r_2$, and use the equipment at $r_2$ to count how many of the packets arrive. 
If $N$ packets are sent and $N'$ arrive, then $N'/N$ should be a good estimate for $p(r_1,r_2)$, assuming $N$ is sufficiently large. Unfortunately, the process of authorizing, installing, and maintaining the extra equipment can be time-consuming and expensive. Thus, this scheme may not be practical in a large network with hundreds or thousands of distinct path endpoints. For this reason, \citet{GBDS-loss08} proposed an alternative scheme that may yield a substantial reduction in the total amount of monitoring equipment needed. \begin{figure}[h] \vspace{-.20in} \begin{center} \centerline{\includegraphics[width=3.5in]{Scheme1.pdf}} \vspace{-.25in} \caption{The centralized monitoring scheme of \citet{GBDS-loss08}.\label{fig:scheme1}} \end{center} \vspace{-.25in} \end{figure} In this new scheme, all the monitoring is initiated from a single special measurement vertex $M$, as originally proposed in \citet{burch:sigm05} and \citet{breslau:inm06}. See Figure \ref{fig:scheme1}. To measure loss on the path from vertex $r_1$ to vertex $r_2$, the equipment at $M$ sends a packet on a circular path that first goes from $M$ to $r_1$ (the {\em hop-on} path), then traverses the path from $r_1$ to $r_2$, and finally returns from $r_2$ to $M$ (the {\em hop-off} path). Let us make the following assumptions: \begin{enumerate} \item Packets are only dropped by arcs, not by vertices. (This is a close approximation to reality in modern-day networks, where an arc models the system consisting of the physical wire/fiber connecting its endpoints, together with the line card at each of its ends.) \item The three paths $P_{M,r_1}$, $P_{r_1,r_2}$, and $P_{r_2,M}$ are pairwise arc-disjoint. (As we shall show below, this will typically be true under shortest-path routing.) \item Loss rates on different arcs are independent of each other. (This is somewhat less realistic, but is approximately true except in heavily-loaded networks.) 
\end{enumerate} Then if $N$ packets are sent on the circular path $P_{M,r_1,r_2,M}$, the expected number $N'$ of packets successfully making the roundtrip will be $N' = Np(M,r_1)p(r_1,r_2)p(r_2,M)$. Thus if we measure $N'$ and have good estimates for $p(M,r_1)$ and $p(r_2,M)$, we will have the estimate $$ p(r_1,r_2) = \frac{N'/N}{p(M,r_1)p(r_2,M)}\:. $$ Thus we have reduced the problem of measuring the loss rates for a collection of paths between arbitrary vertices to that of measuring the loss rates on a collection of round-trip paths and estimating the loss rates for a collection of hop-on and hop-off paths, all of which either begin or end at $M$. \citet{breslau:inm06} proposed that these loss rates for a given path endpoint $r$ be estimated by sending packets along an $(M,r,M)$ circuit and, if, here, $N$ packets were sent and $N'$ received, concluding that $p(M,r) = p(r,M) = \sqrt{N'/N}$. Unfortunately, this assumes that Internet performance is symmetric, which it definitely is not. A quite accurate way to measure the loss rates would of course be to put equipment at both ends of each of the hop-on and hop-off paths, but this method would require installing equipment at just as many routers as in the original scheme for measuring the $P_{r_1,r_2}$ paths directly -- indeed at one more vertex, since now we need equipment at $M$. {\sc Setwise-} and {\sc Pathwise-Disjoint Facility Location} arise in the context of a ``tomographic'' method proposed by \citet{GBDS-loss08} for estimating loss rates on hop-on and hop-off paths in a potentially much more efficient fashion. 
\begin{figure} \begin{center} \vspace{-.35in} \centerline{\includegraphics[width=3.5in]{Scheme2.pdf}} \vspace{-.1in} \caption{Scheme of \citet{GBDS-loss08} for measuring loss rate of hop-on and hop-off paths.\label{fig:scheme1a}} \end{center} \vspace{-.25in} \end{figure} In terms of the facility location problems, the set $C$ of ``customer'' vertices will consist of the endpoints of the original paths whose loss rates we wish to estimate. The set $F$ of ``facility locations'' will be these plus those additional vertices that are capable of hosting monitoring equipment. In this context, we will call $F$ the set of (potential) monitoring vertices. Assuming as before that we are in a shortest-path routing regime such as OSPF or IS-IS, the set $P(r,m)$ is the set of all legal routes from $r$ to $m$. We have already observed that, if we install equipment at $r$ itself, it is straightforward to estimate the loss rates $p(M,r)$ (for the hop-on path to $r$) and $p(r,M)$ (for the hop-off path from $r$) -- simply send packets between $M$ and $r$ and count the number of them that successfully arrive at their destinations. Suppose $r$ is a path endpoint without equipment and $(m_1,m_2)$ is a pair of monitoring vertices that cover $r$ in a pathwise-disjoint fashion. We will now explain how, by installing monitoring equipment at $m_1$ and $m_2$, we can estimate both loss rates. See Figure \ref{fig:scheme1a}. Assuming we are allowed to specify the routing paths from $r$ to $m_1$ and $m_2$, the fact that $m_1$ and $m_2$ cover $r$ in a pathwise-disjoint fashion means that we can pick legal routing paths $P_1$ and $P_2$ from $r$ to $m_1$ and $m_2$, respectively, that are vertex-disjoint except for $r$ (and hence arc-disjoint). Moreover, as we shall see, we also have that the two paths $P_1$ and $P_2$ are arc-disjoint from the path $P$ from $M$ to $r$, which itself is arc-disjoint from the path $P'$ from $r$ to $M$ (under reasonable assumptions). 
This is a consequence of the following lemma, whose additional assumptions have to do with the arc weights used by OSPF and IS-IS in their shortest path computations. These weights are set by traffic engineers to help balance traffic loads and normally obey certain restrictions. First, they are positive integers. Second, in practice networks are typically {\em symmetric} directed graphs, in that if the digraph contains an arc $(a,b)$, then it must also contain arc $(b,a)$, and we assume that the weights $w$ for our digraph are themselves symmetric, in that for every arc $(a,b)$, we have $w(a,b) = w(b,a)$. In real world networks, the weights typically are symmetric, and even networks with asymmetric weights have very few arcs where $w(a,b) \neq w(b,a)$. \begin{lemma}\label{sptheo} Suppose we are given a symmetric directed graph $G=(V,A)$, a weight function $w$ on the arcs that is symmetric and positive, and three vertices $a,b,c$. If $P_{a,b}$ and $P_{b,c}$ are shortest-weight paths in this digraph from $a$ to $b$ and $b$ to $c$, respectively, then they are arc-disjoint. \end{lemma} \smallskip \noindent \proof Suppose that, contrary to the Lemma, $P_{a,b}$ and $P_{b,c}$ share a common arc $(x,y)$. Then the path $P_{a,b}$ can be broken up into a path $P_{a,x}$ followed by arc $(x,y)$ followed by a path $P_{y,b}$, where $a$ and $x$ may possibly be the same node, as may $y$ and $b$. Similarly the path $P_{b,c}$ can be broken up into a path $P_{b,x}$ followed by arc $(x,y)$ followed by a path $P_{y,c}$. See Figure \ref{fig:theorem1}. \begin{figure} \begin{center} \includegraphics[width=4in]{Theorem1.pdf} \caption{Illustration for proof of Lemma \ref{sptheo}.\label{fig:theorem1}} \end{center} \end{figure} Note that each of the subpaths of these paths must themselves be shortest paths between their endpoints since the overall path is itself a shortest path. For instance the path $P_{x,b}$ that is the concatenation of $(x,y)$ with $P_{y,b}$ is a shortest path from $x$ to $b$. 
Let us extend the definition of $w$ to paths $P$ by letting $w(P)$ be the sum of the weights of the arcs in $P$. Then, for instance, $w(P_{x,b}) = w(x,y) + w(P_{y,b})$. If we let $P_{b,y}$ be the path from $b$ to $y$ obtained by reversing the path $P_{y,b}$, then, since weights are symmetric, we must have $w(P_{b,y}) = w(P_{y,b}) = w(P_{x,b}) - w(x,y) = w(P_{b,x}) - w(x,y)$. But now consider the path from $b$ to $c$ obtained by concatenating the two paths $P_{b,y}$ and $P_{y,c}$. It has length $w(P_{b,y}) + w(P_{y,c}) = w(P_{b,x}) - w(x,y) + w(P_{y,c})$. On the other hand, by hypothesis, the shortest path from $b$ to $c$ has length $ w(P_{b,x}) + w(x,y) + w(P_{y,c})$, and is hence $2w(x,y)$ longer than the length of this $P_{b,y}$ plus $P_{y,c}$ path. Since by hypothesis $w(x,y) > 0$, this is a contradiction. $\Box$ \smallskip The basic idea of the technique of \citet{GBDS-loss08} for estimating the loss rate $p(M,r)$ using these paths is to send multicast packets from $M$ to $r$ along path $P$, replicate them at $r$, and then send the copies along paths $P_1$ and $P_2$ to $m_1$ and $m_2$, respectively. After this, $m_1$ and $m_2$ report back to $M$ (using a guaranteed-delivery service such as TCP) as to which packets arrived. Based on this information, $M$ estimates $p(M,r)$. The loss rate $p(r,M)$ can be estimated by sending packets along the $(M,r,M)$ loop and counting the number that arrive back at $M$, using the fact that the loss rate for the loop should be $p(M,r)p(r,M)$. (We note that a result like Lemma \ref{sptheo} is needed if this method is to provide reliable estimates, a fact not observed in \citet{GBDS-loss08}, which contained no such result.) This scheme may require two monitoring hosts to measure the hop-on and hop-off rates for a path endpoint $r$, rather than the single one that would be required if we placed the monitoring equipment at vertex $r$ itself. 
However, the scheme has the potential advantage that a given monitoring vertex can be re-used to handle many different path endpoints. Thus there could be a substantial net overall savings in the total number of monitoring vertices used, and hence in equipment and operational cost. As stated, the problem of finding a minimum-sized set of monitoring vertices at which to place equipment so that we can estimate loss rates for all hop-on and hop-off paths is simply our original {\sc Pathwise-Disjoint Facility Location} problem. In practice, however, we will most likely have to rely on the ISP's routing protocol (OSPF or IS-IS) to deliver our packets, and so, as with our first application, will face the {\sc Setwise-Disjoint Facility Location} problem. It should be noted that, in contrast to that first application, the necessity for {\em vertex}-disjoint paths from $r$ to $m_1$ and $m_2$, rather than simply arc-disjoint paths, is less clear, since by the previous lemma we can only guarantee that these paths are arc-disjoint from the path from $M$ to $r$. This is a meaningless distinction in the Setwise-Disjoint case, however, in light of the following lemma. \begin{lemma}\label{avtheo} Suppose $P(c,f)$ and $P(c,f')$ are the sets of all shortest paths from vertex $c$ to vertices $f$ and $f'$, respectively, in a given digraph $G$. Then no path in $P(c,f)$ shares an arc with any path in $P(c,f')$ if and only if no path in $P(c,f)$ shares a vertex other than $c$ with any path in $P(c,f')$. \end{lemma} \noindent \proof The no-shared-vertex case trivially implies the no-shared-arc case, so let us assume that there are no shared arcs and argue that there are also no shared vertices other than $c$. Suppose there were a shared vertex $x \neq c$. 
Then there is a shortest path $P$ from $c$ to $f$ that is the concatenation of a path $P_{c,x}$ from $c$ to $x$ with a path $P_{x,f}$ from $x$ to $f$, both themselves shortest paths, and a shortest path $P'$ from $c$ to $f'$ that is the concatenation of a path $P_{c,x}'$ from $c$ to $x$ with a path $P_{x,f'}'$ from $x$ to $f'$, both themselves shortest paths. But then the paths $P_{c,x}$ and $P_{c,x}'$ must have the same length. Thus the path $P''$ that is the concatenation of $P_{c,x}$ with $P_{x,f'}'$ is a shortest path from $c$ to $f'$, and must be contained in $P(c,f')$. Moreover, since $x \neq c$, the path $P_{c,x}$ contains at least one arc $a$. Thus the path $P'' \in P(c,f')$ shares the arc $a$ with the path $P \in P(c,f)$, a contradiction. $\Box$ \smallskip A detailed description of the implementation of this scheme and the formulas used for estimating $p(M,r)$ and $p(r,M)$ is presented in \citet{GBDS-loss08}. \section{Complexity}\label{section:complexity} In this section we investigate the computational complexity of {\sc Pathwise-} and {\sc Setwise-Disjoint Facility Location}, covering both general hardness results and a polynomial-time solvable special case. \subsection{Hardness of Approximation}\label{section:hardness} We first observe that the more general {\sc Set Cover by Pairs} problem is not only NP-hard, but also strongly inapproximable in the worst-case. Let $n = |U|$. \citet{HS05} observed that SCP is at least as hard to approximate as {\sc Set Cover}, which cannot be approximated to within a $o(\log n)$ factor unless P = NP \citep{RS97}. We can prove a much stronger inapproximability result (albeit with a slightly stronger complexity assumption). \begin{theorem} \label{scp-hard} If ${\rm NP} \not \subseteq {\rm DTIME} (n^{O({\rm polylog}(n))})$, no polynomial-time algorithm can be guaranteed to find a solution to SCP that is within a factor of $2^{\log^{1-\epsilon} n}$ of optimal for any $\epsilon > 0$. 
\end{theorem} \medskip \noindent \begin{proof} The theorem follows via an approximation-preserving transformation from the {\sc MinRep} problem of Kortsarz, who showed the above inapproximability bound to hold for {\sc MinRep} \citep{Kortsarz01}. The reader may readily confirm that, for the type of approximation bound being proved here, it suffices for the transformation to provide a one-to-one correspondence between solutions for the source and target instances (and their values) and take polynomial time (which also causes the blow-up in the size of the constructed instance to be bounded by a polynomial). In {\sc MinRep}, we are given a bipartite graph $G$ with vertex set $V=A \cup B$ and edge set $E$, where $|A|=|B|=kq$ for positive integers $k,q$, together with partitions of the vertex sets on each side of the bipartite graph into $k$ groups of $q$ vertices, $A_1$ through $A_k$ on the left and $B_1$ through $B_k$ on the right. We are also given an integer $K$. We ask whether there is a subset $V' \subseteq V$ with $|V'| \leq K$ such that for every pair $(A_i,B_j)$, $1 \leq i,j \leq k$, if $G$ contains an edge between a vertex in $A_i$ and one in $B_j$, then so does the subgraph induced by $V'$. We transform {\sc MinRep} to SCP by letting the items to be covered be the pairs $(A_i,B_j)$ where $G$ contains an edge between a member of $A_i$ and a member of $B_j$. The set of covering objects is $V$, with the item $(A_i,B_j)$ covered by all pairs $\{u,v\} \subseteq V$ in which $u \in A_i$, $v \in B_j$, and $(u,v) \in E$. There is then a one-to-one correspondence between SCP cover sets and subsets $V'$ meeting the {\sc MinRep} requirements, with the two sets having the same size. Hence any approximation guarantee for SCP implies an equally good guarantee for {\sc MinRep}, and so SCP must be at least as hard to approximate. 
\end{proof} \begin{theorem} \label{equivalence} The special cases PDFL and SDFL retain much of the complexity of SCP: \begin{enumerate} \item SDFL is at least as hard to approximate as SCP. \item PDFL is at least as hard to approximate as {\sc Set Cover}. \end{enumerate} \end{theorem} \begin{proof} We shall initially prove the results without the restriction that $C \subseteq F$, and then sketch how the proofs can be modified (by appropriate replication) to enforce the restriction. For Claim 1, we will show how to transform an arbitrary instance of SCP into an equivalent instance of SDFL. Suppose the given instance of SCP consists of sets $U = \{u_1,\ldots ,u_p\}$ and $S = \{s_1,\ldots ,s_q\}$ and relation $T \subseteq U \times S \times S$. By the definition of SCP, we may assume without loss of generality that if $(u,s_i,s_j) \in T$, then $i \leq j$. In the corresponding instance of SDFL, there are four types of vertices. \begin{enumerate} \item For each $u \in U$, a customer vertex $c_u \in C$. \item For each $u \in U$, a {\em connector} vertex $x_u$. \item For each $s \in S$, a facility location vertex $f_s \in F$. \item For each pair $(s_i,s_j)$, $1\leq i < j \leq q$, a {\em pair vertex} $v_{s_i,s_j}$. \end{enumerate} There are four types of edges, all having length 1. (The constructed digraph is symmetric, so instead of constructing both $(a,b)$ and $(b,a)$, we just construct one undirected edge $\{a,b\}$.) \begin{enumerate} \item For each customer vertex $c_u \in C$, an edge $\{c_u,x_u\}$. \item For each connector vertex $x_u$ and each facility location vertex $f_s$, an edge $\{x_u,f_s\}$. \item For each pair $(s_i,s_j)$, $i < j$, an edge between $c_u$ and $v_{s_i,s_j}$ {\em unless} $(u,s_i,s_j) \in T$. \item For each pair $(s_i,s_j)$, $1\leq i < j \leq q$, the two edges $\{v_{s_i,s_j},f_{s_i}\}$ and $\{v_{s_i,s_j},f_{s_j}\}$. 
\end{enumerate} We claim that $F' \subseteq F$ is a feasible cover for the constructed SDFL instance if and only if $S' = \{ s: f_{s} \in F'\}$ is a feasible cover for the original SCP instance. Note that in the constructed digraph, there is a path of length two between each customer vertex $c_u$ and each facility location vertex $f_{s_i}$, i.e., $\langle c_u, x_u, f_{s_i}\rangle$, and this is the shortest possible path. Thus $P(c_u,f_{s_i})$ consists precisely of this path, together with all paths $\langle c_u,v_{s_i,s_j},f_{s_i}\rangle$ where $i \leq j$ and $(u,s_i,s_j) \notin T$ and all paths $\langle c_u,v_{s_j,s_i},f_{s_i}\rangle$ where $i > j$ and $(u,s_j,s_i) \notin T$. Thus the only vertex that a path in $P(c_u,f_{s_i})$ can possibly share with a path in $P(c_u,f_{s_j})$, $i \leq j$, other than $c_u$ itself, is $v_{s_i,s_j}$, which will be in shortest paths from $c_u$ to $f_{s_i}$ and to $f_{s_j}$ if and only if $(u,s_i,s_j) \notin T$. Hence a pair $\{s_i,s_j\}$ will cover an element $u$ in the SCP instance if and only if the pair $\{f_{s_i},f_{s_j}\}$ jointly covers the customer vertex $c_u$ in the constructed SDFL instance, so the optimal solution values for the two instances coincide. To get an equivalent instance in which $C \subseteq F$, we replicate the above construction $|S|+1$ times, while, for each $s \in S$, identifying the $|S|+1$ copies of vertex $f_s$. We then add the $|S|+1$ copies of each customer vertex $c_u$ to $F$ to obtain a new set $F_+$ of facilities, one that now contains all the customer vertices in our expanded instance. Let $F'$ be an optimal solution to our original instance. Note that it remains an optimal solution to the expanded instance. Thus if $F_+'$ is an optimal solution to this new instance, we must have $|F_+'| \leq |F'| \leq |F| = |S| < |S|+1$. Thus at least one of the $|S|+1$ copies of our original instance fails to have any of its customer vertices in $F_+'$. 
This implies that $F \cap F_+'$ must be a feasible solution for that copy, and so $|F_+'| \geq |F'|$. But this implies that $|F_+'| = |F'|$. So the optimal solution value for our expanded instance still equals that for the original instance of SCP. Moreover, the expanded instance can be constructed in polynomial time and has size within a polynomial factor of the size of the original SCP instance. Hence the inapproximability result for SCP carries over to SDFL. \smallskip For Claim 2, we again prove the result without the restriction that $C \subseteq F$. The extension to the case where the restriction holds follows by a replication construction similar to that given in the proof of Claim 1. Our transformation is from {\sc Set Cover}, which we already know cannot be approximated to within a $o(\log n)$ factor unless P = NP \citep{RS97}. Given a {\sc Set Cover} instance, we construct an instance of PDFL whose optimal solution differs from the optimal solution to the {\sc Set Cover} instance by at most 1. Thus a polynomial-time $o(\log n)$ approximation for our problem would imply one for {\sc Set Cover}. An instance of {\sc Set Cover} consists of a ground set $U = \{u_1,\ldots,u_n\}$ and a collection ${\cal C} = \{C_1,\ldots,C_m\}$ of subsets of $U$. Assume without loss of generality that $\cup_{C \in \cal{C}}C = U$. Our constructed graph has four types of vertices: \begin{enumerate} \item For each $i$, $1 \leq i \leq n$, a customer vertex $c_i \in C$. \item For each $i$, $1 \leq i \leq n$, a {\em connector} vertex $x_i$. \item For each $j$, $0 \leq j \leq m$, a potential facility vertex $f_j \in F$. Note that $f_0$ is an added facility, not corresponding to any member of ${\cal C}$. \item A {\em universal connector} vertex $x_0$. \end{enumerate} There are four types of (undirected) edges, all having length 1: \begin{enumerate} \item For $1 \leq i \leq n$, an edge $\{c_i,x_i\}$. 
\item For each $i,j$, $1 \leq i \leq n$ and $1 \leq j \leq m$, such that $u_i \in C_j$, an edge $\{x_i,f_j\}$. \item For each $i$, $1 \leq i \leq n$, an edge $\{c_i,x_0\}$. \item For each $j$, $0 \leq j \leq m$, an edge $\{x_0,f_j\}$. \end{enumerate} Note that for each pair $i,j$, $1 \leq i \leq n$ and $1 \leq j \leq m$, there is a path of length two from $c_i$ to $f_j$ through $x_0$, and this is the shortest possible path. Thus for each such $i,j$, $P(c_i,f_j)$ contains only paths of length two. More precisely, by construction, if $u_i \notin C_j$ it contains just the path $\langle c_i,x_0,f_j\rangle$, and otherwise it contains that path plus the path $\langle c_i,x_i,f_j\rangle$. Suppose first that $\cal{C}'$ is a feasible solution to our {\sc Set Cover} instance. Then it is easy to see that $\{f_0\}\cup\{f_j: C_j \in \cal{C}'\}$ is a feasible solution for our problem. Each $c_i$ is covered by $f_0$ and an $f_j$ such that $u_i \in C_j$, which must exist because $\cal{C}'$ is a cover. Thus the optimum for our constructed instance is at most one more than the {\sc Set Cover} optimum. Conversely, suppose that $F' \subseteq F$ is a feasible solution for our constructed instance. For each customer vertex $c_i$, there must be at least one vertex in $F'$ that does not equal $f_0$, and so $F' -\{f_0\}$ must be a set cover for the original instance. Thus the optimum for the original instance is at most the optimum for our constructed instance. As in the proof of Claim 1, we can now, by replication, construct an instance of PDFL where all customers are facilities, and the optimum value remains unchanged. Consequently, the two optima are within one of each other, which implies that a polynomial-time algorithm with $o(\log n)$ approximation guarantee for PDFL would yield one for {\sc Set Cover} as well. 
\end{proof} \subsection{The Special Case of Trees}\label{sec:trees} In this section, we consider networks derived from trees, where for a given tree $T = (V,E)$ the corresponding network is the symmetric directed graph $G = (V,A)$, with both $(u,v)$ and $(v,u)$ in $A$ whenever $\{u,v\} \in E$. We first note that, for such instances, the two problems PDFL and SDFL coincide. This follows from the fact that for every ordered pair $u,v$ of vertices, there is exactly one simple path from $u$ to $v$, which is therefore the unique shortest path. More interestingly, the problems can be solved in polynomial (in fact, linear) time. Indeed, the optimal solution has a simple combinatorial characterization, and can be constructed by a simple tree traversal. \begin{theorem} \label{treetheo} The following procedure constructs an optimum cover under PDFL. \begin{enumerate} \item While $T$ contains a leaf vertex $v$ that is not a customer, delete $v$. \item Return the set $F^*$ of leaves of the residual tree as our cover. \end{enumerate} \end{theorem} \begin{proof} Note that in the residual tree, all leaves are customers and hence facility locations, so $F^* \subseteq F$, as required. Every customer in $F^*$ is already covered by itself. If there is any customer $v \notin F^*$, it must be an internal vertex of the residual tree, and hence there must be at least two edges incident on $v$ in the residual tree. Each such edge must lie on a path to a leaf of the residual tree, and those paths can contain no common vertex other than $v$ because we are dealing with a tree. But all leaves of the residual tree are in $F^*$, so this means that $v$ has vertex-disjoint shortest paths to at least two members of $F^*$ and hence is covered by $F^*$. 
\end{proof} \section{Algorithms}\label{section:lbs} The complexity results of the previous section rule out (assuming widely-believed hypotheses) both polynomial-time heuristics that are guaranteed to find good covers and the existence of good lower bounds on the optimal solution value that can be computed in polynomial time. Nevertheless, there still may exist algorithms and lower bounds that are useful ``in practice.'' In this section and the next, we describe our candidates for both, beginning with lower bounds, which provide us with a standard of comparison for evaluating the quality of the solutions our heuristics provide. \subsection{Optimal Solutions Using Mixed Integer Programming.}\label{section:MIP} The best standard of comparison is, of course, the optimal solution value. It is feasible to compute this for small instances using mixed integer linear programming, by essentially viewing our instances of PDFL and SDFL as {\sc Set Cover by Pairs} instances, modified to account for the fact that a customer, in its role as a facility, can cover itself. This allows us to leverage general purpose commercial software for solving our problems. Our MIP formulation is simple and straightforward. It has a zero-one variable $x_f$ for each facility $f \in F$, where $x_f=1$ if $f$ is chosen for our cover. In addition, we have a real nonnegative variable $y_{f,f'}$ for each pair $\{f,f'\}$ of facilities, subject to the constraints that $y_{f,f'}\le x_f$ and $y_{f,f'}\le x_{f'}$. Note that these together imply that $y_{f,f'}$ can be positive only if both $x_f$ and $x_{f'}$ equal 1 (i.e., are in the chosen cover). To guarantee that the chosen set is a cover, we have the following constraints, one for each facility $c \in C$. \begin{equation}\label{ILP1} x_c + \sum_{f,f':\{f,f'\} \mbox{ covers } c} y_{f,f'}\ge 1 \end{equation} \noindent where ``covers'' is interpreted as ``covers in a path-disjoint fashion'' for PDFL and as ``covers in a set-disjoint fashion'' for SDFL. 
The goal of our MIP is to minimize $\sum_{f \in F} x_f$. We derive the MIP instances from the corresponding PDFL/SDFL instances using the ``triple generation'' algorithms described at the end of this section, and then attempt to solve them using the version 11.0 CPLEX\texttrademark\ MIP solver. This MIP approach proved practical for a wide range of ``small'' instances, enabling us to find optimal solutions to all but two of the instances in our test set with $|F| \leq 150$. Our detailed experimental results will be summarized in Section \ref{section:experiments}. \subsection{A Hitting Set Lower Bound}\label{section:HSLB} For larger instances of the set-disjoint variant of our problem (SDFL), our standard of comparison is derived from a new lower bound, based on the construction of a special instance of the NP-hard {\sc Hitting Set} problem that CPLEX finds it particularly easy to solve. The bound depends on the following simple lemma. \begin{lemma}\label{neighborlemm} Suppose $G = (V,A)$ is an arc-weighted directed graph, and $u,v,w$ are distinct vertices in $V$. Let $P_{v}$ be a shortest path from $u$ to $v$, and let $x$ be the first vertex encountered on this path. If there is a shortest path $P_{w}$ from $u$ to $w$ that shares a common vertex other than $u$ with $P_{v}$, then there is a shortest path $P_{w}'$ from $u$ to $w$ that contains $x$. \end{lemma} \begin{proof} Let $y$ be the first vertex in $P_{w}$, other than $u$, that is also in $P_{v}$. If $y=x$, we are done, so suppose not. Divide $P_v$ into subpaths $P_{v,1}$ from $u$ to $y$ and $P_{v,2}$ from $y$ to $v$. Note that $x$ remains the first vertex encountered in $P_{v,1}$. Similarly divide $P_{w}$ into subpaths $P_{w,1}$ from $u$ to $y$ and $P_{w,2}$ from $y$ to $w$. Note that all these paths must be shortest paths from their sources to their destinations. Thus, in particular, $P_{v,1}$ and $P_{w,1}$ must have the same length since they are both paths from $u$ to $y$. 
Hence, the concatenated path $P_w' = P_{v,1} P_{w,2}$ must be a shortest path from $u$ to $w$, and it contains $x$. \end{proof} For a given customer $c$, let $N(c)$ denote the set of all vertices $x$ such that $(c,x) \in A$ -- the {\em neighbors} of $c$. For each facility $f \neq c$, let $N(c,f)$ be that subset of nodes of $N(c)$ that are included on shortest paths from $c$ to $f$. As a consequence of Lemma \ref{neighborlemm}, a pair $\{f_1,f_2\}$ will cover $c$ in a setwise-disjoint fashion if and only if $N(c,f_1)\cap N(c,f_2)$ is empty. Thus a necessary (although not sufficient) condition for $F' \subseteq F$ to contain a pair $\{f_1,f_2\}$ that covers $c$ is that $\bigcap_{f \in F'}N(c,f) = \emptyset$. We can use this observation to construct an integer program whose feasible set contains all legal setwise-disjoint covers, and hence whose optimal solution provides a lower bound on the size of the optimal set-disjoint cover. As in the MIP for an optimal solution, we have zero-one variables $x_f$ for all $f \in F$, with $x_f = 1$ meaning that facility $f$ is in our chosen cover, and we wish to minimize $\sum_{f \in F} x_f$. Now, however, these are the only variables, and we have a different type of constraint, one for each pair of a customer $c$ and a neighbor $x$ of $c$: \begin{equation}\label{LBIP} x_c + \sum_{f\neq c \mbox{ and }x \notin N(c,f)}x_f\ge 1. \end{equation} \begin{figure}[t] \vspace{-.1in} \begin{center} \includegraphics[width=3.5in]{BadHSLB-cropped0.pdf} \vspace{-.1in} \caption{Instances where the HSLB underestimates the optimal cover size by a factor of $n/3$.}\label{fig:HSLB} \end{center} \vspace{-.3in} \end{figure} This IP can be viewed as a ``Hitting Set'' problem, where a facility $f$ ``hits'' a pair $(c,x)$ if $x \notin N(c,f)$ or if $f=c$. For this reason, we call the optimal solution to the IP the {\em hitting set lower bound} (HSLB). It is easy to see that computing this bound is NP-hard. 
Thus its computation is likely to require exponential time in the worst case, and our SDFL hardness of approximation result, which only applies to polynomial-time algorithms, is not applicable. Nevertheless, the Hitting Set Lower Bound can be quite bad: In the worst case it can underestimate the optimal solution value by a linear factor. See Figure \ref{fig:HSLB}, which depicts a scheme for instances where the HSLB is too low by a factor of $n/3$. In this figure there are $n+3$ facility vertices, $f_1$, $f_2$, $f_3$, plus the customer vertices $c_1$ through $c_n$, together with three additional vertices ($v_1$, $v_2$, $v_3$) that are the neighbors of the customer vertices. Each (undirected) edge in the figure represents a pair of oppositely directed arcs of length 1. Note that for each neighbor vertex $v$, there is one of the $f_i$ whose shortest paths from the customer vertices $c_i$ do not contain $v$. Thus, for $n>3$, the solution to the HSLB integer program is the set $\{f_1,f_2,f_3\}$, for a lower bound of 3. On the other hand, for each $c_i$ and each pair of facilities $\{f_j,f_k\} \subseteq \{f_1,f_2,f_3\}$, we have $N(c_i,f_j) \cap N(c_i,f_k) \neq \emptyset$ since it contains one of the neighbor vertices. Thus no pair of these vertices covers any customer vertex. In addition, for any customer vertex $c_h$ other than $c_i$, {\em all} three neighbor vertices are on shortest paths from $c_i$ to $c_h$, so no other customer vertex can help cover $c_i$ either. Thus all the customer vertices must cover themselves, and hence the optimal cover has size $n$. Fortunately, the kind of structure occurring in Figure \ref{fig:HSLB}, where $|N(c,f)|/|N(c)| = 2/3$ for all customer/facility pairs, does not tend to occur in practice. Because of the way networks are designed and the way OSPF weights are typically set, the number of shortest path ties is typically limited, and the ratios $|N(c,f)|/|N(c)|$ are much smaller. 
In part because of this, the hitting set lower bound is quite good in practice. Indeed, as we shall see in Section \ref{section:experiments}, the HSLB equaled the optimal solution value for all our SDFL test instances. The solution to the IP was also often a feasible (and hence optimal) solution to the original SDFL instance. Moreover, the hitting set instances themselves turn out to be relatively easy for CPLEX to solve, so we were able to obtain HSLBs for all our SDFL test instances in reasonable time, and usually get optimal solutions as a welcome side-effect. In light of these observations, we will also include our HSLB code in our comparisons of solution-finding heuristics, albeit one that either produces an optimal solution or no solution at all. \subsection{An Unconstrained Path-Disjoint Lower Bound}\label{sec:pdlb} The previous lower bound applied only to the set-disjoint variant of our problem, and required a linear programming package to compute. For the path-disjoint variant (PDFL), the best polynomial-time computable lower bound we currently have is not nearly as strong in practice, but can be computed in linear time. It applies only to networks that are symmetric directed graphs and hence can be modeled as undirected graphs, but, as remarked above, most real world networks have this property. It equals the minimum cover size for the variant of PDFL where we drop the constraint that our paths be shortest paths, and only insist that they be vertex-disjoint. Let us call the task of finding such a cover the {\em unconstrained path-disjoint facility location problem} (UPDFL). The algorithm for solving the UPDFL problem relies on three observations. 
\begin{figure} \begin{center} \includegraphics[height=3in]{TreeFig.pdf} \caption{\label{treefig}\small Decomposition into biconnected components.} \end{center} \end{figure} \begin{enumerate} \item Any undirected graph can be decomposed in linear time into its 2-connected components, the resulting structure being treelike, with the 2-connected components playing the role of vertices. (See Figure \ref{treefig}.) \item If our network is 2-connected, then any pair of facility locations in it will cover all the customers it contains. \item We already have an easily modifiable algorithm for PDFL on trees (see Section \ref{sec:trees}). \end{enumerate} The first observation is attributed to Hopcroft in \citep{AHU}, which describes the algorithm in detail. To see the tree structure more clearly, call a vertex an {\em articulation point} if deleting it from the graph breaks the graph into two or more separate connected components, and consider the graph $t(G)$ derived from $G$ as follows. The vertices of $t(G)$ are the leaves, the articulation points, and for each 2-connected component $C$, a new vertex $v_C$ representing the component. The edges consist of all original edges that did not connect two vertices in the same 2-connected component, together with edges $\{v,v_C\}$ for every pair of an articulation point $v$ and a 2-connected component $C$ that contains it. This graph cannot contain a cycle. If it did, such a cycle would have to contain at least two vertices, each representing either a vertex not in any 2-connected component or a maximal 2-connected set. However, the existence of the cycle would imply that the set consisting of all the vertices and 2-connected components represented in the cycle would itself be 2-connected, a contradiction. Thus $t(G)$ has no cycles, and is a connected graph since by assumption $G$ is. Hence it is a tree. The proofs of observations (2) and (3) go as follows: \smallskip \noindent {\sc Proof of Observation 2}. 
By definition, if a graph $G$ is 2-connected and $u$ and $v$ are distinct vertices of $G$, then $G$ contains two paths joining $u$ to $v$ whose only common vertices are $u$ and $v$ themselves. Let $u$ and $v$ be two facility locations in our 2-connected graph $G$, and let $c$ be a customer vertex in $G$. If $c \in \{u,v\}$ we are done, so we assume it is distinct from both. Let $P_1(u)$ and $P_2(u)$ be two vertex-disjoint paths from $u$ to $c$, and similarly, let $P_1(v)$ and $P_2(v)$ be two vertex-disjoint paths from $v$ to $c$. Such paths exist by the definition of 2-connectivity. We are done if either of $P_1(v)$ and $P_2(v)$ is vertex-disjoint from one of $P_1(u)$ and $P_2(u)$. So assume that both $P_1(v)$ and $P_2(v)$ intersect both of $P_1(u)$ and $P_2(u)$. At most one of them can contain the vertex $u$, so assume without loss of generality that $P_1(v)$ does not contain $u$, and let $w$ be the first vertex it contains that is an internal vertex of one of $P_1(u)$ and $P_2(u)$, say, without loss of generality, $P_1(u)$. Then the path that starts with $v$, proceeds along $P_1(v)$ until it hits $w$, and then proceeds along $P_1(u)$ until it reaches $c$, is vertex-disjoint from $P_2(u)$, and once again $u$ and $v$ cover $c$, and this holds for all cases. $\Box$ \smallskip \noindent {\sc Proof of Observation 3}. Here is the needed modified version of the algorithm of Section \ref{sec:trees}. We assume there is at least one customer, as otherwise the empty set is an optimal cover. Call a 2-connected component a {\em leaf component} if it contains only one articulation point. As usual, call a vertex a {\em leaf} if it has degree 1. Note that no leaf can be contained in a 2-connected component, and that all nonleaf vertices that are not in a 2-connected component are articulation points. The {\em internal vertices} of a 2-connected component are all its vertices that are not articulation points.
As in the algorithm of Section \ref{sec:trees}, we start with a pruning process. If a vertex is a leaf in the current graph and not a customer, delete it to get a new ``current graph.'' Similarly, if a leaf component has no internal vertices that are customers, delete all its internal vertices, leaving only its (single) articulation point. After the pruning is complete, the final graph $G^*$ is such that every leaf is a customer and every leaf component contains an internal customer. There are two cases to consider: \begin{enumerate} \item $G^*$ contains no leaves or leaf components. \item $G^*$ contains a leaf or leaf component. \end{enumerate} The first case implies that $G^*$ is a 2-connected graph. By assumption, it must contain at least one customer. If just one, say $v$, then $\{v\}$ is an optimal cover. If more than one, then no single facility can cover all of them, so pick any two customer vertices, say $u$ and $v$, and $\{u,v\}$ will be an optimal cover by our second observation. For the second case, first note that since all the leaves are now customers, they must be in any optimal cover. Similarly, any cover must contain at least one facility in the interior to each leaf component. To see this, let $v$ be the articulation point for the component, and $c$ an interior point of the component that is a customer. If our purported cover contains no facilities interior to the component, there must be two paths to $c$ from facilities it does contain, but all such paths must now contain $v \neq c$, and hence cannot be vertex-disjoint. Let $F^*$ be a set consisting of all the leaves, together with one internal customer from each remaining leaf component. By the above argument, we know that the optimal cover must be at least of size $|F^*|$. We now argue that $F^*$ itself is a cover, and hence an optimal one. Consider any customer vertex in $G^*$ that is not in $F^*$ and the corresponding vertex $v$ in the underlying tree $t(G^*)$. 
This is $v$ itself if $v$ is an articulation point or a vertex not contained in any 2-connected component, and otherwise is $v_C$ for the 2-connected component for which $v$ is an internal vertex. Note that every leaf of this tree either is a member of $F^*$ or corresponds to a 2-connected component containing a member. Thus if $v$ corresponds to a nonleaf of the tree, it must be covered by $F^*$, since it has vertex-disjoint paths to at least two leaves. The only other possibility is if $v$ is an internal vertex of a leaf component $C$ of $G^*$. But then $F^*$ must contain a vertex $f \neq v$ that is one of $C$'s internal facility locations, since it contains at least one and does not contain $v$. Let $u$ be the single articulation point in $C$. By our second observation, there are vertex-disjoint paths in $C$ from $v$ to $f$ and $u$. Since $u$ itself must be connected to some leaf outside of $C$, this means that $v$ has vertex-disjoint paths to two members of $F^*$. Thus $F^*$ is an optimal cover. $\Box$ \begin{figure}[t] \vspace{-.1in} \begin{center} \includegraphics[width=4in]{UPDFLfig0.pdf} \vspace{-.2in} \caption{Instances where the UPDFL lower bound underestimates the optimal cover size by a factor that is linear in $n$. The filled-in vertices are the only customers/facilities.}\label{UPDFLfig} \end{center} \vspace{-.3in} \end{figure} As with the Hitting Set Lower Bound for SDFL, the UPDFL lower bound for PDFL is quite bad in the worst case. For such examples we can restrict ourselves to 2-connected graphs, since, as implied by the arguments above, there is no advantage to allowing nonshortest paths except between vertices in the same 2-connected component. So consider the 2-connected graph schematized in Figure \ref{UPDFLfig}, where the ellipsis stands for $N-3$ additional connected vertex pairs in the middle row. Here the filled-in vertices are the customers and the empty vertices are neither customers nor facilities.
There are $N$ customers in the middle row, and every PDFL cover must include all of them, since any shortest path to a target customer in the row from any other customer vertex (in the row above it or below it) has to go through the vertex connected to the target on its left. However, an optimal UPDFL cover consists simply of the two customers at the top and bottom of the figure, since the length-6 path from the top customer to each middle customer shares no vertices with the length-2 path to it from the bottom customer. The total number of vertices is $n = 7N+2$, and so the $N/2$ ratio between the size of the two covers is linear in $n$. Note that this is also a lower bound on how large a penalty we can experience by limiting ourselves to shortest paths. For any given instance, if there is a gap between the UPDFL lower bound and the PDFL cover we construct, that gap is the sum of (1) the amount by which the number of monitors used in our constructed solution exceeds the optimal number, plus (2) the penalty (in excess monitors needed) for restricting ourselves to shortest paths. (There may also, of course, be penalties for {\em not} using shortest paths, such as added congestion and lessened monitoring accuracy.) \section{Heuristics}\label{section:heuristics} \subsection{Greedy Heuristic}\label{sec:greedy} The simplest, and most general, heuristic that we tested is a straightforward greedy heuristic, denoted by \textsc{Greedy}\ in what follows. This is a heuristic for solving the problem described by the MIP in Section \ref{section:MIP}, and can be applied both to path-disjoint and set-disjoint versions of the problem. It consists of three phases. \begin{enumerate} \item Construct the triples $\{(c,f,f'): \{f,f'\}$ covers $c$ in a path-disjoint (set-disjoint) fashion\} and store them in appropriate data structures. \item Initialize our cover $F' \subseteq F$ to the empty set.
Then, as long as $F'$ does not cover all $c\in C$, find an $f \in F - F'$ such that $F' \cup \{f\}$ covers a maximum number of additional elements. Since all customers are facilities, there is guaranteed to be a facility that will cover at least one more customer. \item {\em Minimalize} the cover by testing each facility in the cover in turn to see if its removal would still leave a valid cover, and if so remove it. \end{enumerate} Note that, except for the fact that we allow a customer to cover itself, this is essentially a greedy algorithm for the {\sc Set Cover by Pairs} problem. It differs from the ``Greedy'' algorithm of \citet{HS05} for that problem, however. The latter does not include a minimalization phase, and in its growth phase it chooses the best {\em pair} to add. It is hence likely to be significantly slower and less effective in practice than our greedy variant. Our implementation of {\textsc{Greedy}} uses randomized tie-breaking. It also allows two options for starting the construction phase: either take a pair that covers the maximum number of customers, or take a random customer, which at least covers itself. We also allow two options for the minimalization phase: either consider facilities in the reverse of the order in which they were added to the cover, or consider them in random order. None of these options (or combinations of options) seems to dominate any of the others, so here our primary tests cover a multiple-run variant on {\textsc{Greedy}} ({\textsc{Greedy(400)}}), which performs 400 runs and returns the best solution found. In those 400 runs, we cycle through the four option combinations, using each for 100 runs. Multiple runs help amortize the cost of triple generation. The latter takes time $\Omega(|C|\cdot |F|\cdot |F|)$ and typically yields a set $T$ of triples that is substantially smaller than the $|C|\cdot |F|\cdot |F|/2$ upper bound, whereas the time for one construction/minimalization cycle is $O(|T| + |F|^2)$. 
Moreover, as we shall see, the actual overall running times are not large enough to represent an obstacle in the proposed applications, where the time to deploy a solution will greatly exceed the time to compute it. Our experiments will, however, explore the tradeoffs between robustness and number of iterations. Aside from the triple-generation, which will be described in Section \ref{section:triples}, the implementation of \textsc{Greedy}\ is fairly straightforward, with two exceptions. First, to avoid the multiple cache misses that would occur if we did many random accesses into the list of triples $(c,f,f')$, we keep three copies of that list, one sorted by the customer $c$, one by the larger-indexed of $f$ and $f'$, and one by the smaller, with pointers into the start of the portion for each choice of the sorting key. The list indexed by $c$ is constructed as part of the triple generation process. The other two lists can be computed in linear time by first precomputing how many triples there are for each key and reserving space for them in the corresponding list, and then maintaining appropriate counters while the lists are being constructed. The relevant portions of the lists for each facility $f$ are accessed at most four times, once at the start, once when (and if) $f$ is added to the cover, once in preparation for minimalization, and once when (and if) $f$ is deleted. Another detail: Because we are storing the data three times, we trade a bit of time to save space, by running the triple generation code twice. The first run is used to compute the number of triples, which then allows us to reserve the correct amount of space for each list of triples, rather than simply pre-allocating room for the maximum number $|C|\cdot|F|\cdot(|F|-1)$ of triples. Our second nonobvious implementation detail consists of the data structures used to handle minimalization, where we need an efficient way of noticing when facilities are required, that is, can no longer be deleted. 
Let us call a pair of facilities $\{f,f'\}$ {\em valid} for a customer $c$ if the pair covers $c$ in the appropriate fashion, and both its members are still in our current cover. A facility $f$ can become required for one of two reasons: \begin{enumerate} \item $f$ is also a customer and there are no longer any valid pairs for it. \item There is a customer $c$ such that every valid pair for $c$ contains $f$. \end{enumerate} In order to determine when these situations occur, we keep track of two types of counts. First, for each customer $c$, we keep track of the current number {\tt mycount}$(c)$ of valid pairs for $c$. This makes it easy to check when the first of the above two conditions occurs. Second, for each customer/facility pair $(c,f)$, we keep track of the current number {\tt covercount}$(c,f)$ of valid pairs for $c$ that contain $f$. This information is then stored in a set of linked lists, one for each customer $c$ and number $n$ of pairs, where the list for $(c,n)$ contains all those facilities $f$ with {\tt covercount}$(c,f) = n$. To determine whether the second condition occurs, we simply look at the lists for $(c,\mbox{\tt mycount}(c))$ for all customers with {\tt mycount}$(c)$ less than the size of the current cover, since for a cover of size $s$, there can be at most $s-1$ valid pairs containing the same facility. It is not difficult to see that the total time for maintaining the data structures is $O(|T|)$, and the total time for spotting all the changes from nonrequired to required is at most $|C|$ times the number of deletions, which is $O(|C|\cdot |F|)$. \vspace{-.1in} \subsection{Genetic Algorithm} Genetic algorithms are variants on local search that mimic the evolutionary process of survival of the fittest. Our genetic algorithm, called \textsc{Genetic}\ in what follows, uses the ``random key'' evolutionary strategy proposed by \citet{Bea94}, as modified by \citet{GonRes10a} so as to bias the choices of parents and the operation of crossovers. 
In this approach, the ``chromosomes'' that do the evolving are not solutions themselves, but ``random-key'' vectors from which solutions can be derived. (This is in contrast to traditional genetic algorithms, in which the chromosomes are the solutions themselves.) As with \textsc{Greedy}, which it uses as a subroutine, our \textsc{Genetic}\ algorithm applies to the MIP formulation of the set-disjoint and path-disjoint versions of the problem, and does not directly exploit any graph-theoretical properties of the underlying network. Let the set of facilities be $F = \{f_1,f_2,\ldots ,f_k\}$. In \textsc{Genetic}, each chromosome is a 0-1 vector $( \mathit{gene}_1, \ldots, \mathit{gene}_k)$ of length $k$. We derive a solution (cover) $F'$ from such a chromosome as follows: Start with $F' = \{f_i: \mathit{gene}_i = 1,\ 1 \leq i \leq k\}$. If $F'$ is already a cover, halt. Otherwise, complete $F'$ to a cover using our basic \textsc{Greedy}\ algorithm (without randomization or minimalization). The ``fitness'' of the chromosome is then the size of the resulting cover $F'$. The overall genetic algorithm starts by creating a population of $p$ randomly generated chromosomes, in which each gene has an equal probability of being 0 or 1. The population then evolves in a sequence of generations. In each generation we start by computing the solution for each member of the population, yielding its fitness. The top 15\% of the population by fitness (the ``elite'' members) automatically pass on to the next generation's population. In addition, to provide diversity, 10\% of the next generation consists of randomly generated chromosomes, like those generated initially. For the remaining 75\%, we repeat the following ``biased crossover'' construction: Pick a random member $(x_1,x_2,\ldots,x_k)$ of the top 15\% of the current generation's population and a random member $(y_1, y_2,\ldots,y_k)$ of the remaining 85\%.
The ``child'' $(z_1,z_2,\ldots,z_k)$ of this pairing is determined as follows: For each $i$, independently, set $z_i = x_i$ with probability 70\%, and otherwise set $z_i = y_i$. This scheme ensures that the best solution always survives into the next generation, where it may continue as champion or be dethroned by a better solution. Generations are continued until $q$ have gone by without any improvement in the fitness (cardinality) of the best solution, which we then output. Our code includes the option of minimalizing this final solution, but in practice this never has helped. In our results for \textsc{Genetic}, we typically took $p = \min\{300,|F|\}$ and $q = |V|$. For these and the other parameters of the algorithm (top 15\%, etc.) we chose values based on preliminary experimentation and on intuition derived from applications of this approach to other problems. They appear to yield a good tradeoff between running time and quality of solution for our application, but we make no claim as to their optimality. In our experiments, \textsc{Genetic}\ did not prove competitive for the set-disjoint case. The much faster heuristics of the next two sections, which exploit the properties of that case, found optimal solutions for all our test instances. In the path-disjoint case, where, for our larger test instances, multiple-iteration \textsc{Greedy}\ is the only competition, and where we have no readily-computable lower bound that is as good as {\textsc{HSLB}} is for the set-disjoint case, \textsc{Genetic}\ can at least give us some idea of the extent to which \textsc{Greedy(400)}\ can be improved upon. \subsection{The Single Hitting Set Heuristic (\textsc{SHS})} This section describes a simple heuristic that applies to the set-disjoint variant of our problem (SDFL), and is based on Lemma \ref{neighborlemm} and our Hitting Set lower bound for that variant.
Instead of optimally solving the NP-hard Hitting Set problem described in Section \ref{section:HSLB}, we find a not-necessarily optimal solution using a straightforward greedy hitting set heuristic: Let $X \subseteq F$ denote the hitting set we are building, initially the empty set. While $X$ is not a valid hitting set, repeatedly add that facility that hits the most as-yet-unhit (customer,neighbor) pairs, ties broken randomly, where a facility $f$ hits a pair $(c,v)$ if either $f=c$ or $v \notin N(c,f)$. Once $X$ is a valid hitting set, we check whether it is also a valid cover of all the customers and, if not, we extend it to one using \textsc{Greedy}, this time including randomization and minimalization. (Minimalization is applied even if $X$ is itself already a cover.) As in \textsc{Greedy(400)}, we use this simple heuristic in a multi-iteration scheme, where we take the best solution found. This allows us to exploit the variance introduced by all the randomization -- in addition to all the randomized tie-breaking, we also alternate between reverse delete and randomized delete in the minimalization phases. We shall denote this scheme by \textsc{SHS}$(k)$, where $k$ is the number of iterations, again typically 400. Since \textsc{SHS}$(k)$ is a polynomial-time algorithm, Theorem \ref{equivalence} implies that we cannot prove a polylogarithmic worst-case performance guarantee for it unless a widely-believed conjecture is false. However, in practice the initial hitting set $X$ is typically close to a cover, and so the following provides an informative bound. \begin{theorem} \label{SHSbound} For a given instance $I$ of SDFL based on a graph $G=(V,A)$, let {\sc OPT}$(I)$ be an optimal cover, and suppose we run {\textsc{SHS}} one time, with $X(I)$ being the hitting set generated and \textsc{SHS}$(I)$ being the final cover.
Then, if we set $\Delta = |\mbox{\textsc{SHS}}(I)|-|X(I)|$, the number of facilities that needed to be added (removed) to turn $X$ into a minimalized cover, we have \[ \mbox{\textsc{SHS}}(I) \leq (\ln(|A|)+1)\cdot \mbox{\sc OPT}(I) + \Delta. \] \end{theorem} \begin{proof} This follows from the fact that the optimal solution to the hitting set problem of Section \ref{section:HSLB} is a lower bound on OPT$(I)$, together with the result of \citet{Johnson74} and \citet{Lovasz75} that the greedy algorithm for {\sc Set Cover} (and hence for {\sc Hitting Set}) has worst-case ratio at most $\ln n + 1$, where $n$ is the size of the set being covered/hit. \end{proof} Note that the $\ln n +1$ bound for the greedy {\sc Hitting Set} heuristic is essentially the best possible guarantee for a polynomial-time algorithm for this problem, as follows from \citet{BGLR93}, \citet{Feige98}, and \citet{AMS06}. In practice, however, the greedy {\sc Hitting Set} heuristic typically returns a set of size much closer than this to the optimal, often only a single-digit percentage above optimal. As we shall see, the performance of \textsc{SHS}$(k)$ is similarly good. \subsection{The Double Hitting Set Heuristic (\textsc{DHS})} This section describes a second heuristic for SDFL that exploits Lemma \ref{neighborlemm}, as well as the way OSPF weights tend to be set. It involves computing two hitting sets rather than one, but is a good match for {\textsc{SHS}}, in that both heuristics perform very well and neither consistently dominates the other. As remarked above, while OSPF can split traffic in the case of multiple shortest $c\rightarrow f$ paths, the amount of splitting that occurs in practice seems to be limited. The following two definitions provide a new way to quantify this effect. \begin{Def} A potential facility location $f$ is {\em good for} customer vertex $c$ if $f\ne c$ and $|N(c,f)| = 1$, that is, all shortest $c\rightarrow f$ paths leave $c$ via the same arc.
\end{Def} If there is just one shortest $c\rightarrow f$ path, clearly $f$ is good for $c$, but $f$ may be good for $c$ even when there are many $c\rightarrow f$ paths. Note that if $f$ is good for $c$, then, under OSPF, there can be no splitting at $c$ of the traffic from $c$ to $f$, although splitting at later vertices in the path would be possible. \begin{Def} Let $t$ be an integer, $1 \leq t \leq |F|$. A customer vertex $c$ is {\em $t$-good} if there are $t$ or more good potential facility locations $f$ for $c$ (and {\em $t$-bad} otherwise). \end{Def} For our test instances, inspired or derived from real-world networks and models, a large majority of the customers tend to be $t$-good, even for $t\sim |F|/2$, and almost all are $1$-good. Our Double Hitting Set heuristic (DHS) takes $t$ as a parameter, and is designed to cover the $t$-good customers nearly optimally, via a union $X\cup Y$ of two hitting sets, leaving the $t$-bad customers, if any, to be covered via the \textsc{Greedy}\ algorithm. Let $C_t$ denote the set of all $t$-good customers. For a given $t$, we shall, when it makes a difference, denote the algorithm by ${\rm DHS}_t$. The first step of ${\rm DHS}_t$ is to choose one good potential facility location $f \in F$ for each $c \in C_t$, without choosing too many vertices in total. For each such $c$, let $S_c$ be the set consisting of $c$ plus all the good potential facility locations for $c$. By the definition of $t$-good and the fact that $C \subseteq F$, we must have $|S_c| \geq t+1 \geq 1$. We want to choose a small set $X \subseteq F$ such that $X\cap S_c\ne \emptyset$ for all $t$-good customers $c$. In other words, we want an $X$ that hits (intersects) all such sets $S_c$. We again settle for using the standard greedy {\sc Hitting Set} heuristic, as described in the previous section, to construct a hitting set $X$ that we hope is near-optimal. 
Next, let $C' \subseteq C_t$ be the set of $t$-good customers $c$ covered by $X$, either because $c \in X$ or because there exist distinct $f_1,f_2 \in X$ such that $c$ is covered by $\{f_1,f_2\}$ in a setwise disjoint fashion. Let $C'' = C_t - C'$, and for each customer $c \in C''$, choose a facility $f_c \in X\cap S_c$. Note that by definition of $C''$, we have $f_c \neq c$, so let $x_c \in N(c)$ be the unique neighbor of $c$ which is on all shortest paths from $c$ to $f_c$. This choice is unique since the fact that $f_c \in S_c$ implies that $f_c$ is good for $c$, and so by definition all shortest paths from $c$ to $f_c$ leave $c$ through the same arc. Our second {\sc Hitting Set} problem is designed so that any solution $Y$ will, for each $c \in C''$, contain either $c$ itself or a facility $f \neq c$ such that $\{f_c,f\}$ covers $c$ in a setwise disjoint fashion. The {\sc Hitting Set} problem is specified as follows. For each such customer vertex $c$, let $F_c$ consist of $c$ together with all potential facility locations $f\ne c$ for which all shortest $c\rightarrow f$ paths {\em avoid} $x_c$. Note that by Lemma \ref{neighborlemm}, for any $f \in F_c$ (other than $c$ itself), the fact that $f_c$ is good for $c$ and the definition of $x_c$ together imply that the pair $\{f_c,f\}$ will cover $c$ in the desired fashion. We thus have the following: \begin{lemma} \label{XcupY} Given $X$ as above, any hitting set $Y$ of $\{F_c: c \in C''\}$ is such that $X\cup Y$ is a cover of $C' \cup C'' = C_t$, the set of all the $t$-good customer nodes $c$. \end{lemma} \noindent For the DHS algorithm, we also use the greedy {\sc Hitting Set} heuristic to produce $Y$. At this point, we have a cover $X \cup Y$ for $C_t$. DHS concludes by using \textsc{Greedy}\ to extend $X \cup Y$ to a minimalized cover $F'$ of {\em all} the customers, not just the $t$-good ones.
As with {\textsc{SHS}}, we will consider multi-iteration schemes, \textsc{DHS}$_t(k)$, where we perform $k$ independent runs of \textsc{DHS}$_t$, in each using randomized tie breaking when running the greedy {\sc Hitting Set} heuristic and the final \textsc{Greedy}\ completion phase, and alternating between reverse and random deletes in the minimalization phases. Our main experiments will be with \textsc{DHS}$_1(400)$ and \textsc{DHS}$_{\lfloor|F|/2\rfloor}(400)$, which seemed somewhat complementary in our initial, single-iteration trials. For simplicity, we shall denote the latter by {\textsc{DHS}}$_H$(400) (with the $H$ standing for ``half''). As with SHS$(k)$, since DHS$_t(k)$ is a polynomial-time algorithm, Theorem \ref{equivalence} implies that we cannot prove a polylogarithmic worst-case performance guarantee for it unless a widely-believed conjecture is false. However, in practice the initial hitting set $X$ is typically quite small, and often there are very few $t$-bad customers, so meaningful bounds are possible. \begin{theorem} \label{DHSbound} Suppose $1 \leq t \leq |F|$, and let $I$ be an instance of SDFL with at least one $t$-good customer. Let ${\rm OPT}_t(I)$ be the size of an optimal cover for the set $C_t$ of all $t$-good customers in $I$. Suppose we run ${\rm DHS}_t$ one time on $I$, with $X(I)$ and $Y(I)$ being the two hitting sets generated, which, as observed constitute a cover of $C_t$. Then, \begin{eqnarray*} |X \cup Y| \leq (\ln(|C_t|)+1) \cdot{\rm OPT}_t(I) + |X| \end{eqnarray*} and, if $t \geq |F|/3$, we have \begin{eqnarray*} |X \cup Y| \leq (3.47\cdot\ln(|C_t|)+2)\cdot{\rm OPT}_t(I) \end{eqnarray*} \end{theorem} \noindent Note that, for our implementation choice of $t = \lfloor |F|/2 \rfloor$, we have $t \geq |F|/3$ as long as $|F| \geq 2$. 
Also, the cover of all of $C$ produced by ${\rm DHS}_t$ can exceed $|X \cup Y|$ by at most the number $|C-C_t|$ of $t$-bad customers, so if almost all customers are $t$-good, the above bounds will reflect the overall performance of DHS. Finally, note that the logarithm here is of $|C|$ or something less, whereas for the analogous Theorem \ref{SHSbound} for SHS, the logarithm was over the potentially much larger $|A|$, which might affect the relative constant factors in the bounds. \smallskip \begin{proof} Observe that the sets $F_c$ in our second hitting set constitute a subset of those used in our {\sc Hitting Set} lower bound, restricted to the $t$-good customers. Thus the optimal hitting set for this second problem cannot be larger than ${\rm OPT}_t(I)$. Thus, by the results of \citet{Johnson74} and \citet{Lovasz75} and the fact that, by assumption, $C_t \neq \emptyset$, we have $|Y| \leq (1 + \ln |C_t|)\cdot{\rm OPT}_t(I)$, and the first claim follows. We cannot apply the same argument to the set $X$, since in constructing $X$ we restricted attention to good potential facility locations, and ${\rm OPT}_t(I)$ need not be so restricted. However, a different argument applies when $t \geq |F|/3$, which will lead to the proof of our second claim. This choice of $t$ means that, for each $t$-good vertex $c$, $|S_c| \geq |F|/3$. Therefore, $\sum_{\mbox{$c$ is $t$-good}}|S_c| \geq |C_t|\cdot|F|/3$, and so some potential facility location $f$ must be in at least $|C_t|/3$ of the sets. Consequently, the greedy choice must hit at least that many sets. By essentially the same argument, it must hit at least $1/3$ of the remaining unhit sets each time it makes a choice, and so must have completed constructing its hitting set $X$ after $y$ steps, where $(2/3)^y|C_t| < 1$. This means it suffices that $y \geq \ln(|C_t|)/\ln(3/2)$, and since $1/\ln(3/2) < 2.47$, we have $|X| \leq 1+ 2.47\cdot\ln(|C_t|)$. The second claim follows.
\end{proof} \subsection{Triple Generation.}\label{section:triples} All our cover-generation algorithms require the generation of the set of relevant triples $(c,f_1,f_2)$ (set-disjoint or path-disjoint) used by our basic \textsc{Greedy}\ algorithm -- the algorithms other than \textsc{Greedy}\ either use it as a core subroutine (\textsc{Genetic}) or as a fallback device to complete the cover they generate (SHS and DHS). Because this set is potentially of size $\Theta(|C|\cdot|F|^2)$, and would naively take significantly more time than that to generate, its computation typically constitutes a significant portion of the running time. In this section we show how the time can be reduced to $O(|V|^3)$ for real-world networks, in both the set- and path-disjoint cases. In what follows, let $n = |V|$ and $m=|A|$. For real-world data networks, we may assume that $m \leq an$ for some relatively small constant $a$. We also have that $|C| \leq |F| \leq n$, although both $|C|$ and $|F|$ may be proportional to $n$. Let $T_S$ and $T_P$ be the sets of triples needed by \textsc{Greedy}\ in the set-disjoint and path-disjoint cases, respectively. Let us first consider the set-disjoint case. A triple $(c,f_1,f_2)$ is in $T_S$ if $c\in C$, $f_1,f_2 \in F$, and no shortest path from $c$ to $f_1$ shares any vertex other than $c$ with any shortest path from $c$ to $f_2$. The naive way to test this would be to construct, for each $f_i$, the set $S_i$ of vertices on shortest paths from $c$ to $f_i$, and then test whether $S_i \cap S_j = \{c\}$. These sets could conceivably be of size proportional to $n$, yielding a running time that could be proportional to $|C||F|^2n$, so the best we can say about the running time of such an algorithm is that it is $O(n^4)$. However, recall from Lemma \ref{neighborlemm} that if $S_i \cap S_j$ contains some vertex other than $c$, then it contains a vertex that is a neighbor of $c$.
Thus we may restrict attention to the sets $N_i$ of vertices adjacent to $c$ that are on shortest paths to $f_i$. To compute these sets we first construct a shortest path graph from $c$ to all other vertices in time $O(m\log n)$, and then for each facility $f_i$ we can compute $N_i$ by a single backward traversal of the shortest path graph from $f_i$ in time $O(m)$. For each pair $f_i,f_j$, the intersection test can then be performed in time $O(outdegree(c))$. Amortized over all customers in $C$, the tests for a given pair $f_i,f_j$ take time $O(\sum_{c \in C} outdegree(c)) = O(m)$. Thus we get an overall time bound of $O(m \log n|C| + m|F|\cdot|C| + m|F|^2) = O(n^2m)$, which is $O(n^3)$ under our assumption that $m$ is $O(n)$. For the pathwise-disjoint problem, the naive algorithm is even worse, since for a given triple $c,f_1,f_2$, there may be exponentially many paths of potential length $\Omega(n)$ to compare. Here we must be more clever. We first observe that we can actually reduce the test to a simple network flow problem, by augmenting the shortest path graph with a new vertex $t$, adding arcs from $f_1$ and $f_2$ to $t$, giving all vertices and edges capacity one, and asking whether there is a flow of value 2 from $c$ to $t$. Next, we observe that this is a particularly easy flow problem, which can be solved in linear time as follows. As before, for a given customer $c$ we begin by computing a shortest path graph from $c$ to all other vertices in time $O(m\log n)$, to be amortized over all facilities. Set the capacities of all the arcs to 1. We then add a new vertex $t$, and edges of length and capacity 1 from all facilities $f$ to $t$. To handle a given candidate pair $(f_1,f_2)$, we pick some shortest path from $c$ to $f_1$ using the shortest path graph, and extend it to a shortest path from $c$ to $t$ by adding the link from $f_1$ to $t$. This yields a flow of 1 from $c$ to $t$. Now add a link of capacity 1 from $f_2$ to $t$.
There will be disjoint paths from $c$ to $f_1$ and to $f_2$ if and only if there is now a flow of value 2 from $c$ to $t$, and such a flow will exist if and only if there is an augmenting path from $c$ to $t$ in the graph. This can be found in linear time by a breadth-first search in the residual graph, where each arc $(u,v)$ in the first path is replaced by an arc $(v,u)$ of capacity 1. Finally, we observe that, having fixed the shortest path to $t$ through $f_1$ and constructed the residual graph, we can actually solve the network flow problems for all pairs $(f_1,f_i)$ in parallel with a single breadth-first search. The overall running time thus becomes $O(m\log n|C| + m|C|\cdot |F|) = O(n^2m)$, once again $O(n^3)$, assuming $m=O(n)$. What we have just described is actually the algorithm for generating path-disjoint triples when ``disjoint'' only means ``arc-disjoint.'' To get vertex-disjointness as well, one replaces each vertex $v$ in the shortest path graph by a directed arc $(v_{in},v_{out})$, with each arc $(u,v)$ replaced by $(u,v_{in})$ and each arc $(v,u)$ replaced by $(v_{out},u)$. In conclusion, we note that for our {\textsc{HSLB}} code in the set-disjoint case, some of the above work can be avoided, as the triples need only be considered implicitly. To construct the {\sc Hitting Set} linear program, we need only consider (customer,neighbor,facility) triples, at an amortized cost (and space) of $O(mn)$. The same triples can be reused in the verification phase, where we check to see whether the computed hitting set is actually a feasible solution to our problem, although now $O(mn^2)$ time may be required. \section{Network Test Instances}\label{sec:instances}\label{section:instances} We tested our algorithms on both synthetic and real-world instances. The two classes modeled different types of data networks and had distinctly different characteristics, enabling us to test the generality of our methods.
In this section we will discuss each type of instance, and compare the structural properties of the resulting testbeds. \subsection{Synthetic LAN/WAN Instances} Our 560 synthetic instances were designed to reflect the structure of large real-world local- and wide-area networks (LAN's and WAN's) and were sized so that we could study the scalability of our algorithms and the solutions they produce. They were generated via the following four-step process. \begin{enumerate} \item A transit-stub skeleton graph is generated using the Georgia Tech Internetwork Topology Models (GT-ITM) package \citep{CalDoaZeg97a}. We generated 10 graphs each for parameter settings that yielded $|V|=50,100,190,220,250,300,558,984$. (The value of $|V|$ is an indirect result of one's choice of the allowed input parameters.) \item Traffic demand is generated between all pairs of vertices in the skeleton graph using a gravitational model (described below) with the shortest path metric. \item Using the heuristic network design algorithms of \citet{BurResTho07a}, we determine link capacities and OSPF link weights such that all traffic can be routed in a balanced way on the resulting network using the OSPF routing protocol. Links with no traffic routed on them are deleted. \item For specified numbers of customer and facility location vertices, the sets $C \subseteq F$ are then randomly generated. \end{enumerate} \vspace{-.1in} \subsubsection{Step 1: Transit-Stub Skeleton Graphs} Transit-stub graphs \citep{CalDoaZeg97a} are hierarchical graphs made up of transit vertex components and stub vertex components. The stub vertex components can be thought of as access networks while the transit vertex components make up the backbone of the network. See Figure \ref{transitstubfig}.
\begin{figure}[t] \begin{center} \includegraphics[width=11cm,angle=0]{transit-stub-network.eps} \caption{A transit stub network with three transit domains and 16 stub domains.} \label{transitstubfig} \end{center} \vspace{-.25in} \end{figure} The GT-ITM package provides a parameterized method for generating such graphs randomly. The relevant papers and documentation are not totally consistent or clear, and the following description is our best guess as to how the networks are generated, based on the documentation and our inspection of selected outputs. The generation process involves many parameters, but we based our generation on the settings used in the illustrative example identified as ``file t100'' in the documentation file {\tt itm-doc.txt} available at {\tt http://www.cc.gatech.edu/projects/gtitm/}, varying only the parameters $T$, $N_T$, $S$, $N_S$, which determine the number of vertices. The constructed graphs consist of $T\cdot N_T$ {\em transit vertices} assigned randomly to $T$ {\em transit domains}. It is required that $T \geq 1$ and $N_T \geq 1$, and it appears that the generation process starts by placing one transit vertex in each transit domain so that none are empty. In addition, there are $S\cdot T\cdot N_T$ {\em stub domains} (one per transit vertex), and $S\cdot N_S\cdot T\cdot N_T$ {\em stub vertices}, assigned randomly to the stub domains. Again we must have $S \geq 1$ and $N_S \geq 1$ and it appears that the generation process starts by placing one stub vertex in each stub domain. Edges appear to be added to the network as follows: \begin{itemize} \item Each transit domain has an average of two edges to other transit domains. The endpoints of each edge are chosen randomly from the transit vertices in the two relevant domains. If the chosen edges fail to connect all the transit domains, they are deleted and the experiment is repeated until we do get a connected graph. 
\item Each transit vertex is associated with an average of $S$ stub domains, and is connected by an edge to a random member of each. \item For each pair of vertices within a given transit domain, there is an edge with probability 0.6. If the resulting choices do not make the transit domain connected, the experiment is repeated until a connected graph results. \item For each pair of vertices within a given stub domain, there is an edge with probability 0.42. If the resulting choices do not make the stub domain connected, the experiment is repeated until a connected graph results. \end{itemize} In addition, it appears that the code also adds a few extra edges between stub domains or between stub and transit domains, even though there should be none, given the parameter settings we chose. We deduce this from the fact that, although the above description suggests that the largest 2-connected component should be of size $\max(N_S,T \cdot N_T)$, we typically find that the largest is two to four times larger than that. Note that this process yields a network with a fixed number, $T\cdot N_T\cdot(1+S\cdot N_S)$, of vertices, but a random number of edges (some of which may be deleted in Step 3). Before proceeding to Step 2, we convert our graphs to symmetric directed graphs by replacing each edge $\{u,v\}$ with the two arcs $(u,v)$ and $(v,u)$. Since the original graph was designed to be connected, this directed graph will be strongly connected. The GT-ITM generator also produces a length $l(u,v)$ for each edge $\{u,v\}$, which we use in Step 3. We generated 10 graphs each for eight different sets of parameters, chosen to yield increasing values for $|V|$, from 50 to 984. In Table \ref{transitstubparams} we present the parameter choices, the corresponding values for $|V|$, the final average number of edges and the average degree (when the graph is viewed as undirected).
The graphs for instances with up to 558 vertices were generated by the 32-bit version of the GT-ITM code, which is apparently no longer supported. Those for the graphs with 984 vertices, which we generated more recently, were produced using the new, 64-bit code from Version 2.33 on {\tt ns-allinone}, downloaded from {\tt http://sourceforge.net/projects/nsnam/files/}. This version apparently has a bug, in that the parameters for the edge densities seem to be ignored. Consequently, both the stub and transit domains start out as complete graphs, and their sparsification, while still substantial, is due only to the Step 3 deletions, described in more detail below. \begin{table} \begin{center} { \begin{tabular}{rrrr|rrc} $T$ & $N_T$ & $S$ & $N_S$ & $|V|$ & Average $|A|$ & Average Degree\\ \hline 1 & 2 & 3 & 8 & 50 & 91.1 & 3.6\\ 1 & 4 & 3 & 8 & 100 & 177.2 & 3.5\\ 2 & 5 & 3 & 6 & 190 & 284.6 & 3.0\\ 2 & 5 & 3 & 7 & 220 & 360.2 & 3.3\\ 2 & 5 & 3 & 8 & 250 & 435.0 & 3.5\\ 2 & 6 & 3 & 8 & 300 & 531.6 & 3.5\\ 3 & 6 & 3 & 8 & 558 & 1172.2 & 4.2\\ 4 & 6 & 4 & 10 & 984 & 2083.3 & 4.2\\ \hline \end{tabular} } \caption{Input parameters and measured statistics of generated transit-stub graphs.}\label{transitstubparams} \end{center} \vspace{-.25in} \end{table} \subsubsection{Step 2: Traffic Demands} The traffic demands are created via a randomized ``gravitational'' method from \citet{FT00}. We first generate random numbers $o(v)$ and $d(v)$ for each vertex $v$, chosen independently and uniformly from the interval $(0,1)$. Here ``o'' stands for origin and ``d'' for destination. Then, for each pair of vertices $(u,v)$, we compute $D(u,v)$, the shortest length (in edges) of a path from $u$ to $v$. Let $Dmax$ be the largest of these distances. 
Then, for any ordered pair $(u,v)$ of distinct vertices, we choose a random number $r(u,v) \in (0,1)$ and set the traffic demand from $u$ to $v$ to be the (nonzero) value $$ {\Large t(u,v) = r(u,v) \cdot o(u) \cdot d(v)\cdot e^{ -\left(\frac{D(u,v)}{Dmax}\right)}}. $$ Note that, since our network is strongly connected, all of this traffic can be routed. \subsubsection{Step 3: OSPF Weights} We next choose OSPF weights and arc capacities that allow for an efficient routing of the traffic specified by the matrix $t(u,v): u,v \in V$. To do this, we apply the survivable IP network design tool developed by Buriol et al.\ and described in \citet{BurResTho07a}. The tool takes as input the skeleton graph and the traffic matrix. For each arc $(u,v)$, it produces an integer OSPF weight $w(u,v)$ in the range $[0,30]$ and an integer capacity $c(u,v)$, such that if all traffic is routed according to the OSPF weights (with traffic splitting if there is more than one shortest path from one vertex to another), no arc will have traffic load exceeding 85\% of its capacity. It does this while attempting to minimize the total arc cost $\sum_{(u,v)}l(u,v)\cdot c(u,v)$. The optimization is done via a biased random key genetic algorithm, using a population of size $50$ and run for $100$ iterations. For more details, see \citet{BurResTho07a}. (Running with larger parameter values would likely produce lower-cost solutions to the design problem, but this would increase the running time, and our goal here is simply to produce a sparse network on which the demand can be routed.) Note that any arc $(u,v)$ that is on some shortest path for the computed OSPF weights must have nonzero capacity $c(u,v)$, since every traffic demand is nonzero and so every shortest path must carry {\em some} traffic. We thus can safely delete all capacity-zero arcs from our network. 
In order to ensure that our graphs remain symmetric, however, we only delete a capacity-zero arc $(a,b)$ if its partner $(b,a)$ also has zero capacity. \subsubsection{Step 4: Customers and Potential Facility Locations} For each of our 80 skeleton graphs (10 for each of our eight sets of GT-ITM parameters), we generated seven networks, differing in their choices of the sets $C \subseteq F$ of customer vertices and potential facility locations. The sizes were chosen as a set of fixed fractions of the set $V$ of vertices. Let (C$x$,F$y$) denote the set of instances with $|C| = \lceil|V|/x\rceil$ and $|F| = \lceil|V|/y\rceil$. For each graph we generated one instance of each of the following types: (C1,F1), (C2,F2), (C4,F4), (C8,F8), (C2,F1), (C4,F1), and (C8,F1). Our synthetic instance testbed thus contains a total of 560 instances, each consisting of a network, OSPF weights (which determine the shortest paths in the network), and the sets $C$ and $F$. The full set of synthetic instances is available from author Resende, and currently can be downloaded from {\tt http://mauricio.resende.info/covering-by-pairs/}. \subsection{Real-World ISP-Based Instances} The 70 real-world instances in our testbed were derived from 10 proprietary Tier-1 Internet Service Provider (ISP) backbone networks and many of them use actual OSPF weights. Because of the proprietary nature of these instances, we will not be able to report as detailed and precise results about them, although we shall summarize how they differ from the synthetic instances, and how the results for the two instance types compare. The ISP networks ranged in size from a little more than 100 routers to over 5000, each with between $3.5|V|$ and $4.3|V|$ arcs (similar to the range for our synthetic instances). We shall denote them by R100a, R100b, R200, R300, R500, R700, R1000, R1400, R3000, and R5500, where the number following the R is the number of routers, rounded to the nearest multiple of 100.
(Because of their proprietary nature, these instances cannot be made publicly available.) For four of these instances (R100a, R100b, R200, and R500), we only had information about the topology, but not the roles of the routers. For each of these, we constructed 16 instances, starting with the case in which $F = C = V$. The other instances also had $F = V$, but $C$ was a random sample of roughly $1/2$, $1/4$, or $1/8$ of the vertices. Following our notation for the synthetic instances, we shall refer to these four alternatives as (C1,F1), (C2,F1), (C4,F1), and (C8,F1). In addition to the single (C1,F1) instance, we generated five distinct instances for each alternative from $\{$(C2,F1), (C4,F1), (C8,F1)$\}$, based on different random choices for $C$. For R300, R700, R1000, R1400, R3000, and R5500, where we have more detailed information about the roles played by the routers, we constructed instances, one for each topology, that took those roles into account: routers most closely associated with network customers (access routers, etc.) served as the customer vertices, and the potential facility locations consisted of these, together with all other routers except for the backbone routers (thus modeling the likely situation in which extra monitoring equipment cannot easily be connected to the backbone routers). The number of customer vertices ranged between 50\% and 93\% of the total, and the number of vertices that were neither customers nor potential facility locations ranged from 2\% to 41\%.
\vspace{-.1in} \subsection{Instance Properties}\label{propertysect} \begin{table} \begin{center} {\Large Synthetic Instances} \bigskip { \begin{tabular}{c|r @{\ \ \ \ \ \ \ \ } r @{\ \ \ \ \ } r @{\ \ \ \ \ } r @{\ \ \ \ \ } r @{\ \ \ \ \ } r @{\ \ \ } r @{\ \ } r} Class & 50 & 100 & 190 & 220 & 250 & 300 & 558 & 984\\ \hline \multicolumn{9}{c}{\rule[-.2cm]{0cm}{.6cm}Average Number of Triples in Thousands}\\ \hline C1,F1 & \ 12.9 & 57.8 & \ \ 224.8 & \ \ \ \ \ 340 & \ \ \ \ \ 487 & \ \ \ \ \ 756 & \ \ \ \ \ 4,111 & 19,954 \\ C2,F1 & 6.3 & 31.6 & 119.1 & 184 & 270 & 375 & 2,070 & 10,310 \\ C4,F1 & 3.5 & 15.1 & 53.6 & 92 & 118 & 185 & 1,131 & 5,136 \\ C8,F1 & 2.2 & 7.5 & 28.0 & 44 & 59 & 98 & 471 & 2,642 \\ C2,F2 & 1.5 & 6.7 & 26.9 & 43 & 55 & 93 & 507 & 2,438 \\ C4,F4 & 0.2 & 0.8 & 3.7 & 5 & 8 & 11 & 62 & 299 \\ C8,F8 & 0.0 & 0.1 & 0.4 & 1 & 1 & 1 & 6 & 38 \\ \hline \multicolumn{9}{c}{\rule[-.2cm]{0cm}{.6cm}Average Percent of Total Number of Possible Triples for Set-Disjoint Instances}\\ \hline C1,F1 & 21.99 & 11.92 & 6.66 & 6.48 & 6.30 & 5.65 & 4.76 & 4.20\\ C2,F1 & 21.58 & 13.02 & 7.05 & 7.00 & 6.98 & 5.61 & 4.79 & 4.34\\ C4,F1 & 22.63 & 12.43 & 6.28 & 7.00 & 6.09 & 5.53 & 5.22 & 4.33\\ C8,F1 & 26.42 & 11.92 & 6.56 & 6.60 & 5.94 & 5.80 & 4.35 & 4.45\\ C2,F2 & 22.03 & 11.33 & 6.48 & 6.59 & 5.73 & 5.65 & 4.72 & 4.12\\ C4,F4 & 22.03 & 11.30 & 7.20 & 6.50 & 6.82 & 5.63 & 4.64 & 4.07\\ C8,F8 & 20.10 & 15.23 & 7.32 & 7.24 & 6.99 & 5.57 & 3.83 & 4.16\\ \hline \multicolumn{9}{c}{\rule[-.2cm]{0cm}{.6cm}Average Percent Increase in Triples for Pathwise-Disjoint Instances over Set-Disjoint Ones}\\ \hline C1,F1 & 6.94 & 6.63 & 4.96 & 5.99 & 3.88 & 6.68 & 4.29 & 5.60\\ C2,F1 & 6.93 & 5.73 & 5.05 & 5.10 & 3.25 & 5.99 & 4.09 & 4.90\\ C4,F1 & 7.52 & 5.00 & 4.87 & 6.08 & 3.84 & 7.10 & 2.74 & 4.23\\ C8,F1 & 5.28 & 5.14 & 3.93 & 7.49 & 3.26 & 5.60 & 3.37 & 2.88\\ C2,F2 & 8.32 & 7.13 & 4.36 & 5.37 & 4.50 & 6.62 & 3.88 & 6.15\\ C4,F4 & 3.99 & 6.98 & 7.46 & 8.12 & 4.38 & 7.60 & 4.97 & 
5.69\\ C8,F8 & 17.71 & 7.20 & 5.61 & 3.32 & 1.98 & 5.63 & 4.62 & 6.15\\ \hline \multicolumn{9}{c}{\rule[-.2cm]{0cm}{.6cm}Average Percent Decrease in Triples for Equal-Weights on Set-Disjoint Instances} \\ \hline C1,F1 & 16.88 & 13.82 & 9.07 & 10.29 & 14.53 & 13.22 & 26.89 & 31.13\\ C2,F1 & 15.48 & 15.12 & 10.48 & 9.67 & 15.11 & 14.89 & 26.68 & 31.09\\ C4,F1 & 11.82 & 15.48 & 9.52 & 9.79 & 14.50 & 14.44 & 30.95 & 32.24\\ C8,F1 & 22.68 & 8.64 & 4.92 & 7.68 & 12.71 & 15.31 & 24.03 & 39.41\\ C2,F2 & 13.81 & 14.27 & 4.47 & 13.34 & 11.84 & 11.26 & 26.48 & 30.23\\ C4,F4 & 13.60 & 13.03 & 7.46 & 9.13 & 20.49 & 3.16 & 24.81 & 32.15\\ C8,F8 & 8.35 & 17.06 & 5.55 & 19.95 & 14.86 & 4.89 & 27.72 & 37.81\\ \hline \end{tabular} } \caption{Average numbers of triples for synthetic instances, the percentage of all possible triples that these numbers represent, the percentage increases in numbers of triples for the arc-disjoint version of pathwise-disjoint instances, and for equal-weight instances.}\label{tripletab} \end{center} \vspace{-.25in} \end{table} \vspace{-.1in} \begin{table} \begin{center} {\Large ISP-Based Instances} \bigskip { \begin{tabular}{c|r @{\ \ \ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ \ \ } r @{\ \ \ \ } r @{\ \ \ \ } r @{\ \ \ \ } r} Class & R100a & R100b & R200 & R300 & R500 & R700 & R1000 & R1400 & R3000 & R5500\\ \hline \multicolumn{11}{c}{\rule[-.2cm]{0cm}{.6cm}Average Number of Triples in Millions}\\ \hline C1,F1 & 0.22 & 0.36 & 1.44 & 2.47 & 17.7 & 22.0 & 47.2 & 134 & 816 & 8,156\\ C2,F1 & 0.11 & 0.18 & 0.73 & & 8.8 & & & & & \\ C4,F1 & 0.06 & 0.09 & 0.36 & & 4.4 & & & & & \\ C8,F1 & 0.03 & 0.05 & 0.20 & & 2.2 & & & & & \\ \hline \multicolumn{11}{c}{\rule[-.2cm]{0cm}{.6cm}Average Percent of Total Number of Possible Triples for Set-Disjoint Instances}\\ \hline C1,F1 & 30.0 & 34.0 & 25.6 & 33.7 & 23.9 & 28.2 & 11.2 & 21.4 & 35.0 & 20.9\\ C2,F1 & 30.1 & 34.1 & 26.0 & & 23.5 & & & & & \\ C4,F1 & 32.2 & 32.7 & 25.4 & & 23.6 & & & & & \\ 
C8,F1 & 26.5 & 32.8 & 27.0 & & 23.3 & & & & & \\ \hline \multicolumn{11}{c}{\rule[-.2cm]{0cm}{.6cm}Average Percent Increase in Triples for Pathwise-Disjoint Instances over Set-Disjoint Ones}\\ \hline C1,F1 & 43.6 & 40.1 & 59.8 & 41.7 & 24.9 & 31.0 & 335.6 & 33.0 & 25.9 & 101.7\\ C2,F1 & 42.6 & 39.9 & 58.4 & & 25.3 & & & & & \\ C4,F1 & 41.2 & 46.5 & 63.6 & & 26.7 & & & & & \\ C8,F1 & 53.4 & 36.2 & 56.8 & & 24.0 & & & & & \\ \hline \multicolumn{11}{c}{\rule[-.2cm]{0cm}{.6cm}Average Percent Increase in Triples for Arc-Disjoint over Vertex Disjoint Instances} \\ \hline C1,F1 & 1.0 & 1.3 & 1.3 & .1 & 1.8 & .3 & 15.5 & 1.5 & .1 & 5.5\\ C2,F1 & 1.0 & 1.3 & 1.2 & & 1.9 & & & & & \\ C4,F1 & 1.4 & 1.4 & 1.3 & & 1.9 & & & & & \\ C8,F1 & .8 & 1.6 & 1.3 & & .9 & & & & & \\ \hline \multicolumn{11}{c}{\rule[-.2cm]{0cm}{.6cm}Average Percent of Total Number of Possible Triples for Arc-Path-Disjoint Instances} \\ \hline C1,F1 & 43.5 & 48.3 & 41.5 & 47.8 & 30.3 & 37.0 & 56.4 & 28.9 & 44.1 & 44.3\\ C2,F1 & 43.2 & 48.3 & 41.6 & & 30.0 & & & & & \\ C4,F1 & 46.2 & 48.4 & 42.0 & & 30.4 & & & & & \\ C8,F1 & 40.9 & 45.1 & 42.4 & & 29.2 & & & & & \\ \hline \multicolumn{11}{c}{\rule[-.2cm]{0cm}{.6cm}Average Percent Decrease in Triples for Equal-Weights on Set-Disjoint Instances} \\ \hline C1,F1 & 27.4 & 16.0 & 4.9 & 29.4 & 14.2 & 31.6 & -16.7 & 15.7 & 55.1 & 58.7\\ C2,F1 & 26.1 & 16.1 & 5.7 & & 13.7 & & & & & \\ C4,F1 & 27.0 & 11.9 & 6.3 & & 12.0 & & & & & \\ C8,F1 & 30.1 & 15.3 & 2.7 & & 16.9 & & & & & \\ \hline \end{tabular} } \caption{Average percentage of valid triples for real world instances, the percentage increases in numbers of triples for the vertex- and arc-disjoint versions of pathwise-disjoint instances, and for equal-weight instances.}\label{realtripletab} \end{center} \vspace{-.25in} \end{table} Our synthetic instances reflect the structure of real-world LANs and WANs, and consist of many, relatively small 2-connected components. 
For instances with $|V| \geq 100$, the largest 2-connected component averages around 12\% of the vertices (112.5 vertices for our $|V| = 984$ instances). In contrast, the Tier-1 ISP instances consist of one very large 2-connected component, typically containing over 95\% of the vertices, with small 2-connected components hanging off of it, most of size 2. One consequence of this difference in topology is differing numbers of triples for the two instance types. For a given instance with a set $C$ of customers and a set $F$ of potential facility locations, there are $|C|(|F|-1)(|F|-2)/2$ potential triples $(c,f,f')$, each one containing a customer $c$ and two distinct potential facility locations in $F - \{c\}$. For our synthetic instances, only a relatively small fraction of the potential triples are actually valid triples in the set-disjoint case. For each class $(Cx,Fy)$, the percentage of valid triples declines as $|V|$ increases, until it appears to stabilize somewhere between 4 and 5\%. See Table \ref{tripletab}. The ISP-based instances yield substantially more set-disjoint triples than the synthetic ones of the same size. The percentage of valid triples, with one exception, ranges between 20 and 35\%, the exception being R1000, which at 11.2\% still has more than double the percentage for the large synthetic instances. See Table \ref{realtripletab}. Note that, because of the cubic growth in the number of potential triples, the actual number of triples can become quite large. For our largest instance, R5500, there are over 5.7 billion valid triples in the set-disjoint case. The choice of arc weights also has an effect on our instances. The optimized weights of our synthetic instances lead to relatively few shortest-path ties, and so the pathwise-disjoint instances typically have only 5 to 6\% more valid triples than the corresponding set-disjoint instances.
The weights in the ISP instances were constrained by a variety of factors in addition to traffic balancing, and yield far more ties. As a result, the number of path-disjoint triples exceeds the number of set-disjoint triples by between 24 and 335\%, the latter occurring for our anomalous R1000 instance. We also measured the effect of relaxing the constraints on our path-disjoint instances, by requiring only that the paths be arc-disjoint, not vertex-disjoint. (Although vertex-disjointness adds robustness to our monitoring application, the mathematics of the application only requires arc-disjointness.) Here the effects were relatively minor for both synthetic and ISP-based instances, typically yielding an increase of less than 2\% in the number of valid triples. Instance R1000 was again the major exception, yielding an increase of 15\%, to reach a total of more than half the maximum possible number of triples. Finally, to further explore the effect of increased numbers of ties in the set-disjoint case, we considered modified versions of our instances in which all edge weights are set to 1. As might be expected for such weights, the number of set-disjoint triples drops substantially for almost all our synthetic and ISP-based instances (the only exception again being instance R1000, where we saw a 16\% increase). \section{Summary of Experimental Results}\label{section:experiments} This section evaluates our algorithms in three areas. \begin{itemize} \item \textsl{Accuracy:} How close to optimal were the solutions provided by each of the heuristics \textsc{Greedy}, \textsc{SHS}, and \textsc{DHS}? For small instances, for which the exact optimal can be computed using CPLEX, we can compare to that. In the set-disjoint case, where the SHS lower bound turns out to equal the optimal solution value for all our instances, we can compare to that. For large path-disjoint instances, we have no good, readily computable, standard of comparison. We consider two options.
First, we can compare the results to the (very weak but linear-time computable) lower bound obtained when one requires only that paths be vertex-disjoint, not that they be shortest paths. Second, we can evaluate the results for {\textsc{Greedy(400)}} in the path-disjoint case by attempting to improve on them using the {\textsc{Genetic}} algorithm and large amounts of computing time. (Note that {\textsc{Genetic}} optimally solved all the bad set-disjoint instances for {\textsc{Greedy(400)}} except R5500, which is too big for {\textsc{Genetic}} to handle.) \item \textsl{Running Time:} How fast are the algorithms? Our runs were primarily performed on two machines. We used a shared-memory machine with 1.5 Ghz Itanium processors and 256 Gigabytes of random access memory, running SUSE LINUX with kernel 2.6.16.60-0.81.2-default, for the heuristic runs on instance R5500, where 80 gigabytes of main memory were needed because of the large number of triples involved. In the text this will be referred to as ``our slower machine.'' The remainder of our runs were primarily performed on a shared memory multiprocessor machine with 24 2.7 Ghz Intel Xeon processors and 50 gigabytes of random access memory running CentOS Linux with kernel 2.6.32-431.11.2.el6.x86\_64Linux. In addition, we did some confirmatory runs on a MacBook Pro with a 2.4 Ghz dual-core Intel i5 processor and 8 gigabytes of RAM, running Mac OS 10.9.4 and with Gurobi 5.6 replacing CPLEX 11.0 as our MIP solver. When we report running times, we give the sum of the system and user times, and will specify the machine used. (Each algorithm run used only a single processor/core of the respective machine.) All our codes were written in {\tt C}, and compiled using {\tt gcc}. Unless otherwise stated, the running times reported are for code compiled with no optimization flags set, although, where appropriate, we will indicate the kinds of speedups one might expect using {\tt -O2} optimization. 
It should be noted that on none of these machines were running times precisely repeatable. Differences of 20\% or more on duplicate runs were not uncommon. So our running time reports can provide only order-of-magnitude information, and can provide evidence that one code is faster than another only if the differences are substantial and hold across many instances. \item \textsl{Solution Quality (Cost Reduction):} The first two measures address the quality of the algorithms. This one addresses what they tell us about the applications. Even an optimal solution will not be enough to justify the proposed monitoring scheme of \citet{GBDS-loss08} if it does not provide a significant savings over the default solution that simply takes {\em all} customers (which for our instances are themselves facility locations) and is likely to yield more reliable measurements. We also consider how much further improvement is obtainable when going from the setwise-disjoint to the pathwise-disjoint versions of the problem. \end{itemize} \noindent \subsection{Accuracy and Running Times for the Set-Disjoint Case} \subsubsection{Optimization via our {\textsc{MIP}} Code} In Table \ref{lb+opttab}, we summarize the results for our {\textsc{MIP}} optimization code on our synthetic and ISP-based set-disjoint instances. All the runs were performed on the faster of our two Linux machines. The first section of the table reports how many of the 10 instances of each type that the code was able to solve within a 24-hour time bound. For $|V|=190$, we eventually solved the missing (C4,F1) and (C8,F1) instances, although the longest (C4,F1) took slightly more than a week. We also solved the two missing (C4,F1) instances based on the ISP-based R200 instances, although the longer took over a week. On the other hand, for 299 of the 383 solved instances, {\textsc{MIP}} took less than 10 minutes. 
The second section of the table reports the worst running times (system time plus user time, rounded to the nearest second) for the classes where all instances were solved in less than 24 hours. For our testbed in general, the code tends to run into trouble as the number of customers grows beyond 100, with exceptions when the number of facilities is also relatively small. Its running times seem clearly to be growing in exponential fashion as $|C|$ increases. Note also that, when all vertices are potential facility locations, reducing the number that are also customers can actually make the instance harder. \begin{table}[t] \begin{center} \bigskip { \begin{tabular}{c|r @{\ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ } r| r r r } \multicolumn{9}{c}{\rule[-.2cm]{0cm}{.6cm}\ \ \ \ \ \ \ \ Synthetic Instances} & \multicolumn{3}{c}{\rule[-.2cm]{0cm}{.6cm}ISP-Based Instances}\\ \hline Class & 50 & 100 & 190 & 220 & 250 & 300 & 558 & 984 & R100a & R100b & R200\\ \hline \multicolumn{12}{c}{\rule[-.2cm]{0cm}{.6cm}Number of Instances Solved in Less Than 24 Hours by MIP}\\ \hline (C1,F1) & 10 & 10 & 10 & - & - & - & - & - & 1 & 1 & 1\\ (C2,F1) & 10 & 10 & 10 & - & - & - & - & - & 5 & 5 & -\\ (C4,F1) & 10 & 10 & 9 & - & - & - & - & - & 5 & 5 & 3\\ (C8,F1) & 10 & 10 & 9 & - & - & - & - & - & 5 & 5 & 5\\ \cline{10-12} (C2,F2) & 10 & 10 & 10 & 10 & 10 & 10 & - & - \\ (C4,F4) & 10 & 10 & 10 & 10 & 10 & 10 & 10 & 10 \\ (C8,F8) & 10 & 10 & 10 & 10 & 10 & 10 & 10 & 10 \\ \hline \multicolumn{12}{c}{\rule[-.2cm]{0cm}{.6cm}Worst Running Times (in Seconds) for Instances of Each Completed Class} \\ \hline (C1,F1) & 20 & 89 & 23 & 518 & 42,998 & - & - & - & 0 & 0 & 74\\ (C2,F1) & 18 & 86 & 1725 & - & - & - & - & - & 8 & 1 & -\\ (C4,F1) & 36 & 46 & - & - & - & - & - & - & 291 & 92 & -\\ (C8,F1) & 35 & 803 & - & - & - & - & - & - & 13 & 46 & 33,789\\ \cline{10-12} (C2,F2) & 0 & 0 & 0 & 0 & 2 & 25 & - & - \\ (C4,F4) & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 227 \\ (C8,F8) & 0 
& 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline \end{tabular} } \caption{Results for our {\textsc{MIP}} optimization code on set-disjoint instances.}\label{lb+opttab} \end{center} \vspace{-.25in} \end{table} \subsubsection{Lower Bounds and Solutions via our {\textsc{HSLB}} Code} Fortunately, the exponential behavior of our {\textsc{MIP}} code is not an insurmountable difficulty in the case of our set-disjoint instances. Not only did our {\textsc{HSLB}} code produce a lower bound that equals the optimal solution value for all our instances, but the hitting set it constructed was always itself a feasible, and hence optimal, solution. Moreover, it took no more than 4 seconds on any instance besides R5500, for which it only took 22 seconds. In general, it was far faster than any of our heuristics, often by a factor of 100 or more, even though it involves finding an optimal solution to an instance of the NP-hard {\sc Hitting Set} problem. Moreover, its space requirements are much less than those of our heuristics, given that the (Customer,Neighbor,Facility) triples it stores are far fewer in number than the standard (Customer,Facility,Facility) triples required by the heuristics (and stored by them in triplicate). Consequently, for our largest instance, R5500, it required just over 1 GB, whereas the heuristics need in excess of 65 GB. \subsubsection{The Heuristics} As for the heuristics, the ``best-of-400-iterations'' versions of {\textsc{Greedy}}, {\textsc{SHS}}, and our two variants on {\textsc{DHS}} also all performed well in general on our test bed, although two instances were seriously problematical for at least one of them. The best solutions found (over 400 runs) by {\textsc{SHS}} were optimal for {\em all} the instances in our testbeds, both synthetic and ISP-based. {\textsc{DHS}}$_H$ and {\textsc{Greedy}} performed almost as well. 
Although {\textsc{DHS}}$_1$ found optimal solutions on all our ISP-based instances, it failed to find optimal solutions for seventeen of our synthetic instances, six of the ones with $|V|=558$ and eleven of those with $|V|=984$, including the one on which {\textsc{DHS}}$_H$ failed.
The instances covered are the largest ones in our testbed for each ISP topology with $|V| \geq 200$, i.e., the single instance for each topology where we knew the OSPF weights and router types, and the (C1,F1) instance for each of the instances where we optimized the weights ourselves. For comparison purposes, the table also breaks out the time devoted to constructing the triples used by the heuristics, and the times for our code that computes the {\textsc{HSLB}} and determines whether the optimal hitting set produced is a feasible solution. Running times for our synthetic instances of similar sizes were roughly comparable. \begin{table} \begin{center} { \begin{tabular}{r|r @{\ \ \ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ } r @{\ \ \ \ \ } r @{\ \ \ \ } r r} Algorithm & R200 & R300 & R500 & R700 & R1000 & R1400 & R3000 & R5500*\\ \hline \multicolumn{9}{c}{\rule[-.2cm]{0cm}{.6cm}Linux Machine(s), Compiled without Optimization} \\ \hline \textsc{HSLB} & 0.1 & 0.1 & 0.4 & 0.3 & 1 & 1 & 4 & 23\\ Triple Gen & 0.1 & 0.2 & 0.9 & 1.4 & 4 & 8 & 41 & 1,800\\ \textsc{SHS} & 7.3 & 4.7 & 68.3 & 32.0 & 125 & 361 & 452 & 23,170\\ {\textsc{DHS}}$_1$ & 7.2 & 6.7 & 73.1 & 37.9 & 147 & 389 & 524 & 24,560\\ {\textsc{DHS}}$_H$ & 8.4 & 11.6 & 74.1 & 75.8 & 396 & 730 & 1165 & 47,970\\ \textsc{Greedy} & 11.8 & 14.0 & 116.1 & 90.9 & 578 & 704 & 2171 & 70,100\\ \hline \multicolumn{9}{c}{\rule[-.2cm]{0cm}{.6cm}MacBook Pro, Compiled with Optimization} \\ \hline \textsc{HSLB} & 0.8 & 0.7 & 1.2 & 1.0 & 1 & 1 & 3 & 14\\ Triple Gen & 0.5 & 0.5 & 0.8 & 0.9 & 2 & 3 & 14 & -\\ \textsc{SHS} & 2.2 & 2.0 & 17.7 & 8.8 & 32 & 94 & 179 & -\\ {\textsc{DHS}}$_1$ & 2.4 & 2.5 & 19.0 & 13.4 & 44 & 113 & 265 & -\\ {\textsc{DHS}}$_H$ & 2.5 & 2.3 & 19.8 & 14.9 & 130 & 119 & 277 & -\\ \textsc{Greedy} & 3.2 & 4.1 & 30.3 & 25.9 & 188 & 198 & 866 & -\\ \hline \end{tabular} } \caption{Running times in seconds for triple construction, full 400-iteration heuristic runs, and {\textsc{HSLB}} computations. 
The algorithms are typically ranked {\textsc{SHS}}, {\textsc{DHS}}$_1$, {\textsc{DHS}}$_H$, {\textsc{Greedy}} in order of increased running times for both machines, although there are minor inversions for a few instances.
Conversely, we might be willing to give up running time and perform more iterations, if that would improve the robustness of our results.
Given that the counts given by single 400-iteration runs give noisy estimates of the true probabilities, it might be safer to inflate the above count thresholds in practice.
The counts tend to be higher for our ISP-based instances, since 15 represents over 21\% of our 70 instances of that type, whereas it represents only about 2.4\% of our 630 synthetic instances. Based on the results in the table, one can conclude that {\textsc{Greedy}} is the most robust for synthetic instances, with {\textsc{SHS}} almost as good.
\begin{table} \begin{center} { \begin{tabular}{r|r r r r r | r r r r r} & & \multicolumn{3}{c}{Synthetic Instances} & & & \multicolumn{3}{c}{ISP-Based Instances} & \\ k & {\textsc{SHS}} & {\textsc{DHS}}$_1$ & {\textsc{DHS}}$_H$ & {\textsc{DHS}}$_{1H}$ & {\textsc{Greedy}} & {\textsc{SHS}} & {\textsc{DHS}}$_1$ & {\textsc{DHS}}$_H$ & {\textsc{DHS}}$_{1H}$ & {\textsc{Greedy}}\\ \hline 1 & 10 & 0 & 0 & 0.0 & 10 & 1 & 3 & 0 & 6.0 & 0 \\ 2 & 10 & 0 & 1 & 0.5 & 19 & 25 & 5 & 0 & 8.5 & 0 \\ 3 & 13 & 0 & 1 & 1.0 & 29 & 27 & 13 & 0 & 20.5 & 7 \\ 4 & 16 & 0 & 4 & 2.0 & 50 & 35 & 23 & 1 & 21.0 & 7 \\ 5 & 39 & 0 & 4 & 2.0 & 52 & 68 & 37 & 7 & 26.5 & 29 \\ 6 & 47 & 0 & 5 & 2.5 & 55 & 73 & 40 & 14 & 40.0 & 29 \\ 7 & 52 & 0 & 5 & 3.5 & 61 & 80 & 44 & 28 & 41.5 & 33 \\ 8 & 57 & 0 & 6 & 3.5 & 65 & 91 & 49 & 30 & 44.0 & 35 \\ 9 & 57 & 0 & 8 & 4.0 & 66 & 95 & 60 & 44 & 58.0 & 53 \\ 10 & 60 & 0 & 8 & 4.0 & 69 & 101 & 67 & 46 & 68.0 & 57 \\ 11 & 64 & 0 & 9 & 4.5 & 72 & 110 & 68 & 56 & 79.5 & 66 \\ 12 & 65 & 0 & 9 & 5.0 & 74 & 136 & 80 & 68 & 87.0 & 85 \\ 13 & 65 & 0 & 10 & 5.0 & 79 & 139 & 100 & 93 & 100.0 & 88 \\ 14 & 67 & 0 & 10 & 5.5 & 80 & 163 & 107 & 100 & 100.0 & 89 \\ 15 & 67 & 0 & 10 & 6.0 & 82 & 177 & 123 & 100 & 103.5 & 98 \\ \hline \end{tabular} } \caption{The 15 smallest numbers of optimal solutions found in 400 independent runs over synthetic and ISP-based instances.}\label{heurprobtab} \end{center} \vspace{-.25in} \end{table} \subsubsection{Challenging {\textsc{HSLB}}: Equal-Weight Edges} Given the fact that our {\textsc{HSLB}} code produced hitting sets that were optimal solutions for all 700 instances in our set-disjoint testbed, one might wonder why we should bother with heuristics at all for the set-disjoint case. 
Perhaps instances where the {\textsc{HSLB}} is smaller than the optimal solution value, or where the {\textsc{HSLB}} solution is otherwise infeasible, simply do not arise in practice, possibly because of the restricted topologies in real world LAN/WANs and ISP networks, or because edge weights are optimized to manage traffic. To investigate the latter possibility, we repeated our experiments with all the weights set to 1 -- not a very good choice for traffic management, but certainly a natural default, and one that yields a major change in the set of triples in the derived ``cover-by-pairs'' instances, as observed in Section \ref{propertysect}. With these instances, the hitting sets constructed by our {\textsc{HSLB}} code were still legal and hence optimal covers for all 70 of our ISP-based instances. However, for 51 of our 630 synthetic instances, the hitting set was {\em not} a legal cover, and for nine of these, the best solution found by our 400-iteration {\textsc{SHS}} run was still larger than the computed lower bound (although never by more than 2.1\%). These were all (C1,F1) instances, five with $|V|=984$, three with $|V|=558$, and one with $|V|=250$, and in all nine cases, the {\textsc{HSLB}} nevertheless continued to equal the optimal solution size. The optimal solutions for these nine were {\em not} found by any of our 400-iteration runs of {\textsc{DHS}}$_1$ or {\textsc{DHS}}$_H$. As in our experiments on the synthetic instances in our standard testbed, it was {\textsc{Greedy}} that proved to have the best synergies with {\textsc{SHS}}. Our 400-iteration runs of {\textsc{Greedy}} found solutions matching the {\textsc{HSLB}} for three of the 984's, for one of the 558's, and for the 250. For the remaining four instances, we needed to increase the number of iterations. For the two remaining 984's, 4000-iteration runs of {\textsc{SHS}} found solutions matching the {\textsc{HSLB}}. 
For one of the two remaining 558's, a 100,000-iteration run of {\textsc{Greedy}} found a lower-bound matching solution (three times), although a 200,000-iteration run of {\textsc{SHS}} failed to find any.
& - & 50 & 644 & 40,739\\ \cline{10-12} (C2,F2) & 0 & 0 & 0 & 0 & 4 & 13 & - & - \\ (C4,F4) & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 8106 \\ (C8,F8) & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline \end{tabular} } \caption{Results for our {\textsc{MIP}} optimization code on vertex-disjoint instances.}\label{table:pathopt} \end{center} \vspace{-.25in} \end{table} \subsection{Accuracy and Running Times for the Path-Disjoint Case} \subsubsection{Optimization via our {\textsc{MIP}} Code} Table \ref{table:pathopt} summarizes our optimization results for the path-disjoint versions of our instances, and shares the format of Table \ref{lb+opttab}, which covered the set-disjoint versions. Once again, we report the number of instances of each class that were solved to optimality within 24 hours, and, for each class with all instances so solved, the {\em worst} running time encountered, as a measure of robustness. Note that we were able to solve about the same number of instances of each type within the 24-hour time bound as we did in the set-disjoint case, with the worst time for each type usually slightly worse. In more extended runs, we were able to compute the optimum for the one missing $|V|=190$, (C2,F1) synthetic instance (in 39 hours), and the two missing R200, (C4,F1) ISP-based instances (in 49 and 77 hours). The results for the arc-disjoint versions of our path-disjoint instances were roughly the same as reported in Table \ref{table:pathopt} for the vertex-disjoint versions. We were able to solve essentially the same types of instances as before, with times that were roughly equivalent. The maximum for each class tended to be shorter for the arc-disjoint versions, although not so often as to provide a statistically meaningful distinction. \subsubsection{Heuristic Results} In evaluating heuristics for the path-disjoint case, we suffer from several limitations in comparison to the set-disjoint case. 
In particular, we performed 1000-generation runs of our {\textsc{Genetic}} algorithm on each of these instances, and only matched the {\textsc{Greedy(400)}} solutions.
For all 69 ISP-based instances other than R5500, the best solution found was found at least six times out of 400 (and for all but one, the count was 41 or greater).
Recalling from Table \ref{realtripletab} that the number of triples associated with an instance increases as one goes from set-disjoint to vertex-disjoint to arc-disjoint instances, one might expect running times to behave similarly, but this is not the case.
Our overall running times also do not precisely track triple counts. The times for the path-disjoint versions of the instances are indeed significantly slower than for the set-disjoint versions (except in the case of R1400), but once again the arc-disjoint times tend to be faster than those for the vertex-disjoint versions.
& (C4,F4) & 140 & 42.1 & (57.9) & 42.1 & 41.2 & ( 2.2) & 41.1 & ( 0.3) & 34.6 & (15.7) \\
& (C4,F1) & 25 & 46.8 & (53.2) & 46.8 & 46.4 & ( 0.9) & 46.4 & ( 0.0) & 38.4 & (17.2) \\ & (C4,F4) & 25 & 49.2 & (50.8) & 49.2 & 48.4 & ( 1.6) & 48.4 & ( 0.0) & 37.2 & (23.1) \\ & (C8,F1) & 13 & 61.5 & (38.5) & 61.5 & 61.5 & ( 0.0) & 61.5 & ( 0.0) & 48.5 & (21.3) \\ & (C8,F8) & 13 & 63.8 & (36.2) & 63.8 & 62.3 & ( 2.4) & 61.5 & ( 1.2) & 47.7 & (22.5) \\ \hline \end{tabular} \caption{Average percentages of customer vertices in algorithmically generated covers for all seven classes of our $|V| \in \{984, 558, 300, 190, 100\}$ synthetic instances. Entries in parentheses are the percentage reduction in cover size from the results in the preceding column. }\label{coverreduction} \end{center} \vspace{-.25in} \end{table} In this section we consider the savings our heuristics can provide over simply choosing the cover consisting of the set $C$ of all customer vertices, which is always a feasible solution, and would be the default solution for network path monitoring if we did not use our proposed monitoring scheme. We first consider the results for our synthetic instances. Table \ref{coverreduction} presents our results for the seven classes of instances for each value of $|V| \in \{984, 558, 300, 190, 100\}$. (For space reasons, we omit the results for $|V| \in \{250, 220, 50\}$, but they do not evince any strikingly different behavior.) For each algorithm and instance class, we give both the average percentage of customers that are in the covers produced by the algorithm, and (in parentheses) the percentage reduction over the results in the column to the left. Here ``{\textsc{Greedy}}'' stands for {\textsc{Greedy(400)}}, the algorithm that takes the best of 400 independent runs of {\textsc{Greedy}}. ``Opt'' stands for the cover produced by our {\textsc{HSLB}} code for set-disjoint instances, which for these instances was always optimal. 
``DP'' stands for the optimum when we do not require our vertex-disjoint paths to be shortest paths, as computed by the linear-time dynamic programming algorithm described in Section \ref{sec:trees}. This value is not relevant to either of our applications, but does provide a lower bound on what is possible in the path-disjoint case. These results clearly show that using our scheme for disjoint path monitoring offers a substantial reduction in the number of vertices needed to cover all the customers. In the set-disjoint case, the reduction is typically by a factor of 4 or more for the (C1,F1) instances, in which all vertices are customers, and grows to a factor of 6 when $|V| \in \{558, 984\}$. As the proportion of vertices that are customers is reduced for a given value of $|V|$, the savings decreases, but is always more than 29\%. Perhaps not surprisingly, when not all vertices are customers, the reduction is greater in the (C$k$,F1) classes, where all vertices are potential facility locations, than in the (C$k$,F$k$) classes, where the only potential facility locations are the customers themselves. Note that, as remarked before, {\textsc{Greedy(400)}} finds optimal set-disjoint solutions in all cases, so we omit the parenthetical ``percentage reduction'' column for {\textsc{Opt}}. Going from the set-disjoint to the path-disjoint instances requiring vertex-disjointness typically offers a significant reduction (up to 7\% in the (C1,F1) classes with $|V| \in \{558, 984\}$), although the percentage reduction declines with $|V|$ and $|C|$. Only requiring arc-disjoint paths often yields a small additional improvement, although typically less than 1\%. Allowing unbounded vertex-disjoint paths would allow a much more significant reduction, up to almost 40\% in one case, and typically more for the (C$k$,F$k$) classes, $k \geq 2$, than in the corresponding (C$k$,F1) classes, although the actual sizes of the covers still remain lower in the latter case. 
\begin{table} \begin{center} \begin{tabular}{c | c|r @{\ \ } c r @{\ \ } c|r @{\ \ } c r @{\ \ } c| c c} & &\multicolumn{4}{c|}{Set-Disjoint} & \multicolumn{6}{c}{Path-Disjoint} \\ \hline & Approx & & & & & \multicolumn{2}{c}{Vertex-Disjoint} & \multicolumn{2}{c|}{Arc-Disjoint} & \multicolumn{2}{c}{Unbounded} \\ Instance & $|C|$ &\multicolumn{2}{c}{\textsc{Greedy}} & \multicolumn{2}{c|}{\textsc{Opt}} & \multicolumn{2}{c}{\textsc{Greedy}} & \multicolumn{2}{c|}{\textsc{Greedy}} & \multicolumn{2}{c} {\textsc{DP}} \\ \hline R5500 & 3200 & 37.7 & (62.3) & 17.0 & (55.0) & 2.8 & (83.5) & 2.8 & ( 0.0) & 0.6 & (80.0)\\ R3000 & 1500 & 1.8 & (98.2) & 1.8 & ( 0.0) & 1.3 & (25.9) & 1.3 & ( 0.0) & 0.1 & (90.0)\\ R1400 & 900 & 14.3 & (85.7) & 14.3 & ( 0.0) & 3.2 & (77.6) & 3.2 & ( 0.0) & 0.3 & (90.0)\\ R1000 & 900 & 35.5 & (64.5) & 10.1 & (71.5) & 3.8 & (62.0) & 3.8 & ( 0.0) & 0.9 & (77.1)\\ R700 & 500 & 4.9 & (95.1) & 4.9 & ( 0.0) & 2.8 & (43.5) & 2.8 & ( 0.0) & 0.6 & (76.9)\\ R500 & 500 & 13.9 & (86.1) & 13.9 & ( 0.0) & 12.4 & (10.8) & 12.4 & ( 0.0) & 1.9 & (84.8)\\ R300 & 200 & 4.1 & (95.9) & 4.1 & ( 0.0) & 3.6 & (11.1) & 3.6 & ( 0.0) & 2.7 & (25.0)\\ R200 & 200 & 7.6 & (92.4) & 7.6 & ( 0.0) & 6.7 & (11.8) & 6.7 & ( 0.0) & 2.2 & (66.7)\\ R100a & 100 & 10.9 & (89.1) & 10.9 & ( 0.0) & 7.8 & (28.6) & 7.8 & ( 0.0) & 4.7 & (40.0)\\ R100b & 100 & 10.9 & (89.1) & 10.9 & ( 0.0) & 7.8 & (28.6) & 7.8 & ( 0.0) & 4.7 & (40.0)\\ \hline \end{tabular} \caption{Average percentages of customer vertices in algorithmically generated covers for our 10 ISP-based instances. Entries in parentheses are the percentage reduction in cover size from the results in the preceding column. }\label{Rcoverreduction} \end{center} \vspace{-.25in} \end{table} The results for our ISP-based instances are presented in Table \ref{Rcoverreduction}, where we only include the (C1,F1) versions of those instances where we did not know the types of routers and so could not deduce what the actual sets $F$ and $C$ should be. 
The resulting value $|C|/|V| = 1$ is reasonably close to the corresponding values for the instances where we {\em can} estimate the actual sets $F$ and $C$; for these the ratio ranges from about 0.6 to 0.9. Because the instances where we {\em do} have this information are proprietary, the column labeled by ``$|C|$'' only lists the value of $C$ rounded to the nearest 100. The average cover sizes reported, however, are given as percentages of the true value of $|C|$. Note that in this table we do include a parenthetical improvement column for {\textsc{Opt}} over {\textsc{Greedy(400)}}, since here there {\em are} improvements for two of the instances. As we can see, the effectiveness of our disjoint path monitoring scheme is even more substantial for these instances than for our synthetic ones. The best point of comparison is to the (C1,F1) variants of the latter, where the smallest ratio of set-disjoint cover size to $|C|$ was roughly 16\%. Here the {\em largest} percentage is 17\%, and four of the ten are less than 10\%, with the percentage for R3000 being just 1.8\%. Moreover, going to the path-disjoint case now offers major improvements, in comparison to the at-most 7.3\% improvement for our synthetic instances. Here the range is from 10.8\% to 83.5\%, and now all but one of our instances have covers with size less than 8\% of $|C|$. Allowing arc-disjoint paths offers no further improvement. However, if one could consider arbitrarily long vertex-disjoint paths, even more substantial reductions are possible. Now half the instances have covers with size less than 1\% of $|C|$. This should not be a surprise, however. As we already observed in Section \ref{propertysect}, typically 95\% or more of the vertices in our ISP-based instances are in a single large 2-connected component. By the definition of 2-connectedness, {\em any} two vertices in such a component will constitute a cover. 
Additional vertices are only needed to handle the few additional small components hanging off the large one. \section{Further Results and Directions}\label{section:further} The problems described here can be generalized in a variety of directions, for two of which we already have some preliminary results. \begin{enumerate} \item Variants of SDFL and PDFL in which we omit the requirement that every customer vertex also be a potential facility location (i.e., that $C \subseteq F$). In our monitoring application, it might well be the case that some of the customer vertices are housed in locations that do not have enough free physical space to house our monitoring equipment, or are in leased space into which we do not have permission to add additional equipment. \item Variants in which for each vertex a given nonnegative cost must be paid if that vertex is used as a facility, and our goal is to minimize total cost. Note that the second variant includes the first as a special case, since we can simply set the cost of each member of $F$ to be 1 and the cost of all other vertices to be $|F|+1$. \end{enumerate} Most of our algorithms and lower bounds carry over more-or-less directly to both types of generalization, with the two exceptions being the linear-time algorithms of Section \ref{treetheo} and Section \ref{sec:pdlb} for computing the optimum covers in the special case of trees and our general lower bound for the path-disjoint case. Linear-time algorithms still exist for both of these tasks, but the simple combinatorial characterizations that previously sufficed must now be replaced by dynamic programs of some complexity. Our other algorithms and lower bounds carry over with almost no change for the first type of generalization. For the second type, the integer programs for computing the optimum and for computing the Hitting Set lower bound must change their objective functions so that the variables $x_f$ are multiplied by the corresponding costs. 
And the various cover-constructing heuristics must modify their greedy choices by taking costs into account, for instance by choosing the $f \in F$ which yields the highest ratio (increase in coverage)/(cost of $f$). One question that arises for the first type of variant that was not an issue in our original version of the problems is the question of feasibility. In the original version, where $C \subseteq F$, there was always a feasible cover, namely the set $C$ itself, with each customer covering itself. When not all customers are potential facility locations, it may be that {\em no} cover exists. Thus the first question one must ask is whether such a cover exists. This will be true if and only if the full set $F$ is a valid cover, a property that can easily be checked in $O(n^2m)$ time by generating the corresponding SCP problem and verifying that there is a valid triple for each customer. We leave the empirical testing of our algorithms for these generalizations of SDFL and PDFL for further research. At present the variety of potential instance classes is too large for any one or two choices to provide meaningful insights, although future research into potential applications may identify particular instance classes whose practical relevance justifies such study. \medskip \noindent {\bf {\Large Acknowledgment}} \smallskip \noindent David S. Johnson, the lead author of this paper, passed away on March 8, 2016. David's co-authors dedicate this paper to his memory. The authors thank David Applegate for helpful discussions and in particular for simplifying the statement and proof of Theorem \ref{treetheo}. The authors also thank Rodrigo Toso for implementing the version of \textsc{Genetic}\ used in the experiments. \bibliographystyle{plainnat}
% NOTE(review): the two lines below appear to be dataset-extraction artifacts
% separating two concatenated documents; commented out so the file compiles.
% 1,108,101,565,839
% arxiv
\section{Introduction}\label{sec:intro} The last two decades have witnessed rapid advances in experimental demonstration and theoretical investigation of quantum control systems due to their promising applications in a wide range of areas such as quantum communication, quantum computing, and quantum metrology \cite{DJ99, AA03, vHSM05, MvH07, JNP08, BCS09, GJ09, LK09, NJD09, YB09, WM10, BBR10, BT10, DP10, WS10, AT12, AMR12, ZWL+12, BQ13, ZJ13, PR14, HIN14, NY17, CKS17}. Within this program quantum linear systems play a prominent role. Quantum linear systems are characterized by linear quantum stochastic differential equations (linear QSDEs), and have found applications in quantum optics, opto-mechanical systems, circuit quantum electro-dynamical systems, atomic ensembles, etc.. From a signals and systems point of view, quantum linear systems driven by Gaussian input states have been well studied; results like quantum filtering and measurement-based feedback control have been well established \cite{WM10}. In addition to Gaussian states there are other types of non-classical states, for example single-photon and multi-photon states. Roughly speaking, a light field is in an $\ell$-photon state if there is a definite number of $\ell$ photons in this field. A \textit{continuous-mode} $\ell$-photon state are characterized by the frequency (or equivalently, temporal) profiles of these photons. Interaction between photons and quantum finite-level systems has received considerable attention recently, as the precise control of the interactions between photons (flying qubits) and matter (stationary qubits) is fundamentally important for quantum information processing \cite{kimble08, Kolchin11, LMS+17, RWF17}. For example, single photon transistors and switches could be realized by engineering photon-matter interactions \cite{Chang07, Chen13, Neumeier13,KS16}. 
When photons are used to encode quantum information, the photon-photon interaction mediated by a quantum system can be explored to synthesize high-fidelity controlled-phase (CPHASE) gates for quantum computing \cite{BrodA16,Brod16}. Moreover, engineered routing and scattering of single photons could provide a scalable way for implementing quantum computation \cite{Childs791}. Due to their promising applications in quantum information and communication, the nonlinear dynamics of few-photon scattering by finite-level systems has been studied extensively. For example, single-photon filters have been derived in \cite{GHN+12, DSC17}. The multi-photon version has been given in \cite{SZX16}, which contains as a special case the multi-photon master equations studied in \cite{BCB+12}. The problem of fault tolerant and fault detection for systems driven by single photon states has been studied in \cite{GDP+16}. The problem of single-photon storage in linear networks has been investigated in \cite{YJ14}. Photon-matter interactions can be enhanced if photons are confined to small volumes in space such as cavities, optical fibres and one-dimensional waveguides. Waveguides can be realized in photonic nanostructures, or transmission lines in superconducting microwave circuits \cite{RWF17, LMS+17}. Due to the multi-mode nature of these devices, a continuous-mode analysis of photon states is necessary; i.e., in these systems, the frequency distribution (in other words, the pulse shape) of the wavepacket of photons is an important factor for efficient photon-matter interaction. For example, in an ideal situation an inverting pulse is able to excite a two-level atom fully, but on average, the maximum excitation probability of a Gaussian pulse is 0.8 \cite{SAL09,WMS+11,YJ14,PZJ15}. In \cite{Shen07}, two-photon transport properties in a one-dimensional waveguide coupled to a two-level system have been studied. 
The exact scattering matrix is constructed by means of a generalized Bethe-ansatz technique. In \cite{Fan10}, the authors studied the transport of one and two photons in a nanophotonic waveguide with an embedded two-level system. The exact form of the scattering matrix is derived by combining the scattering matrix theory and the input-output formalism \cite{GZ00, GJ09, WM10}. This problem has also been studied in \cite{Pan16} from a QSDE approach. In \cite{RS12}, a detailed study of stimulated emission of an excited atom in a waveguide driven by a single photon was presented, where the interplay between the frequency bandwidth of the input single photon and the atom-photon coupling strength has been investigated. In \cite{Roy10}, the author showed that the asymmetric coupling between a two-level system and a photonic waveguide improves the bunching of the two photons, thus the proposed scheme can be used to realize an optical diode. The general multi-photon case has been studied in \cite{ZGB10}. Experimental studies on photon-atom interactions can be found in, e.g., \cite{Chang07,Chen13,vLFL+13} and recent survey papers \cite{RWF17, LMS+17}. In this paper, we investigate the dynamics of a coherent feedback network composed of two two-level systems; Fig. \ref{fig_sys}. The feedback network has two input channels, each containing one photon described in terms of its continuous-mode pulse shape. This coherent feedback network could be realized in waveguide quantum electro-dynamics (QED) devices. A significant feature of the coherent feedback structure is that photons can interact multiple times inside the loop, leading to an enhanced nonlinear effect. For this reason, this design can be viewed as a simplification of the $N$-site interaction structure proposed in \cite{BrodA16}. Interestingly, the coherent feedback network studied here is not Hurwitz stable; in fact, it is marginally stable. Consequently, the initial system information has to be taken care of.
The main result is an analytical form of the steady-state output two-photon state. To derive this output two-photon state, both Heisenberg picture and Schr\"{o}dinger picture have to be used. On one hand, as the system dynamics are given by a set of QSDEs for system operators as well as output operators, we need to work in the Heisenberg picture and in the input-output formalism \cite{HP84, GC85, GJ09}; on the other hand, to get the output field state from the input field state, we need the Schr\"{o}dinger picture. The combination of these two pictures complicates the derivation of the exact form of the output two-photon state. Indeed, quite a few lemmas are pre-requisite of the main result, Theorem \ref{thm:output_state}. In this paper, we also show that the proposed method is applicable to the single-photon case. The rest of the paper is organized as follows. The coherent quantum feedback network and two-photon input state are introduced in Sec. \ref{sec:pre}. The main result of this paper, an analytic form of the steady-state output two-photon state, is presented in Sec. \ref{sec:main_result}. In Sec. \ref{sec:1_photon}, it is shown that the proposed framework is also applicable to the single-photon input case. Numerical studies are carried out in Sec. \ref{sec:example}. Sec. \ref{sec:conclusion} concludes this paper. {\it Notation.} $x^{\ast }$ denotes the complex conjugate of a complex number $x$ or the adjoint of an operator $x$. The commutator of two operators $X$ and $Y$ is defined as $[X,Y]\triangleq XY-YX$. For a column vector $X=[x_{i}]$ with number or operator entries, $ X^{\#}=[x_{i}^{\ast }]$. $I_{k}$ is the identity matrix and $0_{k}$ the zero matrix in $ \mathbb{C}^{k\times k}$. $\delta _{ij}$ denotes the Kronecker delta and $\delta(t)$ denotes the Dirac delta. 
\vspace{-2mm} \section{Coherent feedback network and input state} \label{sec:pre} \begin{figure}[tbp] \centering \includegraphics[scale=0.68]{sys} \caption{$G_1$ and $G_2$ are two-level systems. The coherent feedback network is driven by two photons, one in each input channel designated by $b_{\rm L}$ and $b_{\rm R}$ respectively. $B_{\rm out,L}$ and $B_{\rm out,R}$ denote the two output channels.} \label{fig_sys} \end{figure} In this section, we introduce the coherent feedback network, as is shown in Fig. \ref{fig_sys}. We also introduce the two-photon input state for this feedback network. \vspace{-2mm} \subsection{Coherent feedback network}\label{subsec:network} The coherent feedback network, as shown in Fig. \ref{fig_sys}, has two inputs represented by annihilation operators $b_{\rm L}$ and $b_{\rm R}$ respectively. For simplicity, in this paper we assume that the central frequencies of these two input fields are the same, denoted by $\omega _{o}$. Moreover, we also suppose that the two two-level systems $G_1$ and $G_2$ have the same transition frequency between the ground state and excited state, denoted by $\omega_{a} $. Thus, the detuning frequency is $\omega _{c}=\omega _{o}-\omega _{a}$. Moreover, in this paper we assume that $G_1$ and $G_2$ have the same coupling strength $\kappa$ to the optical fields. The ground and excited states of $G_1$ and $G_2$ are $\ket{g_j}$ and $\ket{e_j}$, ($j=1,2$) respectively. Let $t_0$ be the time when the system and its inputs start to interact. In this paper, we are interested in the steady-state dynamics of the coherent feedback network in the limit $t_{0}\rightarrow -\infty $ and $t\to \infty$; i.e., the interaction occurs in the remote past and we look at the dynamics in the distant future; see e.g., \cite{Fan10, ZJ13, Pan16}. Define \begin{equation} \alpha \triangleq -i\omega _{c}-\kappa .
\label{alpha} \end{equation} The QSDEs for the two-level system $G_{1}$ are, \cite{GZ00, GJ09, CKS17}, \begin{eqnarray*} \dot{\sigma}_{-,1}(t)& =&\alpha \sigma _{-,1}(t)+\sqrt{\kappa }\sigma _{z,1}(t)b_{\rm L}(t)+\sqrt{\kappa }\sigma _{z,1}(t)b_{4}(t), \label{sys_1a} \\ b_{3}(t)& =&\sqrt{\kappa }\sigma _{-,1}(t)+b_{\rm L}(t), \label{sys_1b} \\ b_{\mathrm{out,R}}(t)& =&\sqrt{\kappa }\sigma _{-,1}(t)+b_{4}(t),\ \ \ t\geq t_{0}. \label{sys_1c} \end{eqnarray*} Similarly, the QSDEs for the two-level system $G_{2}$ are \begin{eqnarray*} \dot{\sigma}_{-,2}(t)& =&\alpha \sigma _{-,2}(t)+\sqrt{\kappa }\sigma _{z,2}(t)b_{3}(t)+\sqrt{\kappa }\sigma _{z,2}(t)b_{\rm R}(t), \label{sys_2a} \\ b_{\mathrm{out,L}}(t)& =&\sqrt{\kappa }\sigma _{-,2}(t)+b_{3}(t), \label{sys_2b} \\ b_{4}(t)& =&\sqrt{\kappa }\sigma _{-,2}(t)+b_{\rm R}(t),\ \ \ t\geq t_{0}. \label{sys_2c} \end{eqnarray*} In the above QSDEs, $\sigma_{-,j}$ and $\sigma_{z,j}$ are respectively the lowering operator and Pauli operator for $G_j$, ($j=1,2$). More specifically, $\sigma_{-,j}=\ket{g_j}\bra{e_j}$ and $\sigma_{z,j}=\ket{e_j}\bra{e_j}-\ket{g_j}\bra{g_j}$. Consequently, the QSDEs for the coherent feedback network in Fig. 
\ref{fig_sys} are \begin{subequations} \begin{align} \left[ \begin{array}{c} \dot{\sigma}_{-,1}(t) \\ \dot{\sigma}_{-,2}(t) \end{array} \right] & =\alpha \left[ \begin{array}{c} \sigma _{-,1}(t) \\ \sigma _{-,2}(t) \end{array} \right] +\kappa \left[ \begin{array}{c} \sigma _{z,1}(t)\sigma _{-,2}(t) \\ \sigma _{z,2}(t)\sigma _{-,1}(t) \end{array} \right] \nonumber \\ &\ \ \ +\sqrt{\kappa }\left[ \begin{array}{c} \sigma _{z,1}(t) \\ \sigma _{z,2}(t) \end{array} \right] \left( b_{\rm L}(t)+b_{\rm R}(t)\right) , \label{sys_e} \\ \nonumber \\ b_{\mathrm{out}}(t)& =\sqrt{\kappa }\ C \left[ \begin{array}{c} \sigma _{-,1}(t) \\ \sigma _{-,2}(t) \end{array} \right] +b_{\mathrm{in}}(t), \ t\geq t_{0}, \label{sys_f} \end{align} \end{subequations} where \begin{equation} C\triangleq \left[ \begin{array}{cc} 1 & 1 \\ 1 & 1 \end{array} \right] , \label{C} \end{equation} and \begin{equation*} b_{\mathrm{in}}(t)\triangleq \left[ \begin{array}{c} b_{\rm L}(t) \\ b_{\rm R}(t) \end{array} \right] ,\ b_{\mathrm{out}}(t)\triangleq \left[ \begin{array}{c} b_{\mathrm{out,L}}(t) \\ b_{\mathrm{out,R}}(t) \end{array} \right] \end{equation*} are input and output fields for the feedback network respectively. For the vector of inputs $b_{\mathrm{in}}(t)$ in the time domain, we define its Fourier transform as \begin{equation} b_{\mathrm{in}}[i\omega ] \triangleq \frac{1}{\sqrt{2\pi }}\int_{t_{0}}^{\infty }dt\ e^{-i\omega t}b_{\mathrm{in}}(t), \ \ \ \omega \in \mathbb{R}. \label{b_s_1} \end{equation} The inverse Fourier Transform is \begin{equation} b_{\mathrm{in}}(t) = \frac{1}{\sqrt{2\pi }}\int_{-\infty}^{\infty }d\omega\ e^{i\omega t}b_{\mathrm{in}}[i\omega],\ \ \ \ t\geq t_0. \label{b_t_1} \end{equation} \begin{remark}\label{rem:t_0} As mentioned above, the initial time $t_0$ will be sent to $-\infty$ later, thus Eq. (\ref{b_s_1}) is indeed the conventional Fourier transform. The same is true for Fourier transforms of other operators or functions to be presented in the sequel. 
\end{remark} The adjoint $b_{\mathrm{in}}^{\# }[i\omega ]$ of $b_{\mathrm{in}}[i\omega] $ is obtained by conjugating both sides of Eq. (\ref{b_s_1}), specifically, \begin{equation} b_{\mathrm{in}}^{\# }[i\omega ] = \frac{1}{\sqrt{2\pi }}\int_{t_{0}}^{\infty }dt\ e^{i\omega t}b_{\mathrm{in}}^{\# }(t), \ \ \ \omega \in \mathbb{R}. \label{b_s_2} \end{equation} Noticing \begin{equation} \lim_{t_{0}\rightarrow -\infty }\frac{1}{2\pi }\int_{t_{0}}^{\infty }dt\ e^{i\omega t} = \frac{1}{2\pi }\int_{-\infty }^{\infty }dt\ e^{i\omega t} =\delta (\omega ), \label{july2_delta} \end{equation} and the commutation relation \begin{equation} \lbrack b_{\mathrm{in}}(t),b_{\mathrm{in}}^{\# }(r)]=\delta (t-r)I_2,\ \ \ t,r\geq t_{0}, \label{CCR_t} \end{equation} we have \begin{align*} & \lim_{t_{0}\rightarrow -\infty }[b_{\mathrm{in}}[i\omega _{1}],\ b_{\mathrm{in}}^{\# }[i\omega _{2}]] = \lim_{t_{0}\rightarrow -\infty }\frac{1}{2\pi }\int_{t_{0}}^{\infty }dt\ e^{-i(\omega _{1}-\omega _{2})t} \nonumber \\ =& \delta (\omega _{1}-\omega _{2}) I_2,\ \ \ \omega _{1},\omega _{2}\in \mathbb{R}. \label{CCR_f} \end{align*} Similarly, we denote the Fourier transform of the vector of outputs $b_{\rm out}[t]$ by $b_{\rm out}[i\omega]$, whose adjoint is denoted by $b_{\rm out}^\# [i\omega]$. Finally, the Fourier transform of $\sigma _{-}(t)$ is \begin{equation} \sigma _{-}[i\omega ] = \frac{1}{\sqrt{2\pi }}\int_{t_{0}}^{\infty }dt\ e^{-i\omega t}\sigma _{-}(t), \label{july11_1a} \end{equation} whose adjoint is \begin{equation} \sigma _{+}[i\omega ] = (\sigma _{-}[i\omega ])^{\ast }=\frac{1}{\sqrt{2\pi }}\int_{t_{0}}^{\infty }dt\ e^{i\omega t}\sigma _{+}(t). \label{july11_1b} \end{equation} \vspace{-2mm} \subsection{Two-photon input state}\label{subsec:state} In this subsection, we introduce the input to the feedback network in Fig. \ref{fig_sys}. 
The input field $b_{\rm L}$ is in the continuous-mode single-photon state $b_{\rm L}^{\ast }(\xi_{\rm L})\vert 0_{\rm L}\rangle$, where $\vert 0_{\rm L}\rangle$ denotes the vacuum state of this field, and the operator $b_{\rm L}(\xi_{\rm L}) $ is defined to be \[ b_{\rm L}(\xi_{\rm L}) \triangleq \int_{t_{0}}^{\infty }b_{\rm L}(t)\xi_{\rm L} ^{\ast }(t)dt \] with $\xi_{\rm L}\in L_{2}(\mathbb{R},\mathbb{C})$ satisfying the normalization condition $\left\Vert \xi_{\rm L}\right\Vert \equiv \sqrt{\int_{t_0}^\infty |\xi_{\rm L}(t)|^2 dt}=1$. The physical interpretation of $\xi(t)$ is that $|\xi(t)|^2 dt$ is the probability of finding the photon in the time interval $[t, t+dt)$. Similarly, the input field $b_{\rm R}$ is in the continuous-mode single-photon state $b_{\rm R}^{\ast }(\xi_{\rm R})\vert 0_{\rm R}\rangle$, where $\vert 0_{\rm R}\rangle$ denotes the vacuum state of this field, and the operator $b_{\rm R}(\xi_{\rm R} ) $ is defined to be \[ b_{\rm R}(\xi_{\rm R} ) \triangleq \int_{t_{0}}^{\infty }b_{\rm R}(t)\xi_{\rm R} ^{\ast }(t)dt \] with $\xi_{\rm R} \in L_{2}(\mathbb{R},\mathbb{C})$ satisfying $\left\Vert \xi_{\rm R}\right\Vert=1$. Therefore, the two-photon input field state is \begin{equation} \left\vert \Psi _{\mathrm{in}}(t_{0})\right\rangle = b_{\rm L}^{\ast }(\xi_{\rm L})b_{\rm R}^{\ast }(\xi_{\rm R})\left\vert 0_{\rm L}0_{\rm R}\right\rangle . \label{initial} \end{equation} The adjoints of $b_{\rm L}(\xi_{\rm L})$ and $b_{\rm R}(\xi_{\rm R})$ are \begin{subequations} \begin{eqnarray} b_{\rm L}^{\ast }(\xi_{\rm L}) &\triangleq& (b_{\rm L}(\xi_{\rm L}))^\ast = \int_{t_{0}}^{\infty }b_{\rm L}^{\ast}(t)\xi_{\rm L}(t)dt, \label{may22_B_LR0} \\ b_{\rm R}^{\ast }(\xi_{\rm R} ) &\triangleq& (b_{\rm R}(\xi_{\rm R} ))^\ast = \int_{t_{0}}^{\infty }b_{\rm R}^{\ast}(t)\xi_{\rm R} (t)dt, \label{may22_B_LR} \end{eqnarray} \end{subequations} respectively. Similar to Eq. 
(\ref{b_s_1}), the Fourier transform of a function $\xi \in L_{2}(\mathbb{R},\mathbb{C})$ is \begin{equation}\label{xi_nu} \xi[i\nu ]=\frac{1}{\sqrt{2\pi }}\int_{t_{0}}^{\infty }dt\ e^{-i\nu t}\xi(t), \end{equation} whose inverse Fourier transform is \begin{equation}\label{xi_t} \xi(t)=\frac{1}{\sqrt{2\pi }}\int_{-\infty}^{\infty }d\nu\ e^{i\nu t}\xi[i\nu], \ \ \ t\geq t_0. \end{equation} \begin{example}\label{ex:photon} For the purpose of demonstration, we consider two single-photon states of Lorentzian-type pulse shape \begin{equation} \xi _{j}[i\nu ]=\frac{1}{\sqrt{2\pi }}\frac{\sqrt{\gamma _{j}}}{i(\nu +\omega _{o})-\frac{\gamma _{j}}{2}},\ \ j= {\rm L,R}, \label{dec9_xi_f} \end{equation} which in the time-domain are \begin{equation} \xi _{j}(t)=\left\{ \begin{array}{cc} 0, & t\geq 0, \\ -\sqrt{\gamma _{j}}e^{(\frac{\gamma _{j}}{2}-i\omega _{o})t}, & t<0, \end{array} \right. ,\ \ j= {\rm L,R}. \label{dec9_xi_t} \end{equation} Here, $\omega_o$ is the central frequency of the fields, as discussed in Subsection \ref{subsec:network}. In particular, when $\gamma _{\rm L}=\gamma _{\rm R} =\gamma$, the two photons have the same pulse shape, $\xi_{\rm L} = \xi_{\rm R} \equiv \xi $, given by \begin{equation}\label{jan14_xi} \xi \lbrack i\nu ]=\frac{1}{\sqrt{2\pi }}\frac{\sqrt{\gamma }}{i(\nu+\omega_o) -\frac{\gamma }{2}}, \end{equation} for which $\gamma $ is commonly called the full width at half maximum (FWHM); see, e.g., \cite[Chapter 2]{RL00}. It has been shown that a Lorentzian-type single-photon can excite a two-level atom fully; see, e.g., \cite{SAL09,WMS+11,YJ14,PZJ15}. \end{example} More discussions on continuous-mode single- and multi-photon states can be found in, e.g., \cite{RL00}, \cite{BCB+12}, \cite{SZX16}. \vspace{-2mm} \section{Steady-state output field state} \label{sec:main_result} In this section, we derive the steady-state output field state of the 2-qubit coherent feedback network driven by two photons, described in the previous section.
The basic set-up is presented in Subsection \ref{subsec:setup}, a key lemma is presented in Subsection \ref{subsec:key lemma}, the analytic expression of the steady-state output field state is derived in Subsection \ref{subsec:main}, calculation of probabilities of finding photons in output channels are given in Subsection \ref{subsec:prob}. \vspace{-2mm} \subsection{Basic set-up} \label{subsec:setup} Let the two-level systems $G_1$ and $G_2$ be initialized in the ground states $|g_1\rangle$ and $|g_2\rangle$ respectively, and the input be in the two-photon state as given in Eq. (\ref{initial}). Then the initial joint system-field state is \begin{equation*} \left\vert \Psi (t_{0})\right\rangle = \left\vert \Psi _{\mathrm{in}}(t_{0})\right\rangle \left\vert g_{1}g_{2}\right\rangle = b_{\rm L}^{\ast }(\xi_{\rm L})b_{\rm R}^{\ast }(\xi_{\rm R})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \label{Psi_t0} \end{equation*} In the Schr\"{o}dinger picture, at time instant $t\geq t_{0}$, the joint system-field state is \begin{equation*} \left\vert \Psi (t)\right\rangle = U(t, t_{0})b_{\rm L}^{\ast }(\xi_{\rm L})b_{\rm R}^{\ast }(\xi_{\rm R})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle , \label{t} \end{equation*} where $U(t, t_0)$ is a unitary operator defined on the joint system+field system satisfying $U(t_0,t_0)=I$ (namely, the identity operator). In the steady state ($t_{0}\rightarrow -\infty ,t\rightarrow \infty $), the two photons are in the two output channels, leaving the two-level systems in their ground state. 
As a result, the steady-state output two-photon state can be obtained by tracing out the system state; i.e., \begin{eqnarray} && \left\vert \Psi _{\mathrm{out}}\right\rangle \triangleq \lim_{t_{0}\rightarrow -\infty ,t\rightarrow \infty }\left\langle g_{1}g_{2}|\Psi (t)\right\rangle \label{infty} \\ &=& \lim_{t_{0}\rightarrow -\infty ,t\rightarrow \infty }\left\langle g_{1}g_{2}\right\vert U(t, t_{0})b_{\rm L}^{\ast }(\xi_{\rm L})b_{\rm R}^{\ast }(\xi_{\rm R})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \nonumber \end{eqnarray} Using Eqs. (\ref{may22_B_LR0})-(\ref{may22_B_LR}), Eq. (\ref{infty}) can be re-written as \begin{eqnarray} && \left\vert \Psi _{\mathrm{out}}\right\rangle \nonumber \\ &=&\lim_{t_{0}\rightarrow -\infty ,t\rightarrow \infty }\int_{t_{0}}^{t}dt_{1}\int_{t_{0}}^{t}dt_{2}\ \xi_{\rm L}(t_{1})\xi_{\rm R}(t_{2}) \nonumber \\ && \ \ \ \times \left\langle g_{1}g_{2}\right\vert U(t, t_{0})b_{\rm L}^{\ast }(t_{1})b_{\rm R}^{\ast }(t_{2})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \label{temp_1a} \end{eqnarray} Moreover, noticing that in the input-output formalism \cite{GZ00,GJ09}, $U(t, t_{0})^\ast b_{\rm L}(t)U(t, t_{0}) = b_{\mathrm{out,L}}(t)$ and $U(t, t_{0})^\ast b_{\rm R}(t)U(t, t_{0}) = b_{\mathrm{out,R}}(t)$, the inner product on the right-hand side of Eq.
(\ref{temp_1a}) can be expressed as \begin{align} & \left\langle g_{1}g_{2}\right\vert U(t, t_{0})b_{\rm L}^{\ast }(t_{1})b_{\rm R}^{\ast }(t_{2})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \label{temp_1b} \\ =& \frac{1}{2}\int dp_{1}\int dp_{2}\ \left\vert1_{Lp_{1}}1_{Lp_{2}}\right\rangle \nonumber \\ & \times \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out} ,L}(p_{1})b_{\mathrm{out,L}}(p_{2})b_{\rm L}^{\ast }(t_{1})b_{\rm R}^{\ast }(t_{2})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & +\int dp_{1}\int dp_{2}\ \left\vert 1_{Lp_{1}}1_{{\rm R}p_{2}}\right\rangle \nonumber \\ & \times \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out} ,L}(p_{1})b_{\mathrm{out,R}}(p_{2})b_{\rm L}^{\ast }(t_{1})b_{\rm R}^{\ast }(t_{2})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & +\frac{1}{2}\int dp_{1}\int dp_{2}\ \left\vert 1_{{\rm R}p_{1}}1_{{\rm R}p_{2}}\right\rangle \nonumber \\ & \times \langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\vert b_{\mathrm{out,R}}(p_{1})b_{\mathrm{out,R}}(p_{2})b_{\rm L}^{\ast }(t_{1})b_{\rm R}^{\ast}(t_{2})\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\rangle, \nonumber \end{align} where the time-domain 2-photon basis for the input fields \begin{align} &\bigg\{\frac{1}{2}\int_{-\infty}^{\infty }dp_{1}\int_{-\infty}^{\infty }dp_{2}\ \left\vert 1_{{\rm L}p_{1}}1_{{\rm L}p_{2}}\right\rangle \left\langle 1_{{\rm L}p_{1}}1_{{\rm L}p_{2}}\right\vert , \nonumber \\ &\int_{-\infty}^{\infty }dp_{1}\int_{-\infty}^{\infty }dp_{2}\ \left\vert 1_{{\rm L}p_{1}}1_{{\rm R}p_{2}}\right\rangle \left\langle 1_{{\rm L}p_{1}}1_{{\rm R}p_{2}}\right\vert , \nonumber \\ &\ \frac{1}{2}\int_{-\infty}^{\infty }dp_{1}\int_{-\infty}^{\infty }dp_{2}\ \left\vert 1_{{\rm R}p_{1}}1_{{\rm R}p_{2}}\right\rangle \left\langle 1_{{\rm R}p_{1}}1_{{\rm R}p_{2}}\right\vert\bigg\} \label{dec6:basis} \end{align} has been used. 
(It should be noticed that the notation \begin{equation} \left\vert 1_{Lt}\right\rangle \equiv b_{\rm L}^{\ast }(t)\left\vert 0_{\rm L}\right\rangle =\int_{-\infty}^{\infty }dr\ \delta (t-r)b_{\rm L}^{\ast }(r)\left\vert 0_{\rm L}\right\rangle ,\ \ t\geq t_{0} \label{impulse} \end{equation} in the time domain has been used in Eq. (\ref{dec6:basis}).) The substitution of Eq. (\ref{temp_1b}) into Eq. (\ref{temp_1a}) gives \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \label{mar17_1} \\ =& \lim_{t_0\to-\infty}\frac{1}{2}\int_{-\infty}^\infty dp_{1}\int_{-\infty}^\infty dp_{2} \left\vert 1_{{\rm L}p_{1}}1_{{\rm L}p_{2}}\right\rangle \nonumber \\ &\times \int_{t_{0}}^{\infty}dt_{1}\int_{t_{0}}^{\infty }dt_{2}\ \xi_{\rm L}(t_{1})\xi_{\rm R}(t_{2}) \nonumber \\ & \times \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out},L}(p_{1})b_{\mathrm{out,L}}(p_{2})b_{\rm L}^{\ast }(t_{1})b_{\rm R}^{\ast}(t_{2})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & + \lim_{t_0\to-\infty}\int_{-\infty}^\infty dp_{1}\int_{-\infty}^\infty dp_{2} \left\vert 1_{{\rm L}p_{1}}1_{{\rm R}p_{2}}\right\rangle \nonumber \\ & \times\int_{t_{0}}^{\infty }dt_{1}\int_{t_{0}}^{\infty }dt_{2}\ \xi_{\rm L}(t_{1})\xi_{\rm R}(t_{2}) \nonumber \\ & \times \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out} ,L}(p_{1})b_{\mathrm{out,R}}(p_{2})b_{\rm L}^{\ast }(t_{1})b_{\rm R}^{\ast }(t_{2})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & + \lim_{t_0\to-\infty}\frac{1}{2}\int_{-\infty}^\infty dp_{1}\int_{-\infty}^\infty dp_{2} \vert1_{{\rm R}p_{1}}1_{{\rm R}p_{2}}\rangle \nonumber \\ &\times \int_{t_{0}}^{\infty}dt_{1}\int_{t_{0}}^{\infty }dt_{2} \ \xi_{\rm L}(t_{1})\xi_{\rm R}(t_{2}) \nonumber \\ & \times \langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\vert b_{\mathrm{out,R}}(p_{1})b_{\mathrm{out,R}}(p_{2})b_{\rm L}^{\ast }(t_{1})b_{\rm R}^{\ast}(t_{2})\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\rangle .
\nonumber \end{align} Next, we go to the frequency domain by applying the Fourier transform to the time variables $t_{1}$ and $t_{2}$, respectively. According to Eqs. (\ref{b_t_1}) and (\ref{xi_t}), we have \begin{equation*} \int_{t_{0}}^{\infty }dt_{1}\ \xi_{\rm L}(t_{1})b_{\rm L}^{\ast }(t_{1})=\int_{-\infty }^{\infty }d\nu \ \xi_{\rm L}[i\nu ]b_{\rm L}^{\ast }[i\nu ]. \label{july7_1} \end{equation*} As a result, Eq. (\ref{mar17_1}) becomes \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \label{july1_1} \\ =& \frac{1}{2}\int_{-\infty}^\infty dp_{1}\int_{-\infty}^\infty dp_{2}\ \left\vert 1_{{\rm L}p_{1}}1_{{\rm L}p_{2}}\right\rangle \nonumber \\ & \times \int_{-\infty }^{\infty }d\nu _{1}\int_{-\infty }^{\infty }d\nu _{2}\ \xi_{\rm L}[i\nu _{1}]\xi_{\rm R}[i\nu _{2}] \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ & \times b_{\mathrm{out,L}}(p_{1})b_{\mathrm{out,L}}(p_{2})b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & +\int_{-\infty}^\infty dp_{1}\int_{-\infty}^\infty dp_{2}\ \left\vert 1_{{\rm L}p_{1}}1_{{\rm R}p_{2}}\right\rangle \nonumber \\ & \times \int_{-\infty }^{\infty }d\nu _{1}\int_{-\infty }^{\infty }d\nu _{2}\ \xi_{\rm L}[i\nu _{1}]\xi_{\rm R}[i\nu _{2}] \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ & \times b_{\mathrm{out,L}}(p_{1})b_{\mathrm{out,R}}(p_{2})b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & +\frac{1}{2}\int_{-\infty}^\infty dp_{1}\int_{-\infty}^\infty dp_{2}\ \left\vert 1_{{\rm R}p_{1}}1_{{\rm R}p_{2}}\right\rangle \nonumber \\ & \times \int_{-\infty }^{\infty }d\nu _{1}\int_{-\infty }^{\infty }d\nu _{2}\ \xi_{\rm L}[i\nu _{1}]\xi_{\rm R}[i\nu _{2}] \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ & \times b_{\mathrm{out, R}}(p_{1})b_{\mathrm{out,R}}(p_{2})b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 
0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \nonumber \end{align} Finally, we apply the Fourier transform to the time variables $p_1$ and $p_2$ in Eq. (\ref{july1_1}) to get \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \label{dec8_Psi} \\ =& \frac{1}{2}\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ \left\vert 1_{L\omega _{1}}1_{L\omega _{2}}\right\rangle \nonumber \\ & \ \ \ \times \int_{-\infty }^{\infty }d\nu _{1}\int_{-\infty }^{\infty }d\nu _{2}\ \xi_{\rm L}[i\nu _{1}]\xi_{\rm R}[i\nu _{2}] \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ & \ \ \ \times b_{\mathrm{out,L}}[i\omega _{1}]b_{\mathrm{out,L}}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & +\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ \left\vert 1_{L\omega _{1}}1_{R\omega _{2}}\right\rangle \nonumber \\ & \ \ \ \times \int_{-\infty }^{\infty }d\nu _{1}\int_{-\infty }^{\infty }d\nu _{2}\ \xi_{\rm L}[i\nu _{1}]\xi_{\rm R}[i\nu _{2}] \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ & \ \ \ \times b_{\mathrm{out,L}}[i\omega _{1}]b_{\mathrm{out,R}}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & +\frac{1}{2}\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ \left\vert 1_{R\omega _{1}}1_{R\omega _{2}}\right\rangle \nonumber \\ & \ \ \ \times \int_{-\infty }^{\infty }d\nu _{1}\int_{-\infty }^{\infty }d\nu _{2}\ \xi_{\rm L}[i\nu _{1}]\xi_{\rm R}[i\nu _{2}] \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ & \ \ \ \times b_{\mathrm{out,R}}[i\omega _{1}]b_{\mathrm{out,R}}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . 
\nonumber \end{align} Hence, in order to find an analytical expression for $\left\vert \Psi _{\mathrm{out}}\right\rangle $, we have to calculate the following quantities: \begin{subequations} \begin{align} & \langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\vert b_{\mathrm{out,L}}[i\omega _{1}]b_{\mathrm{out,L}}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\rangle, \label{3_key_a} \\ &\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\vert b_{\mathrm{out,L}}[i\omega _{1}]b_{\mathrm{out,R}}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\rangle, \label{3_key_b} \\ &\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\vert b_{\mathrm{out,R}}[i\omega _{1}]b_{\mathrm{out,R}}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\rangle. \label{3_key_d} \end{align} \end{subequations} \vspace{-2mm} \subsection{A key lemma}\label{subsec:key lemma} In this subsection, we derive a key lemma, Lemma \ref{lem:key}, which will be used to derive the main result of this paper. Define a matrix \begin{equation} A\triangleq \left[ \begin{array}{cc} \alpha & -\kappa \\ -\kappa & \alpha% \end{array} \right] =-\left[ \begin{array}{cc} i\omega _{c}+\kappa & \kappa \\ \kappa & i\omega _{c}+\kappa \end{array} \right] , \label{A} \end{equation} where $\alpha$ is given in Eq. (\ref{alpha}). It is easily found that the eigenvalues of the matrix $A$ are $-2\kappa -i\omega _{c}$ and $-i\omega _{c}$. Thus, $A$ is not a Hurwitz matrix. In order to establish Lemma \ref{lem:key}, we need to introduce two auxiliary lemmas. 
\begin{lemma}\label{lem:pre_1} The following three equations hold: \begin{subequations} \begin{align} &\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \left[ \begin{array}{c} \sigma _{-,1}(t) \\ \sigma _{-,2}(t) \end{array} \right] b_{\rm L}^{\ast }[i\omega ]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ =&\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \left[ \begin{array}{c} \sigma _{-,1}(t) \\ \sigma _{-,2}(t) \end{array} \right] b_{\rm R}^{\ast }[i\omega ]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ =&i\sqrt{\frac{\kappa }{2\pi }}e^{i\omega t}\frac{1-e^{-(2\kappa +i(\omega _{c}+\omega ))(t-t_{0})}}{(\omega _{c}+\omega )-2i\kappa }\left[ \begin{array}{c} 1 \\ 1 \end{array} \right] , \label{temp2} \end{align} \begin{align} &\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\rm L}[i\omega ]\left[ \begin{array}{c} \sigma _{+,1}(t) \\ \sigma _{+,2}(t) \end{array} \right] \left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ =&\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\rm R}[i\omega ]\left[ \begin{array}{c} \sigma _{+,1}(t) \\ \sigma _{+,2}(t) \end{array} \right] \left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ =&-i\sqrt{\frac{\kappa }{2\pi }}e^{-i\omega t}\frac{1-e^{-(2\kappa -i(\omega _{c}+\omega ))(t-t_{0})}}{(\omega _{c}+\omega )+2i\kappa }\left[ \begin{array}{c} 1 \\ 1 \end{array} \right], \label{june19_1a} \end{align} \end{subequations} and \begin{eqnarray} &&\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\rm L}(t)b_{\rm L}^{\ast }[i\omega ]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ &=& \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\rm R}(t)b_{\rm R}^{\ast }[i\omega ]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ &=& \frac{1}{\sqrt{2\pi }}e^{i\omega t}, \ \ {\rm as} \ \ t_0\to -\infty. \label{sept9_1} \end{eqnarray} \end{lemma} The proof of Lemma \ref{lem:pre_1} is given in the APPENDIX. 
Define a matrix function \begin{equation} g_{G}(t)\triangleq \left\{ \begin{array}{cc} \delta (t)-\kappa Ce^{At}C, & t\geq 0, \\ 0, & t<0. \end{array} \right. \label{sept7_tf} \end{equation} For the time domain function $g_{G}(t)$ defined in Eq. (\ref{sept7_tf}), we define its Laplace transform to be \begin{equation} G[s] \triangleq \int_{0}^{\infty }dt\ g_{G}(t)e^{-st}. \label{L} \end{equation} Actually, by the form of $g_{G}(t)$, the Laplace transform $G[s]$ defined in Eq. (\ref{L}) can be re-written as $G[s]=\int_{-\infty }^{\infty }dt\ g_{G}(t)e^{-st}$. For the matrix $A$ given in Eq. (\ref{A}), it can be shown that \begin{eqnarray} &&(i\omega -A)^{-1} \nonumber \\ &=&-\frac{1}{(\omega +\omega _{c})^{2}-2i(\omega +\omega _{c})\kappa } \nonumber \\ &&\times \left[ \begin{array}{cc} i(\omega +\omega _{c})+\kappa & -\kappa \\ -\kappa & i(\omega +\omega _{c})+\kappa \end{array} \right] . \label{july5_1} \end{eqnarray} Using Eqs. (\ref{C}) and (\ref{july5_1}) we get \begin{equation} C(i\omega -A)^{-1}C=\frac{2}{2\kappa +i(\omega +\omega _{c})}C. \label{dec22_1} \end{equation} By Eqs. (\ref{L}) and (\ref{dec22_1}), we obtain \begin{align} &G[i\omega ] =\frac{1}{\omega +\omega _{c}-2i\kappa }\left[ \begin{array}{cc} \omega +\omega _{c} & 2i\kappa \\ 2i\kappa & \omega +\omega _{c} \end{array} \right] \nonumber \\ \triangleq &\left[ \begin{array}{c} \Theta_{\rm L}[i\omega ] \\ \Theta_{\rm R}[i\omega ] \end{array} \right] \equiv \left[ \begin{array}{cc} \Theta _{1}[i\omega ] & \Theta _{2}[i\omega ] \\ \Theta _{2}[i\omega ] & \Theta _{1}[i\omega ] \end{array} \right] . 
\label{dec6_G} \end{align} \begin{lemma} \label{lem:pre_2} The following two equations hold: \begin{subequations} \begin{eqnarray} &&\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \left[ \begin{array}{c} \sigma _{-,1}[i\omega ] \\ \sigma _{-,2}[i\omega ] \end{array} \right] \label{dec20_9} \\ &=&\frac{1}{\sqrt{2\pi }}\frac{e^{-(2\kappa +i(\omega +\omega _{c}))t_{0}}}{% 2\left( 2\kappa +i(\omega +\omega _{c})\right) }Ce^{-At_{0}} \nonumber \\ && \ \ \ \times \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \left[ \begin{array}{c} \sigma _{-,1}(t_{0}) \\ \sigma _{-,2}(t_{0}) \end{array} \right] \nonumber \\ &&+\frac{1}{\sqrt{2\pi }}\frac{\pi }{2}\delta (\omega +\omega _{c})e^{-i(\omega +\omega _{c})t_{0}}\left[ \begin{array}{cc} 1 & -1 \\ -1 & 1 \end{array} \right] \nonumber \\ && \ \ \ \times e^{-At_{0}}\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \left[ \begin{array}{c} \sigma _{-,1}(t_{0}) \\ \sigma _{-,2}(t_{0}) \end{array} \right] \nonumber \\ &&-\frac{\sqrt{\kappa }}{2\kappa +i(\omega _{c}+\omega )}C\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{in}}[i\omega ], \nonumber \\ {\rm and} \nonumber \\ &&\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \left[ \begin{array}{c} b_{\mathrm{out,L}}[i\omega ] \\ b_{\mathrm{out,R}}[i\omega ] \end{array} \right] \label{dec20_10} \\ &=&\frac{\sqrt{\kappa }}{\sqrt{2\pi }}\frac{e^{-i\omega t_{0}}}{2\kappa +i(\omega +\omega _{c})}C\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \left[ \begin{array}{c} \sigma _{-,1}(t_{0}) \\ \sigma _{-,2}(t_{0}) \end{array} \right] \nonumber \\ &&+G[i\omega ]\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{in} }[i\omega ]. \nonumber \end{eqnarray} \end{subequations} \end{lemma} The proof of Lemma \ref{lem:pre_2} is given in the APPENDIX. 
For $i=1,2$, define functions \begin{subequations} \begin{align} &f_{{\rm L},i}(\omega _{1},p_{2},\nu _{1},\nu _{2}) \label{eq:jun4_5b} \\ \triangleq & \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\rm L}[i\omega _{1}]\sigma _{-,i}(p_{2})b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle , \nonumber \\ {\rm and} \nonumber \\ & f_{{\rm R},i}(\omega _{1},p_{2},\nu _{1},\nu _{2}) \label{eq:jun4_5a} \\ \triangleq & \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\rm R}[i\omega _{1}]\sigma _{-,i}(p_{2})b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle. \nonumber \end{align} \end{subequations} Fourier transforming $f_{{\rm L},i}$ and $f_{{\rm R},i}$ with respect to the time variable $p_2$ yields \begin{subequations} \begin{align} &f_{{\rm L},i}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \label{eq:jun4_5c} \\ =&\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\rm L}[i\omega _{1}]\sigma _{-,i}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle , \nonumber \\ {\rm and} \nonumber \\ & f_{{\rm R},i}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \label{eq:jun4_5d} \\ =&\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\rm R}[i\omega _{1}]\sigma _{-,i}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast }[i\nu _{2}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle , \nonumber \end{align} \end{subequations} respectively. We are ready to present the following lemma. \begin{lemma}\label{lem:key} The functions $f_{{\rm L},i}(\omega _{1},\omega _{2},\nu _{1},\nu _{2})$ in Eq. (\ref{eq:jun4_5c}) and $f_{{\rm R},i}(\omega _{1},\omega _{2},\nu _{1},\nu _{2})$ in Eq. 
(\ref{eq:jun4_5d}) can be calculated to be \begin{eqnarray} &&f_{{\rm L},1}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) = f_{{\rm L},2}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \label{aug18_2a} \\ &=&f_{{\rm R},1}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) = f_{{\rm R},2}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \nonumber \\ &=&g(\omega _{1},\omega _{2},\nu _{1},\nu _{2})\delta (\nu _{1}+\nu _{2}-\omega _{1}-\omega _{2}) \nonumber \\ &&-\frac{\sqrt{\kappa }}{i(\omega _{2}+\omega _{c})+2\kappa }\delta (\omega _{1}-\nu _{2})\delta (\nu _{1}-\omega _{2}), \nonumber \end{eqnarray} where \begin{align} &g(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \label{aug18_g} \\ \triangleq &-i\frac{\kappa ^{3/2}}{\pi }\frac{\nu _{1}+\nu _{2}+2\omega _{c}-4i\kappa }{(\omega _{1}+\omega _{c}+2i\kappa )(\omega _{2}+\omega _{c}-2i\kappa )} \nonumber \\ & \times \frac{\nu _{1}+\nu _{2}+2\omega _{c}}{(\nu _{1}+\omega _{c}-2i\kappa )(\nu _{2}+\omega _{c}-2i\kappa )(\nu _{1}+\nu _{2}+2\omega _{c}-2i\kappa )}. \nonumber \end{align} \end{lemma} The proof of Lemma \ref{lem:key} is given in the APPENDIX. \vspace{-2mm} \subsection{The steady-state output state} \label{subsec:main} In this subsection, we present the main result of this paper, which gives an analytic form of the steady-state output two-photon state of the coherent feedback network driven by two photons, one in each channel, as shown in Fig. \ref{fig_sys}. Define \begin{align} F_{\rm LL}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) &\triangleq \Theta _{2}[i\nu _{2}]\delta (\omega _{1}-\nu _{2})\delta (\omega _{2}-\nu _{1}) \label{F_LL} \\\ &+2\sqrt{\kappa }\Theta_{\rm L}[i\omega _{1}]\left[ \begin{array}{c} f_{{\rm L},1}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \\ f_{{\rm R},1}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \end{array} \right] . \nonumber \end{align} The following lemma presents an expression for Eq. (\ref{3_key_a}). \begin{lemma} \label{lem:LL} In the limit $t_0 \to -\infty$, Eq. 
(\ref{3_key_a}) can be calculated by \begin{align} &\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\vert b_{\mathrm{out,L}}[i\omega _{1}]b_{\mathrm{out,L}}[i\omega _{2}] b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast}[i\nu _{2}]\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\rangle \nonumber \\ =&F_{\rm LL}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}), \label{LL} \end{align} where $F_{\rm LL}(\omega _{1},\omega _{2},\nu _{1},\nu _{2})$ is defined in Eq. (\ref{F_LL}). \end{lemma} The proof of Lemma \ref{lem:LL} is given in the APPENDIX. Define \begin{align} F_{\rm LR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) &\triangleq \Theta _{1}[i\nu _{1}]\delta (\omega _{1}-\nu _{1})\delta (\omega _{2}-\nu _{2}) \label{F_LR} \\ &+2\sqrt{\kappa }\Theta_{\rm L}[i\omega _{1}]\left[ \begin{array}{c} f_{{\rm L},1}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \\ f_{{\rm R},1}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \end{array} \right]. \nonumber \end{align} The following lemma presents an expression for Eq. (\ref{3_key_b}). \begin{lemma} \label{lem:LR} In the limit $t_0 \to -\infty$, Eq. (\ref{3_key_b}) can be calculated by \begin{align} &\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\vert b_{\mathrm{out,L}}[i\omega _{1}]b_{\mathrm{out,R}}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast}[i\nu _{2}]\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\rangle \nonumber \\ =&F_{\rm LR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}), \label{LR} \end{align} where $F_{\rm LR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2})$ is defined in Eq. (\ref{F_LR}). \end{lemma} The proof of Lemma \ref{lem:LR} is given in the APPENDIX. Define \begin{align} F_{\rm RR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) &\triangleq \Theta _{2}[i\nu _{1}]\delta (\omega _{1}-\nu _{1})\delta (\omega _{2}-\nu _{2}) \label{F_RR}\\ &+2\sqrt{\kappa }\Theta_{\rm R}[i\omega _{1}]\left[ \begin{array}{c} f_{{\rm L},1}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \\ f_{{\rm R},1}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \end{array} \right] . 
\nonumber \end{align} The following lemma presents an expression for Eq. (\ref{3_key_d}). \begin{lemma} \label{lem:RR} In the limit $t_0 \to -\infty$, Eq. (\ref{3_key_d}) can be calculated by \begin{align} & \langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\vert b_{\mathrm{out,R}}[i\omega_{1}]b_{\mathrm{out,R}}[i\omega _{2}]b_{\rm L}^{\ast }[i\nu _{1}]b_{\rm R}^{\ast}[i\nu _{2}]\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\rangle \nonumber \\ =& F_{\rm RR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}), \label{RR} \end{align} where $F_{\rm RR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2})$ is defined in Eq. (\ref{F_RR}). \end{lemma} The proof of Lemma \ref{lem:RR} is given in the APPENDIX. On the basis of Lemmas \ref{lem:LL}-\ref{lem:RR}, we are able to derive the main result of this paper. \begin{theorem}\label{thm:output_state} The steady-state output two-photon state in Eq. (\ref{dec8_Psi}) can be calculated by \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \label{psi_out_ss} \\ =& \frac{1}{2}\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ T_{\rm LL}[\omega _{1},\omega _{2}]b_{\rm L}^{\ast }[i\omega _{1}]b_{\rm L}^{\ast }[i\omega _{2}]\left\vert 0_{\rm L}0_{\rm R}\right\rangle \nonumber \\ & +\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ T_{\rm LR}[\omega _{1},\omega _{2}]b_{\rm L}^{\ast }[i\omega _{1}]b_{\rm R}^{\ast }[i\omega _{2}]\left\vert 0_{\rm L}0_{\rm R}\right\rangle \nonumber \\ & +\frac{1}{2}\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ T_{\rm RR}[\omega _{1},\omega _{2}]b_{\rm R}^{\ast }[i\omega _{1}]b_{\rm R}^{\ast }[i\omega _{2}]\left\vert 0_{\rm L}0_{\rm R}\right\rangle , \nonumber \end{align} where \begin{subequations} \begin{align} &T_{\rm LL}[\omega _{1},\omega _{2}] \label{T_LL2a} \\ =&\ \xi_{\rm L}[i\omega _{1}]\xi_{\rm R}[i\omega _{2}]\frac{2i\kappa (\omega _{1}+\omega _{c})}{(\omega _{1}+\omega _{c}-2i\kappa )(\omega _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &+\xi_{\rm L}[i\omega 
_{2}]\xi_{\rm R}[i\omega _{1}]\frac{2i\kappa (\omega _{2}+\omega _{c})}{(\omega _{1}+\omega _{c}-2i\kappa )(\omega _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &+2\sqrt{\kappa }\frac{\omega _{1}+\omega _{c}+2i\kappa }{\omega _{1}+\omega _{c}-2i\kappa }\int_{-\infty }^{\infty }d\nu _{1}\ \xi_{\rm L}[i\nu _{1}] \nonumber \\ &\times \xi_{\rm R}[i(\omega _{1}+\omega _{2}-\nu _{1})]g(\omega _{1},\omega _{2},\nu _{1},\omega _{1}+\omega _{2}-\nu _{1}), \nonumber \\ \nonumber \\ &T_{\rm LR}[\omega _{1},\omega _{2}] \label{T_LR2a} \\ =&\xi_{\rm L}[i\omega _{1}]\xi_{\rm R}[i\omega _{2}]\frac{(\omega _{1}+\omega _{c})(\omega _{2}+\omega _{c})}{(\omega _{1}+\omega _{c}-2i\kappa )(\omega _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &-\xi_{\rm L}[i\omega _{2}]\xi_{\rm R}[i\omega _{1}]\frac{(2\kappa )^{2}}{(\omega _{1}+\omega _{c}-2i\kappa )(\omega _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &+2\sqrt{\kappa }\frac{\omega _{1}+\omega _{c}+2i\kappa }{\omega _{1}+\omega _{c}-2i\kappa }\int_{-\infty }^{\infty }d\nu _{1}\xi_{\rm L}[i\nu _{1}] \nonumber \\ &\times \xi_{\rm R}[i(\omega _{1}+\omega _{2}-\nu _{1})]g(\omega _{1},\omega _{2},\nu _{1},\omega _{1}+\omega _{2}-\nu _{1}), \nonumber \\ {\rm and} \nonumber \\ &T_{\rm RR}[\omega _{1},\omega _{2}] \label{T_RR2a} \\ =&\xi_{\rm L}[i\omega _{1}]\xi_{\rm R}[i\omega _{2}]\frac{2i\kappa (\omega _{2}+\omega _{c})}{(\omega _{1}+\omega _{c}-2i\kappa )(\omega _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &+\xi_{\rm L}[i\omega _{2}]\xi_{\rm R}[i\omega _{1}]\frac{2i\kappa (\omega _{1}+\omega _{c})}{(\omega _{2}+\omega _{c}-2i\kappa )(\omega _{1}+\omega _{c}-2i\kappa )} \nonumber \\ &+2\sqrt{\kappa }\frac{\omega _{1}+\omega _{c}+2i\kappa }{\omega _{1}+\omega _{c}-2i\kappa }\int_{-\infty }^{\infty }d\nu _{1}\xi_{\rm L}[i\nu _{1}] \nonumber \\ &\times \xi_{\rm R}[i(\omega _{1}+\omega _{2}-\nu _{1})]g(\omega _{1},\omega _{2},\nu _{1},\omega _{1}+\omega _{2}-\nu _{1}). 
\nonumber \end{align} \end{subequations} \end{theorem} {\it Proof.} Define \begin{subequations} \begin{eqnarray} T_{\rm LL}[\omega _{1},\omega _{2}] &\triangleq& \int_{-\infty }^{\infty }d\nu _{1}\int_{-\infty }^{\infty }d\nu _{2}\ \xi_{\rm L}[i\nu _{1}]\xi_{\rm R}[i\nu _{2}] \nonumber \\ && \times F_{\rm LL}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}), \label{T_LL2} \\ \nonumber \\ T_{\rm LR}[\omega _{1},\omega _{2}] &\triangleq& \int_{-\infty }^{\infty }d\nu _{1}\int_{-\infty }^{\infty }d\nu _{2}\ \xi_{\rm L}[i\nu _{1}]\xi_{\rm R}[i\nu _{2}] \nonumber \\ &&\times F_{\rm LR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}), \label{T_LR2} \\ \nonumber \\ T_{\rm RR}[\omega _{1},\omega _{2}] &\triangleq& \int_{-\infty }^{\infty }d\nu _{1}\int_{-\infty }^{\infty }d\nu _{2}\ \xi_{\rm L}[i\nu _{1}]\xi_{\rm R}[i\nu _{2}] \nonumber \\ &&\times F_{\rm RR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}). \label{T_RR2} \end{eqnarray} \end{subequations} By Lemmas \ref{lem:LL}-\ref{lem:RR} and the functions $T_{\rm LL},T_{\rm LR}, T_{\rm RR}$ defined above, it can be readily shown that the steady-state output field state in Eq. (\ref{dec8_Psi}) is in the form of Eq. (\ref{psi_out_ss}). Therefore, it suffices to show that $T_{\rm LL}$ defined in Eqs. (\ref{T_LL2}), $T_{\rm LR}$ in Eqs. (\ref{T_LR2}), and $T_{\rm RR}$ in Eqs. (\ref{T_RR2}) can be evaluated by means of Eqs. (\ref{T_LL2a})-(\ref{T_RR2a}) respectively. By Lemma \ref{lem:key}, $F_{\rm LL}(\omega _{1},\omega _{2},\nu _{1},\nu _{2})$ in Eq. 
(\ref{F_LL}) becomes \begin{eqnarray} &&F_{\rm LL}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \nonumber \\ &=&\frac{2i\kappa (\nu _{1}+\omega _{c})}{(\nu _{2}+\omega _{c}-2i\kappa )(\nu _{1}+\omega _{c}-2i\kappa )} \nonumber \\ &&\times \left( \delta (\omega _{1}-\nu _{2})\delta (\omega _{2}-\nu _{1})+\delta (\omega _{1}-\nu _{1})\delta (\nu _{2}-\omega _{2})\right) \nonumber \\ &&+2\sqrt{\kappa }\frac{\omega _{1}+\omega _{c}+2i\kappa }{\omega _{1}+\omega _{c}-2i\kappa } \nonumber \\ &&\times g(\omega _{1},\omega _{2},\nu _{1},\nu _{2})\delta (\nu _{1}+\nu _{2}-\omega _{1}-\omega _{2}) . \label{aug18_3a} \end{eqnarray} Substituting Eq. (\ref{aug18_3a}) into Eq. (\ref{T_LL2}) yields Eq. (\ref{T_LL2a}). Similarly, by Lemma \ref{lem:key}, $F_{\rm LR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2})$ in Eq. (\ref{F_LR}) becomes \begin{eqnarray} &&F_{\rm LR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \nonumber \\ &=&\frac{(\nu _{1}+\omega _{c})(\nu _{2}+\omega _{c})\delta (\omega _{1}-\nu _{1})\delta (\omega _{2}-\nu _{2})}{(\nu _{1}+\omega _{c}-2i\kappa )(\nu _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &&-\frac{(2\kappa )^{2}\delta (\omega _{1}-\nu _{2})\delta (\nu _{1}-\omega _{2})}{(\nu _{1}+\omega _{c}-2i\kappa )(\nu _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &&+2\sqrt{\kappa }\frac{\omega _{1}+\omega _{c}+2i\kappa }{\omega _{1}+\omega _{c}-2i\kappa } \nonumber \\ &&\times g(\omega _{1},\omega _{2},\nu _{1},\nu _{2})\delta (\nu _{1}+\nu _{2}-\omega _{1}-\omega _{2}). \label{aug18_3b} \end{eqnarray} Substituting Eq. (\ref{aug18_3b}) into Eq. (\ref{T_LR2}) yields Eq. (\ref{T_LR2a}). Finally, by Lemma \ref{lem:key}, $F_{\rm RR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2})$ in Eq. 
(\ref{F_RR}) becomes \begin{eqnarray} &&F_{\rm RR}(\omega _{1},\omega _{2},\nu _{1},\nu _{2}) \nonumber \\ &=&\frac{2i\kappa (\nu _{2}+\omega _{c})}{(\nu _{1}+\omega _{c}-2i\kappa )(\nu _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &&\times (\delta (\omega _{1}-\nu _{1})\delta (\omega _{2}-\nu _{2})+\delta (\omega _{1}-\nu _{2})\delta (\nu _{1}-\omega _{2})) \nonumber \\ &&+2\sqrt{\kappa }\frac{\omega _{1}+\omega _{c}+2i\kappa }{\omega _{1}+\omega _{c}-2i\kappa } \nonumber \\ &&\times g(\omega _{1},\omega _{2},\nu _{1},\nu _{2})\delta (\nu _{1}+\nu _{2}-\omega _{1}-\omega _{2}). \label{aug18_3c} \end{eqnarray} Substituting Eq. (\ref{aug18_3c}) into Eq. (\ref{T_RR2}) yields Eq. (\ref{T_RR2a}). \hfill $\blacksquare$ \begin{corollary} \label{cor:kappa} In the limit $\kappa \to \infty$, the steady-state output field state is \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \label{psi_out_ss_2} \\ =& \int_{-\infty}^\infty d\omega_1 \xi_{\rm R}[i\omega _{1}]b_{\rm L}^{\ast }[i\omega _{1}]|0_{\rm L}\rangle \int_{-\infty}^\infty d\omega_2 \xi_{\rm L}[i\omega _{2}]b_{\rm R}^{\ast }[i\omega _{2}]|0_{\rm R}\rangle. \nonumber \end{align} That is, the left-going output channel contains a single-photon packet $\xi_{\rm R}$, and the right-going output channel contains a single-photon packet $\xi_{\rm L}$. On the other hand, in the limit $\kappa \to 0$, The steady-state output field state is \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \label{psi_out_ss_3} \\ =& \int_{-\infty}^\infty d\omega_1 \xi_{\rm L}[i\omega _{1}]b_{\rm L}^{\ast }[i\omega _{1}]|0_{\rm L}\rangle \int_{-\infty}^\infty d\omega_2 \xi_{\rm R}[i\omega _{2}]b_{\rm R}^{\ast }[i\omega _{2}]|0_{\rm R}\rangle. \nonumber \end{align} That is, the left-going output channel contains a single-photon packet $\xi_{\rm L}$, and the right-going output channel contains a single-photon packet $\xi_{\rm R}$. \end{corollary} {\it Proof.} By Eqs. 
(\ref{T_LL2})-(\ref{T_RR2}), we have \begin{subequations} \begin{eqnarray} \lim_{\kappa \rightarrow \infty }T_{LL}[\omega _{1},\omega _{2}] &=& 0, \label{nov13_1a} \\ \lim_{\kappa \rightarrow \infty }T_{LR}[\omega _{1},\omega _{2}] &=& \xi_{\rm L}[i\omega _{2}]\xi _{\rm R}[i\omega _{1}] \label{sept10_3a}, \label{nov13_1b} \\ \lim_{\kappa \rightarrow \infty }T_{RR}[\omega _{1},\omega _{2}] &=& 0. \label{nov13_1c} \end{eqnarray} \end{subequations} Substituting Eqs. (\ref{nov13_1a})-(\ref{nov13_1c}) into Eq. (\ref{psi_out_ss}) yields Eq. (\ref{psi_out_ss_2}). Eq. (\ref{psi_out_ss_3}) can be established in a similar way. \hfill $\blacksquare$ \begin{remark} On the one hand, when the coupling strength $\kappa$ is small, the interaction between the two-level systems and the input photons is weak. In the limit $\kappa \to 0$, the right- (left-) going photon will be in the right (left) output channel; that is, each photon passes through unchanged. This explains Eq. (\ref{psi_out_ss_3}). On the other hand, in the strong coupling limit $\kappa \to \infty$, each two-level system acts as a mirror so that each input photon is bounced back. This explains Eq. (\ref{psi_out_ss_2}). \end{remark} The following result presents a special case of Theorem \ref{thm:output_state}. \begin{corollary} \label{cor:ss} When $\xi_{\rm L}= \xi_{\rm R}\equiv \xi $; i.e., the input photons have the same pulse shape, the steady-state output two-photon state in Eq. 
(\ref{psi_out_ss}) can be calculated by \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \label{psi_out_ss_4} \\ =& \frac{1}{2}\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ T_{\rm LL}[\omega _{1},\omega _{2}]b_{\rm L}^{\ast }[i\omega _{1}]b_{\rm L}^{\ast }[i\omega _{2}]\left\vert 00\right\rangle \nonumber \\ & +\int_{-\infty}^\infty d\omega_{1}\int_{-\infty}^\infty d\omega _{2}\ T_{\rm LR}[\omega _{1},\omega _{2}]b_{\rm L}^{\ast }[i\omega _{1}]b_{\rm R}^{\ast }[i\omega _{2}]\left\vert 00\right\rangle \nonumber \\ & +\frac{1}{2}\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ T_{\rm RR}[\omega _{1},\omega _{2}]b_{\rm R}^{\ast }[i\omega _{1}]b_{\rm R}^{\ast }[i\omega _{2}]\left\vert 00\right\rangle , \nonumber \end{align} where \begin{subequations} \begin{eqnarray} &&T_{\rm LL}[\omega _{1},\omega _{2}] = T_{\rm RR}[\omega _{1},\omega _{2}] \label{Aug22_1} \\ &=&\xi \lbrack i\omega _{1}]\xi[i\omega _{2}]\frac{2i\kappa (\omega _{1}+\omega _{2}+\omega _{c})}{(\omega _{1}+\omega _{c}-2i\kappa )(\omega _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &&+2\sqrt{\kappa }\frac{\omega _{1}+\omega _{c}+2i\kappa }{\omega _{1}+\omega _{c}-2i\kappa } \times \int_{-\infty }^{\infty }d\nu _{1}\ \xi \lbrack i\nu _{1}] \nonumber \\ &&\times \xi \lbrack i(\omega _{1}+\omega _{2}-\nu _{1})] g(\omega _{1},\omega _{2},\nu _{1},\omega _{1}+\omega _{2}-\nu _{1}), \nonumber \\ {\rm and} \nonumber \\ &&T_{\rm LR}[\omega _{1},\omega _{2}] \label{T_LR2b} \\ &=&\xi \lbrack i\omega _{1}]\xi \lbrack i\omega _{2}]\frac{(\omega _{1}+\omega _{c})(\omega _{2}+\omega _{c})-(2\kappa )^{2}}{(\omega _{1}+\omega _{c}-2i\kappa )(\omega _{2}+\omega _{c}-2i\kappa )} \nonumber \\ &&+2\sqrt{\kappa }\frac{\omega _{1}+\omega _{c}+2i\kappa }{\omega _{1}+\omega _{c}-2i\kappa }\int_{-\infty }^{\infty }d\nu _{1}\xi \lbrack i\nu _{1}] \nonumber \\ &&\times \xi \lbrack i(\omega _{1}+\omega _{2}-\nu _{1})]g(\omega _{1},\omega _{2},\nu _{1},\omega _{1}+\omega _{2}-\nu 
_{1}). \nonumber \end{eqnarray} \end{subequations} Moreover, \begin{equation} T_{\rm RR}^{\ast }[\omega _{1},\omega _{2}]T_{\rm RR}[\omega _{2},\omega _{1}]=\left\vert T_{\rm RR}[\omega _{1},\omega _{2}]\right\vert ^{2}. \label{nov4_1} \end{equation} \end{corollary} {\it Proof.} Eqs. (\ref{Aug22_1})-(\ref{T_LR2b}) are immediate consequences of Theorem \ref{thm:output_state} for $\xi_{\rm L}= \xi_{\rm R}\equiv \xi $. Eq. (\ref{nov4_1}) could be established via conventional, though tedious, calculations. \hfill $\blacksquare$ \begin{remark} It should be noted that Eq. (\ref{Aug22_1}) does not hold for general input wavepackets. \end{remark} \vspace{-2mm} \subsection{The probabilities}\label{subsec:prob} Let $P_{\rm LL}$ denote the probability of finding two photons in the left-going output channel $b_{\rm out,L}$, $P_{\rm RR}$ the probability of finding two photons in the right-going output channel $b_{\rm out,R}$, and $P_{\rm LR}$ the probability of finding one photon in each output channel, respectively. By Theorem \ref{thm:output_state}, we have \begin{eqnarray*} P_{\rm LL} & = &\frac{1}{4}\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ \bigg( \left\vert T_{\rm LL}[\omega _{1},\omega _{2}]\right\vert ^{2} \\ && \ \ \ +T_{\rm LL}^{\ast }[\omega _{1},\omega _{2}]T_{\rm LL}[\omega _{2},\omega _{1}] \bigg) , \\ P_{\rm LR} & = &\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ \left\vert T_{\rm LR}[\omega _{1},\omega _{2}]\right\vert ^{2}, \\ P_{\rm RR} & = &\frac{1}{4}\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ \bigg( \left\vert T_{\rm RR}[\omega _{1},\omega _{2}]\right\vert ^{2} \\ \\ &&\ \ \ +T_{\rm RR}^{\ast }[\omega _{1},\omega _{2}]T_{\rm RR}[\omega _{2},\omega _{1}] \bigg) . 
\end{eqnarray*} In particular, when $\xi_{\rm L}\equiv \xi_{\rm R}$, by Corollary \ref{cor:ss}, we get \begin{eqnarray*} P_{\rm LL} =P_{\rm RR} &=& \frac{1}{2}\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ \left\vert T_{\rm LL}[\omega _{1},\omega _{2}]\right\vert ^{2}, \\ P_{\rm LR} &=&\int_{-\infty}^\infty d\omega _{1}\int_{-\infty}^\infty d\omega _{2}\ \left\vert T_{\rm LR}[\omega _{1},\omega _{2}]\right\vert ^{2}. \end{eqnarray*} \vspace{-2mm} \section{The single-photon input case}\label{sec:1_photon} In this section, we show that the framework presented in the previous section can also be applied to the single-photon input case. Let us assume that the left-going input field $b_{\rm L}$ is still in the single-photon state $b_{\rm L}^{\ast }(\xi_{\rm L})\vert 0_{\rm L}\rangle$, but the right-going input field $b_{\rm R}$ is initialized in the vacuum state $|0_{\rm R}\rangle$. Then, the joint system-field state is \begin{equation*} \left\vert \Psi (t)\right\rangle =U(t, t_{0})b_{\rm L}^{\ast }(\xi_{\rm L})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \label{aug28_infty_t} \end{equation*} In the steady-state case ($t_{0}\rightarrow -\infty ,t\rightarrow \infty $), the single photon is in the two output channels, leaving the two-level systems in their ground state. As a result, the steady-state output single-photon state is \begin{eqnarray} \left\vert \Psi _{\mathrm{out}}\right\rangle &=&\lim_{t_{0}\rightarrow -\infty ,t\rightarrow \infty }\left\langle g_{1}g_{2}|\Psi (t)\right\rangle \label{aug28_infty} \\ &=&\lim_{t_{0}\rightarrow -\infty ,t\rightarrow \infty }\left\langle g_{1}g_{2}\right\vert U(t, t_{0})b_{\rm L}^{\ast }(\xi_{\rm L})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \nonumber \end{eqnarray} Notice that by Eqs. 
(\ref{may22_B_LR0})-(\ref{may22_B_LR}), we have \begin{align} &\lim_{t_{0}\rightarrow -\infty ,t\rightarrow \infty }\left\langle g_{1}g_{2}\right\vert U(t, t_{0})b_{\rm L}^{\ast }(\xi_{\rm L})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \label{aug28__temp_1a} \\ =&\lim_{t_{0}\rightarrow -\infty ,t\rightarrow \infty }\int_{t_{0}}^{t}dt_{1}\xi_{\rm L}(t_{1})\left\langle g_{1}g_{2}\right\vert U(t, t_{0})b_{\rm L}^{\ast }(t_{1})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \nonumber \end{align} Moreover, similar to Eq. (\ref{temp_1b}), the inner product on the right hand-side of Eq. (\ref{aug28__temp_1a}) can be re-written as \begin{align} & \left\langle g_{1}g_{2}\right\vert U(t, t_{0})b_{\rm L}^{\ast }(t_{1})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \label{aug28__temp_1b} \\ =& \int_{-\infty}^\infty dp_{1}\left\vert 1_{{\rm L}p_{1}}\right\rangle \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out,L}}(p_{1})b_{\rm L}^{\ast }(t_{1})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & +\int_{-\infty}^\infty dp_{1} \left\vert 1_{{\rm R}p_{1}}\right\rangle \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out,R}}(p_{1})b_{\rm L}^{\ast }(t_{1})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle , \nonumber \end{align} where the time-domain 1-photon basis for the input field \begin{equation*} \left\{ \int_{-\infty}^{\infty }dp_{1}\left\vert 1_{{\rm L}p_{1}}\right\rangle \left\langle 1_{{\rm L}p_{1}}\right\vert ,\ \int_{-\infty}^{\infty }dp_{1}\ \left\vert 1_{{\rm R}p_{1}}\right\rangle \left\langle 1_{{\rm R}p_{1}}\right\vert \right\} \label{aug28_dec6:basis} \end{equation*} has been used. Substituting Eqs. (\ref{aug28__temp_1a})-(\ref{aug28__temp_1b}) into Eq. 
(\ref{aug28_infty}) gives \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \label{aug28_mar17_1} \\ =&\lim_{t_0\to-\infty} \int_{-\infty}^\infty dp_{1}\left\vert 1_{{\rm L}p_{1}}\right\rangle \int_{t_{0}}^{\infty}dt_{1} \xi_{\rm L}(t_{1}) \nonumber \\ & \ \ \ \times \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{ \mathrm{out,L}}(p_{1})b_{\rm L}^{\ast }(t_{1})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & +\lim_{t_0\to-\infty} \int_{-\infty}^\infty dp_{1}\left\vert 1_{{\rm R}p_{1}}\right\rangle \int_{t_{0}}^{\infty }dt_{1}\ \xi_{\rm L}(t_{1}) \nonumber \\ & \ \ \ \times \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{ \mathrm{out,R}}(p_{1})b_{\rm L}^{\ast }(t_{1})\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \nonumber \end{align} As with the two-photon case, we go to the frequency domain by applying the Fourier transform to the time variable $t_{1}$. In the frequency domain, Eq. (\ref{aug28_mar17_1}) becomes \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \label{aug28_dec8_Psi} \\ =& \int_{-\infty}^\infty d\omega _{1}\ \left\vert 1_{L\omega _{1}}\right\rangle \int_{-\infty }^{\infty }d\nu _{1}\ \xi_{\rm L}[i\nu _{1}] \nonumber \\ & \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out,L}}[i\omega _{1}]b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & + \int_{-\infty}^\infty d\omega _{1}\ \left\vert 1_{R\omega _{1}}\right\rangle \int_{-\infty }^{\infty }d\nu _{1}\ \xi_{\rm L}[i\nu _{1}] \nonumber \\ & \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out,R}}[i\omega _{1}]b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . 
\nonumber \end{align} Therefore, we have to calculate the following quantities: \begin{subequations} \begin{align} & \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out,L}}[i\omega _{1}]b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle , \label{aug28_3_key_a} \\ & \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out,R}}[i\omega _{1}]b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \label{aug28_3_key_d} \end{align} \end{subequations} First, we consider Eq. (\ref{aug28_3_key_a}). By Eqs. (\ref{sys_f}) and (\ref{dec20_10}) we have \begin{align} & \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out},L}(p_{1})b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ =& \frac{1}{\sqrt{2\pi }}\int_{-\infty }^{\infty }d\omega _{1}\ e^{i\omega _{1}p_{1}}\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ & \ \ \ \times \Theta_{\rm L}[i\omega _{1}]b_{\mathrm{in}}[i\omega _{1}]b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle . \label{aug28_eq:jun4_2} \end{align} Using (\ref{sept9_1}), in the limit $t_{0}\rightarrow -\infty $, Eq. (\ref{aug28_eq:jun4_2}) can be simplified to be \begin{align} &\frac{1}{\sqrt{2\pi }} \int_{-\infty}^\infty d\omega _{1}\ e^{i\omega _{1}p_{1}}\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ &\times (\Theta _{1}[i\omega _{1}]b_{\rm L}[i\omega _{1}]+\Theta _{2}[i\omega _{1}]b_{\rm R}[i\omega _{1}])b_{\rm L}^{\ast }[i\nu _{1}] \left\vert0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ =& \frac{1}{\sqrt{2\pi }}\Theta _{1}[i\nu _{1}]e^{i\nu _{1}p_{1}}. \label{aug28_july1_2} \end{align} By Eqs. 
(\ref{aug28_eq:jun4_2})-(\ref{aug28_july1_2}), we have \begin{eqnarray} &&\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out,L}}[i\omega _{1}]b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ &=&\Theta _{1}[i\nu _{1}]\delta (\omega _{1}-\nu _{1}). \label{single_LL} \end{eqnarray} Next, we consider Eq. (\ref{aug28_3_key_d}). By Eqs. (\ref{sys_f}) and (\ref{dec20_10}) we have \begin{align} & \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out},R}(p_{1})b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ =& \frac{1}{\sqrt{2\pi }}\int_{-\infty }^{\infty }d\omega _{1}\ e^{i\omega _{1}p_{1}} \left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ & \ \ \ \times \Theta_{\rm R}[i\omega _{1}]b_{\mathrm{in}}[i\omega _{1}]b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle. \label{aug28_july14_5} \end{align} Using (\ref{sept9_1}), in the limit $t_{0}\rightarrow -\infty $, Eq. (\ref{aug28_july14_5}) can be simplified to be \begin{align} & \frac{1}{\sqrt{2\pi }} \int_{-\infty}^\infty d\omega _{1}\ e^{i\omega _{1}p_{1}}\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert \nonumber \\ & \times (\Theta _{2}[i\omega _{1}]b_{\rm L}[i\omega _{1}]+\Theta _{1}[i\omega _{1}]b_{\rm R}[i\omega _{1}])b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ & =\frac{1}{\sqrt{2\pi }}\Theta _{2}[i\nu _{1}]e^{i\nu _{1}p_{1}}. \label{nov9_temp1} \end{align} By Eqs. 
(\ref{aug28_july14_5}) and (\ref{nov9_temp1}), we obtain \begin{eqnarray} &&\left\langle 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\vert b_{\mathrm{out,R}}[i\omega _{1}]b_{\rm L}^{\ast }[i\nu _{1}]\left\vert 0_{\rm L}0_{\rm R}g_{1}g_{2}\right\rangle \nonumber \\ &=&\frac{1}{\sqrt{2\pi }}\int_{-\infty }^{\infty }dp_{1}\ e^{-i\omega _{1}p_{1}}\frac{1}{\sqrt{2\pi }}\Theta _{2}[i\nu _{1}]e^{i\nu _{1}p_{1}} \nonumber \\ &=&\Theta _{2}[i\nu _{1}]\delta (\omega _{1}-\nu _{1}). \label{single_LR} \end{eqnarray} Substituting Eqs. (\ref{single_LL}) and (\ref{single_LR}) into Eq. (\ref{aug28_dec8_Psi}) yields the steady-state output single-photon state, which is \begin{align} & \left\vert \Psi _{\mathrm{out}}\right\rangle \nonumber \\ =& \int_{-\infty}^\infty d\omega \ \xi_{\rm L}[i\omega ]\bigg( \frac{\omega +\omega _{c}}{ \omega +\omega _{c}-2i\kappa }b_{\rm L}^{\ast }[i\omega ] \nonumber \\ & \ \ \ +\frac{2i\kappa }{ \omega +\omega _{c}-2i\kappa }b_{\rm R}^{\ast }[i\omega ]\bigg) \vert0_{\rm L}0_{\rm R}\rangle \nonumber \\ =& \int_{-\infty}^\infty d\omega \ \left( G[i\omega ]\left[ \begin{array}{c} \xi_{\rm L}[i\omega ] \\ 0 \end{array} \right] \right) ^{T}b_{\mathrm{in}}^{\#}[i\omega ] \vert0_{\rm L}0_{\rm R}\rangle. \label{aug28_Psi_out} \end{align} Denote \begin{eqnarray*} \eta_{\rm L}[i\omega ] =\frac{\omega +\omega _{c}}{\omega +\omega _{c}-2i\kappa }\xi_{\rm L}[i\omega ], \ \ \eta_{\rm R}[i\omega ] =\frac{2i\kappa }{\omega +\omega _{c}-2i\kappa }\xi_{\rm L}[i\omega ]. \label{eta_R} \end{eqnarray*} Substituting them into Eq. (\ref{aug28_Psi_out}) yields \begin{equation} \left\vert \Psi _{\mathrm{out}}\right\rangle = \int_{-\infty}^\infty d\omega \ \left( \eta_{\rm L}[i\omega ]b_{\rm L}^{\ast }[i\omega ]+\eta_{\rm R}[i\omega ]b_{\rm R}^{\ast }[i\omega ]\right) \vert0_{\rm L}0_{\rm R}\rangle. 
\label{sept10_1} \end{equation} Clearly, \begin{subequations} \begin{align} \lim_{\kappa \rightarrow \infty }\left\vert \Psi _{\mathrm{out}}\right\rangle =& |0_{\rm L}\rangle \otimes \int_{-\infty}^\infty d\omega \ (-\xi_{\rm L}[i\omega ])b_{\rm R}^{\ast }[i\omega ]|0_{\rm R}\rangle, \label{sept10_2b} \\ \lim_{\kappa \rightarrow 0 }\left\vert \Psi _{\mathrm{out}}\right\rangle =& \int_{-\infty}^\infty d\omega \ \xi_{\rm L}[i\omega ]b_{\rm L}^{\ast }[i\omega ]|0_{\rm L}\rangle \otimes |0_{\rm R}\rangle. \label{jan_91} \end{align} \end{subequations} These limits are consistent with Eqs. (\ref{psi_out_ss_2})-(\ref{psi_out_ss_3}). \begin{remark} According to Eq. (\ref{aug28_Psi_out}), the pulse shape of the photon in the output channels is obtained by linearly transforming that of the input photon by $G[i\omega]$. This looks like linear dynamics. Indeed, as shown in \cite{YJ14, PZJ15}, the interaction between a two-level system and a single photon can be fully analyzed in a transfer function approach. Unfortunately, as the coherent feedback network studied in this paper is only marginally stable, the linear transfer function approach in \cite{YJ14, PZJ15} is not applicable. However, as shown above, the general framework presented in Section \ref{sec:main_result} indeed works. \end{remark} \begin{remark}\label{rem:single} It is worthwhile to notice that Eq. (\ref{sept10_2b}) is consistent with \cite[Fig. 3]{ZGB10} for single-photon Fock-state scattering. That is, for strong coupling, a two-level atom appears as a mirror so that the input single photon is reflected. This is true even with the existence of a nonzero detuning $\omega_c$. \end{remark} \vspace{-2mm} \section{Simulations} \label{sec:example} In this section, we use an example to illustrate the results presented in this paper. Let the two single-photon states be those in Example \ref{ex:photon}. In particular, they have the same pulse shape as given in Eq. (\ref{jan14_xi}). 
\begin{figure}[!htb] \centering \begin{minipage}{7.0cm} \centering \includegraphics[width=1.0\textwidth]{2photon_a} \end{minipage} \begin{minipage}{7.5cm} \centering \includegraphics[width=1.0\textwidth]{2photon_b} \end{minipage} \caption{\label{fig_jan27} $|T_{\rm LR}(\omega_1,\omega_2)|^2$ with parameters $\gamma=1, \omega_o=0,\kappa=1.5$, $\omega_c=0$ (for the upper subfigure), and $\omega_c=3$ (for the lower subfigure)} \end{figure} By Corollary \ref{cor:kappa}, we know that $\lim_{\kappa \to \infty} P_{\rm LL}=P_{\rm RR} = 0$; in other words, in the limit $\kappa\to\infty$, there is one photon in each output channel. However, detuning changes this picture dramatically as demonstrated by Fig. \ref{fig_jan27}. Actually, if we let the detuning $\omega_c=2\kappa$, then our simulations show that $P_{\rm LR}$ decreases monotonically as $\kappa$ increases, with the limit $\lim_{\kappa \to \infty} P_{\rm LR} = 0$, or equivalently, $\lim_{\kappa \to \infty} P_{\rm LL}=P_{\rm RR} = 1/2$. That is, the two photons simultaneously leave the network from either the left- or right-going channel. This is the famous Hong-Ou-Mandel (HOM) interference phenomenon. Therefore, detuning plays a significant role in the dynamics of this coherent feedback network. The problem of two-photon, two-qubit scattering was studied in \cite{Neumeier13, ZB13, Laasko14}. As the distance between the two qubits is taken into account, the system exhibits {\it non-Markovian effects}. The detunings of the two qubits are opposite, $\Omega_1=-\Omega_2=\Omega>0$, while the coupling strengths are assumed to be identical which are denoted by $\Gamma$ in \cite{Laasko14}. The numerical studies carried out in \cite{Laasko14} show that, when $\Omega = 2\Gamma$, the probability of finding one photon in each channel is smaller, compared to other ratios, \cite[Fig. 3]{Laasko14}. In our study, the effect of distance between two qubits on the dynamics is ignored, i.e., essentially we study a Markovian system. 
Moreover, the detunings are assumed to be identical. But same scalings between detuning and coupling strength give rise to similar result in two different settings. \vspace{-2mm} \section{Conclusion} \label{sec:conclusion} In this paper we have studied a coherent feedback network which consists of two qubits and is driven by two photons, one in each input channel. The explicit expression of the steady-state output two-photon state has been derived. It has also been shown that the proposed framework is applicable to the single-photon case. Numerical simulations have been given for illustration. The feedback network studied in this paper may serve as a first step toward practical photon-based quantum information processing on-chip. \vspace{-3mm}
1,108,101,565,840
arxiv
\section{Introduction} \label{sec:intro} Visual Transformers (ViT)~\cite{dosovitskiy2020image} have been a ubiquitous backbone for visual representation learning, leading to many advances in image understanding~\cite{scalingVIT,pyramidVIT,segmenter}, multimodal tasks \cite{filip,akbari2021vatt,merlotreserve,flamingo} and self-supervised learning \cite{beit,tong2022videomae,feichtenhofer2022masked}, etc. However, adaptations to video are both challenging and computationally intensive, so video versions have been specially designed to handle the larger number of frames, for example, ViViT~\cite{arnab2021vivit}, MultiView~\cite{yan2022multiview}, TimeSFormer \cite{bertasius_arxiv_2021} and others~\cite{fan2021multiscale}. Video understanding is an essential computer vision task, and a large number of successful video architectures have been developed~\cite{Carreira_2017_CVPR,tran_iccv_2015,xie_s3d_eccv_2018,wang2018nonlocal,ryoo2020assemblenet++,feichtenhofer_cvpr_2020,feichtenhofer_iccv_2019,liu2021video}. Previous video 3D CNNs \cite{Carreira_2017_CVPR,tran_iccv_2015} were designed to handle videos by learning spatio-temporal information; they often borrow from mechanisms for learning on images, for example \cite{Carreira_2017_CVPR} use pre-trained image CNN weights by inflating the kernels to 3D. However, once adapted to videos, these kernels are no longer applicable to images. Furthermore, most previous works treat image and video as entirely different inputs, providing independent methods for either videos or images, since designing a model capable of handling both is challenging. At the same time, image and video inputs are inherently related and a single visual backbone should be able to handle either or both inputs. Previous methods for co-training image and video \cite{zhang2021co,likhosherstov2021polyvit,bain2021frozen,omnivl} adapt the architectures to do so with significant portions of the network designed for each input. 
Works such as Perceiver \cite{jaegle2021perceiver} and Flamingo \cite{flamingo} address this by resampling the input and compressing it into a fixed number of features. However, this resampling can still be expensive for long videos, and, in the case of Flamingo, it treats videos as individual frames sampled at 1 FPS, which limits the temporal information. Such low FPS sampling and per-frame modeling would often be insufficient for datasets which rely on motion and temporal understanding, e.g., SomethingSomething \cite{somethingsomething}, or for recognizing quick and short actions. On the other hand, using one of the above-mentioned approaches with dense frames is computationally infeasible. \begin{figure} \centering \includegraphics[width=1.0\linewidth]{figures/teaser-tubes-drawing.pdf} \caption{TubeViT: With Sparse Video Tubes, Vision Transformers (ViTs) use both image and video inputs, providing an efficient video backbone and more accurate performance.} \label{fig:teaser} \end{figure} To address these limitations, we propose a simple but effective model, named TubeViT, to utilize a standard ViT model seamlessly for both image and videos. We introduce Sparse Video Tubes, a lightweight approach for joint image and video learning. Our method works by sparsely sampling various sized 3D space-time tubes from the video to generate learnable tokens, which are used by the vision transformer (Figure~\ref{fig:teaser}). With sparse video tubes, the model is easily applicable to either input, and can better leverage either or both sources of data for training and fine-tuning. The sparse video tubes naturally handle raw video signals and image signals which is crucial to understanding actions and other spatio-temporal information in videos. Video models are also expensive to train, and previous works have studied ways to leverage already trained models, such as using frozen ones \cite{lin2022frozen} or adapting them to videos \cite{ni2022expanding}. 
We expand on these ideas, and use the Sparse Video Tubes to adapt much larger ViT models to videos with lightweight training (Sec. \ref{sec:scaling}). Thus we create powerful large video models with less resources. We evaluate the approach across many standard video datasets: Kinetics-400, Kinetics-600, Kinetics-700, and SomethingSomething V2, outperforming the state-of-the-art (SOTA). Our methods are trained from scratch or on ImageNet-1k and Kinetics datasets and outperform even methods additionally pre-trained from very large datasets (e.g., JFT~\cite{jft}). Our work also outperforms models targeting video pretraining, such as recent video Masked Auto-Encoder (MAE) works~\cite{tong2022videomae,feichtenhofer2022masked}. Our key findings are that by using the sparse video tubes, we are able to better share the weights learned for both images and videos. This is in contrast to prior works that either inflate kernels or add new temporal-specific layers. Further, due to the sparse sampling, the number of tokens remains low, which we also find is important, both for reducing FLOPs and improving performance. \textbf{Our contribution} is construction of sparse video tubes, obtained by sparsely sampling videos with various sized 3D space-time tubes. With that we accomplish the following: (1) a universal visual backbone which easily adapts a ViT architecture to videos; (2) joint image and video understanding which seamlessly uses either input; (3) an easy-to-scale approach for video understanding, which can also leverage already trained (large) ViT models. \section{Related work} Video understanding is an important topic in computer vision. Early works hand-designed trajectory features to understand motion and time \cite{wang2013action}. 
With the success of neural networks, many different approaches have been developed, such as two-stream CNNs taking image frames plus optical flow for motion information as input \cite{simonyan_neurips_2014}, finding a clear benefit from adding the flow information. Works studying 3D CNNs found the learning of temporal kernels to be important \cite{tran_iccv_2015,Carreira_2017_CVPR,tgm,tran_cvpr_2018}, but also required much more data in order to be effective \cite{Carreira_2017_CVPR}. Many of the existing video CNN approaches have been specialized to handle videos, either with flow streams or 3D kernels and thus have not been applicable to images. With the introduction of transformer models and self-attention \cite{vaswani2017attention}, vision transformers have been very effective for image-based tasks. However, due to the quadratic cost of self-attention and the dense sampling, their use for videos has required different elements, such as space-time factorized attention \cite{bertasius_arxiv_2021, arnab2021vivit,yan2022multiview}. However, these video transformers have not really been tested on longer videos and are mostly evaluated on short clips. The ability to handle a larger number of input frames and understand long-term actions and their relationships is of key importance, but becomes computationally prohibitive with current models. \begin{figure} \centering \includegraphics{figures/tubes-drawing-new.pdf} \caption{Illustration of the approach. We use tubes of different shapes to sparsely sample the video. These are concatenated together and used as input to a transformer model.} \label{fig:sparse_tubes} \end{figure} Previous works have found that transformers focus on only a few tokens \cite{naseer2021intriguing,rao2021dynamicvit} and works have been designed to pool or reorganize tokens effectively \cite{liang2022notpatches,ryoo2021tokenlearner_neurips,marin2021token}. 
Many video works have found that frames contain redundant information, and thus propose strategies to sample frames \cite{gowda2021smart,wu2019adaframe}. Other works have studied ways to reduce the number of tokens in video transformer models \cite{wang2022efficient,sparsesampling,ryoo2021tokenlearner_neurips}. However, all these works still use an initial dense sampling of the video, then some heuristics to reduce the number of inputs. In this work, we more sparsely sample the input initially, increasing efficiency. Other recent works have studied video MAE tasks as pretraining \cite{feichtenhofer2022masked,tong2022videomae}; they similarly treat videos as tubes, and study the sparseness in terms of the masking, having similar findings that sparseness is beneficial. However, they use a single tube shape and create non-overlapping patches and have not been studied when jointly training with images. This work is also related to approaches which use multiple views or streams from the input data, e.g., Multi-View Transformers \cite{yan2022multiview}, SlowFast Networks \cite{feichtenhofer_iccv_2019} and others \cite{piergiovanni2022cotok,simonyan_neurips_2014}, all have found benefits from multiple input views or streams. MultiView Transformers \cite{yan2022multiview}, similarly to us, uses tubes of varying shapes. The key difference is that the sparse sampling we use enables the use of a single ViT encoder model, rather than multiple smaller, per-view encoders. This further unifies the approach with images. Another line of work in video understanding is leveraging image datasets during pre-training~\cite{duan2020omni,UniDual}. This is valuable as image-only datasets are better annotated and provide richer semantic information. One approach is to bootstrap the video models from image-pretrained models, often by inflating kernels. The model is first pre-trained on image data, and then only trained on video. 
Other works proposed to co-train image and video jointly \cite{UniDual,zhang2021co,likhosherstov2021polyvit,bain2021frozen,omnivl,jaegle2021perceiver}. These approaches adapt the architectures to handle both inputs, which might be inefficient, e.g., treating an image input as a video of 1 frame~\cite{zhang2021co} or using separate networks to first encode the inputs~\cite{jaegle2021perceiver,flamingo}. In contrast to all the previous works, our method is simple and straightforward. One crucial set of differences is that our method applies the tubes sparsely to the raw input, uses differently shaped, possibly overlapping tubes, and relies on a single, shared backbone network, different from all previous approaches (\cite{feichtenhofer_iccv_2019,yan2022multiview,sparsesampling,ryoo2021tokenlearner_neurips,arnab2021vivit,feichtenhofer2022masked,tong2022videomae}). This leads to both more efficient and accurate models. Secondly, and more importantly, the model is entirely shared between the image and video modalities. This is an important distinction as it not only improves performance for both tasks, but is also more generally applicable to vision tasks. \section{Method} \subsection{Preliminaries} The standard ViT architecture \cite{dosovitskiy2020image} takes an image and converts it into patch embedding, for example, by using a $16\times 16$ 2D convolutional kernel, with a $16\times 16$ stride. This results in a sequence of patches as the image representation, e.g., 196 for a $224\times 224$ input image. Given a video $V\in \mathcal{R}^{T\times H\times W\times C}$, prior approaches either used the same, dense 2D patches (e.g., TimeSFormer \cite{bertasius_arxiv_2021}) or used dense 3D kernels, e.g., 2 or $4\times 16\times 16$ as in ViViT \cite{arnab2021vivit}. In both cases, this results in significantly more tokens, e.g., $T*196$, where $T$ is the number of frames. These tubes or patches are then linearly projected into an embedding space, $z_i\in \mathcal{R}^d$. 
This sequence of tokens is then processed by a transformer encoder, using standard components, MSA - the multi-head self attention and MLP - the standard transformer projection layer (LN denotes Layer Norm). For a sequence of layers $l\in [0, 1, \ldots L]$, we compute the representation $y_i^l$ and next token features $z_i^l$ for all the $z_i$ tokens: \begin{equation} y_i^l = \text{MSA}(\text{LN}(z_i^{l-1})) + z_i^{l-1} \end{equation} \begin{equation} z_i^l = \text{MLP}(\text{LN}(y_i^l)) + y_i^l \end{equation} To reduce the computational cost, prior approaches factorize the attention mechanism, to have a spatial and temporal attention \cite{arnab2021vivit} or use multiple views with smaller, view level transformers \cite{yan2022multiview}. \subsection{Sparse Video Tubes} We propose a simple and straightforward method which is seamlessly applicable to both images and videos. Our approach follows the standard ViT tokenization approach for images: a 2D convolution with a $16\times 16$ kernel. We build on the observation that sparseness is effective for videos. Rather than following the prior works that densely tokenize the video, we instead use the same 2D kernel, but with a large temporal stride, for example, applied to every 16th frame. Thus for an input video clip of $32\times 224\times 224$, this results in only 392 tokens, rather than the ~6k in TimeSFormer or 1-2k in ViViT. However, this sparse spatial sampling might lose information, especially for quick or short actions. Thus, we create sparse tubes of different shapes, for example, a $16\times 4\times 4$ tube to obtain information from many frames at low spatial resolution. These tubes can have any shape, and we experimentally explore the effect of these. Importantly, these tubes also have large strides, sparsely sampling the video in different views. 
We also optionally add an offset to the start location, so that the patches do not always start at $(0,0,0)$ and this allows a reduction in the overlap between the tubes. This is illustrated in Figure \ref{fig:sparse_tubes}. Tubes of various sizes are also used in the MultiView approach for video classification ~\cite{yan2022multiview}, however there they are densely sampled and processed by multiple transformers, resulting in a more computationally intensive approach. Furthermore, in contrast to prior works, we also allow for overlap between the tubes. Specifically, we can represent a tube as $(T\times H\times W)$ for the kernel shape, $(T_s, H_s, W_s)$ for the spatio-temporal stride applied to the kernel, and $(x, y, z)$ as the offset of the starting point of the convolution. With the proposed design, our approach enables seamless fusion of the image- and video- visual information. The sparse spatial sampling allows sharing the image and frame tokens and the sparse video tubes create a low number of video-specific tokens. This enables better sharing of the ViT model between images and videos. \subsection{Positional embedding for sparse video tubes} A key aspect of our approach is the implementation of the positional embedding. In language models, relative positional embeddings are a common and effective approach \cite{vaswani2017attention, what}. However, here, the relative position between two tokens has minimal meaning, and no real reference to where the patch/tube came from in the original video or image. The ViT model \cite{dosovitskiy2020image} and similarly TimeSFormer \cite{bertasius_arxiv_2021} and ViViT \cite{arnab2021vivit} used learnable positional embeddings for the patches. Here, such an approach can be hard for the model, as these learned embeddings do not necessarily reflect where the patches came from in the original video, especially in the case where patches overlap. Instead, we use a fixed sine/cosine embedding. 
Importantly, we take into account the stride, kernel shape and offsets of each tube when applying the positional embeddings. This ensures that the positional embedding of each patch and tube has the global spatio-temporal location of that tube. Specifically, we compute the embeddings as follows. Here $\tau$ is a constant hyperparameter (we used 10,000). For $j$ from 0 to $d//6$ ($d$ is the number of features), and for $t,x,y$ from 0 to $T, H, W$, $z_i\in \mathcal{R}^{T\times H\times W\times D}$: \begin{align} \omega_j &= 1 / (\tau^{j}) \\ p_{j,t} &= \sin (t*\omega_j), \cos (t*\omega_j) \\ p_{j,x} &= \sin (x*\omega_j), \cos (x*\omega_j) \\ p_{j,y} &= \sin (y*\omega_j), \cos (y*\omega_j) \\ z_{i}[t,x,y&,6j:6(j+1)] \mathrel{{+}{=}} [p_{j,t}, p_{j,x}, p_{j,y}] \end{align} This adds each spatio-temporal position embedding to the feature dimension of the token $z_i$. Following previous work \cite{vaswani2017attention}, this is done for different wavelengths for each channel. $d//6$ is used since we have 6 elements (a sine and cosine value for each $x,y,t$); this creates a position value for each channel of the representation. Importantly, here $z_{i}[t,x,y]$ represents the center of the tube, taking into account any strides and offsets used in the tube construction (the channel dimension is not shown here). After the tokenization step, we concatenate all the tokens together and apply a standard transformer model. This simple structure lets the model share the majority of the weights between all inputs, which we find to be quite beneficial. \subsection{Sparse Tube Construction} We explore several methods to create the visual tubes. Our core approach consists of 2 tubes: the $1\times 16\times 16\times d$ tube used to tokenize the image and an $8\times 8\times 8\times d$ tube additionally used for the video. Both have strides of $16\times 16\times 16$. This base tokenizer provides strong performance, but we explore several variations on it. \textbf{Multi-Tube}. 
We add multiple tubes to the core approach of various sizes. For example, we can add temporally long and spatially small tubes, such as $16\times 4\times 4$ to learn long actions, or more spatially focused tubes such as a $2\times 16\times 16$ tube. There are many variations of tube shape and stride, which we experimentally explore. \begin{figure} \centering \includegraphics[width=0.99\linewidth]{figures/teaser-tubes-drawing-scaling_2.pdf} \caption{Scaling of TubeViT models: building large scale video models is expensive. We propose to expand model capacity for video models leveraging large pre-trained ViTs. With TubeViT we can easily train a small-scale model on both image and video data. Then we can adapt the sparse video tubes to a much larger image-only trained ViT, which can be mostly frozen.} \label{fig:teaser2} \end{figure} \textbf{Space-to-Depth}. Another way to extend the core approach is a method inspired by depth-to-space \cite{depth2space}. Here, we reduce the number of channels in a tube, e.g., by a factor of 2. Thus the tube shape becomes $T\times H\times W\times d/2$. Next, we concatenate 2 tokens along the channel axis. We can then also reduce the stride of the tube. This results in the same number of tokens and dimensions as the original, but effectively increases the kernel size without changing the number of parameters. I.e., when the stride is reduced on the time axis, the token now represents $T*2\times H\times W$ locations, but only uses $T*H*W$ parameters. In the experiments, we explore different settings: e.g., more temporally dense vs.\ more spatially dense and the depth-to-space factor (2, 4, 8, etc.). \textbf{Interpolated Kernels}. For this setting, rather than having a unique kernel for each tube, we learn a single 3D kernel of shape $8\times 8\times 8$. We then use tri-linear interpolation to reshape the kernel to various sizes, e.g., $4\times 16\times 16$ or $32\times 4\times 4$, etc. depending on the tube configuration. 
Any sized kernel can be created from this single kernel. This method has several advantages. (1) It reduces the number of learned parameters that are only used on the video stream. (2) It enables more flexible usage of the kernels, e.g., it can be made longer to handle longer videos, or spatially larger to find small objects. The TubeViT approach consists of the union of the above-mentioned Multi-Tube and Space-to-Depth, the exact settings are provided in the supplemental materials. We experiment with Interpolated Kernels in ablations. \subsection{Image and Video Joint Training} As described above, our approach seamlessly adapts to either image, video or both inputs. While image+video joint inputs are rare, the ability to use them together while training is very important as many datasets with valuable annotations (e.g., ImageNet, Kinetics) come from either image sources or video sources but not both. Jointly training with our approach is easy -- the image is tokenized by the 2D kernel and the video is tokenized by both the 2D patches (with large temporal stride) and Sparse Tubes. Both are then passed into a standard ViT; the position embedding will be supplied in either case. The position embedding approach is also needed for the joint training to be effective. We demonstrate the benefits of our approach for joint training in the experiments, Section~\ref{sec:experiments}. \subsection{Image-To-Video Scaling Up of Models} \label{sec:scaling} We also propose a method for a more efficient way of scaling up the models (Figure~\ref{fig:teaser2}). Training large ViT models is computationally expensive, especially for videos. Since nearly all the components of our model are shared between the both images and videos, we explore a method to utilize large models without having heavy fine-tuning. First, we train a smaller model jointly on images and videos. This gives us a set of weights for the tubes. Then we take a large pre-trained image ViT, but further add the tubes. 
These tubes use the same kernel weights as the smaller model, and so we can avoid further training them. Since larger ViTs generally use more channel dimensions than smaller ones, we use the space-to-depth transform again here to create tokens with the proper channel dimensions without needing new weights. Next, we pick a point in the network and freeze all the layers before it, for example, the 26th of 32 layers in ViT-H. At this point, we add a gated connection to the network: \begin{equation} \label{eq:gate_scale} z^{s} = \text{MLP}(\text{LN}(y^{s})) + y^{s} + \tanh(\alpha)z^0 \end{equation} where $s$ is the layer the network is frozen at (e.g., 26) of the ViT model and $z^0$ is the raw input tokens from the tubes. $\alpha$ is the learned gating parameter, initialized at 0. In the first steps of training, this gate has no effect on the representation, and thus the ViT is unchanged. However, it can learn to incorporate the raw tubes at this point and further refine the later weights. \begin{table} \centering \setlength{\tabcolsep}{3pt} % \renewcommand*{\arraystretch}{1.10} % \vspace{-0.3\baselineskip} \scriptsize{ \begin{tabular}{lccccc} \toprule Method & PT Data & Top 1 & Top 5 & Crops & TFLOPs \\ \midrule TSM-ResNeXt-101~\cite{lin_tsm_cvpr_2019} & ImageNet-1k & 76.3 & -- & -- & -- \\% I3D NL~\cite{wang2018nonlocal} & ImageNet-1k & 77.7 & 93.3 & $10 \times 3$ & 10.77 \\ VidTR-L~\cite{zhang2021vidtr} & ImageNet-1k & 79.1 & 93.9 & $10 \times 3$ & 10.53 \\ LGD-3D R101~\cite{qiu2019learning} & ImageNet-lk & 79.4 & 94.4 & -- & -- \\ SlowFast R101-NL~\cite{feichtenhofer_iccv_2019} & - & 79.8 & 93.9 & $10 \times 3$ & 7.02 \\ % X3D-XXL~\cite{feichtenhofer_cvpr_2020} & - & 80.4 & 94.6 & $10 \times 3$ & 5.82 \\ OmniSource~\cite{duan2020omni} & ImageNet-1k & 80.5 & 94.4 & -- & -- \\ TimeSformer-L~\cite{bertasius_arxiv_2021} & ImageNet-21k & 80.7 & 94.7 & $1 \times 3$ & 7.14 \\ MFormer-HR~\cite{patrick2021keeping} & ImageNet-21k & 81.1 & 95.2 & $10 \times 3$ & 28.76 \\ 
MViT-B~\cite{fan2021multiscale} & - & 81.2 & 95.1 & $3 \times 3$ & 4.10 \\ MoViNet-A6~\cite{kondratyuk2021movinets} & - & 81.5 & \textbf{95.3} & $1 \times 1$ & 0.39 \\ % ViViT-L FE~\cite{arnab2021vivit} & ImageNet-1k & 81.7 & 93.8 & $1 \times 3$ & 11.94 \\ % MTV-B \cite{yan2022multiview} & ImageNet-21K & 82.4 & 95.2 & $4 \times 3$ & 11.16 \\ % VideoMAE \cite{tong2022videomae} & - & 87.4 & 97.6 & - & -\\ \midrule \multicolumn{4}{l}{\textit{Large Scale Pretraining Data}} \\ VATT-L~\cite{akbari2021vatt} & HowTo100M & 82.1 & 95.5 & $4 \times 3$ & 29.80 \\ % ip-CSN-152~\cite{tran_iccv_2019} & IG-65M & 82.5 & 95.3 & $10 \times 3$ & 3.27 \\ R3D-RS~\cite{du2021revisiting} & WTS & 83.5 & -- & $10 \times 3$ & 9.21 \\ OmniSource~\cite{duan2020omni} & IG-65M & 83.6 & 96.0 & -- & -- \\ MAE-ST \cite{feichtenhofer2022masked} & IG-1M & 84.4 & - & - & - \\ ViViT-H~\cite{arnab2021vivit} & JFT & 84.9 & 95.8 & $4 \times 3$ & 47.77 \\ % TokenLearner-L/10~\cite{ryoo2021tokenlearner_neurips} & JFT & 85.4 & 96.3 & $4 \times 3$ & 48.91 \\ % Florence~\cite{yuan2021florence} & FLD-900M & 86.5 & 97.3 & $4 \times 3$ & -- \\ CoVeR ~\cite{zhang2021co} & JFT-3B & 87.2 & -- & $1 \times 3$ & -- \\ CoCa \cite{coca} & ALIGN (1.8B) & 88.9 & - & - & - \\ MTV-H \cite{yan2022multiview} & WTS 280p & 89.9 & 98.3 & $4 \times 3$ & 73.57 \\ % \midrule TubeVit-B & ImageNet-1k & 88.6 & 97.6 & $4\times 3$ & 0.87 \\ TubeVit-L & ImageNet-1k & \textbf{90.2} & \textbf{98.6} & $4\times 3$ & 9.53 \\ \midrule TubeViT-H (created) & ImageNet-1k & \textbf{90.9} & \textbf{98.9} & $4\times 3$ & 17.64 \\ \bottomrule \end{tabular} } \caption{Performance on Kinetics 400. TubeViT performs best. We report the crops and total TFLOPs used for inference. 
The crops, $t \times x$ denotes $t$ temporal and $x$ spatial crops.} \label{tab:sota_kinetics400} \end{table} \begin{table} \centering \setlength{\tabcolsep}{4pt} % \vspace{-0.3\baselineskip} \begin{tabular}{lcc} \toprule Method & Top 1 & Top 5 \\ % \midrule % SlowFast R101-NL~\cite{feichtenhofer_iccv_2019} & 81.8 & 95.1 \\ % X3D-XL~\cite{feichtenhofer_cvpr_2020} & 81.9 & 95.5 \\ % TimeSformer-L~\cite{bertasius_arxiv_2021} & 82.2 & 95.6 \\ % MFormer-HR~\cite{patrick2021keeping} & 82.7 & 96.1 \\ ViViT-L FE~\cite{arnab2021vivit} & 82.9 & 94.6 \\ % MViT-B~\cite{fan2021multiscale} & 83.8 & 96.3 \\ % % MoViNet-A6~\cite{kondratyuk2021movinets} & 84.8 & 96.5 \\ \midrule R3D-RS~\cite{du2021revisiting} (WTS) & 84.3 & -- \\ ViViT-H~\cite{arnab2021vivit} (JFT) & 85.8 & 96.5 \\ % TokenLearner-L/10~\cite{ryoo2021tokenlearner_neurips} (JFT) & 86.3 & 97.0 \\ % Florence~\cite{yuan2021florence} (FLD-900M) & 87.8 & 97.8 \\ CoVeR~\cite{zhang2021co} (JFT-3B) & 87.9 & -- \\ MTV-H \cite{yan2022multiview} (WTS 280p) & 90.3 & 98.5 \\ % CoCa \cite{coca} (ALIGN 1.8B) & 89.4 & - \\ Merlot-Reserve-L \cite{merlotreserve} (YT-1B) & 91.1 & 97.1 \\ \midrule TubeVit-B (ImageNet-1k) & 90.9 & 97.3 \\ TubeVit-L (ImageNet-1k) & \textbf{91.5} & \textbf{98.7} \\ \midrule` TubeVit-H (created) & \textbf{91.8} & \textbf{98.9} \\ \bottomrule \end{tabular} \caption{Performance on Kinetics 600. Similarly, to Table~\ref{tab:sota_kinetics400} our model uses the ImageNet-1k dataset. Most models use significantly larger pre-training datasets (bottom half). 
Tube-ViT outperforms prior work.} \label{tab:sota_kinetics600} \end{table} \begin{table} \setlength{\tabcolsep}{4pt} % \centering \vspace{-0.3\baselineskip} \setlength{\tabcolsep}{6pt} % \scriptsize{ \begin{tabular}{lcc} \toprule & Top 1 & Top 5 \\ \midrule VidTR-L~\cite{zhang2021vidtr} & 70.2 & -- \\ SlowFast R101~\cite{feichtenhofer_iccv_2019} & 71.0 & 89.6 \\ MoViNet-A6~\cite{kondratyuk2021movinets} & 72.3 & -- \\ \midrule CoVeR (JFT-3B)~\cite{zhang2021co} & 79.8 & -- \\ CoCa (Align 1.8B) \cite{coca} & 82.7 & - \\ MTV-H (WTS 280p) \cite{yan2022multiview} & 83.4 & 96.2 \\ % \midrule TubeViT-L & \textbf{83.8} & \textbf{96.6} \\ \bottomrule \end{tabular} \caption{Performance compared to SOTA on Kinetics 700.} \label{tab:sota_kinetics700} } \end{table} \begin{table} \setlength{\tabcolsep}{4pt} % \centering \vspace{-0.3\baselineskip} \setlength{\tabcolsep}{6pt} % \scriptsize{ \begin{tabular}{lcc} \toprule & Top 1 & Top 5 \\ \midrule SlowFast R50~\cite{feichtenhofer_iccv_2019} & 61.7 & -- \\ TimeSformer-L~\cite{bertasius_arxiv_2021} & 62.5 \\ VidTR-L~\cite{zhang2021vidtr} & 63.0 & -- \\ CoVeR~\cite{zhang2021co} & 64.7 & -- \\ MoViNet-A3~\cite{kondratyuk2021movinets} & 64.1 &88.8 \\ ViViT-L FE~\cite{arnab2021vivit} & 65.9 & 89.9 \\ % VoV3D-L~\cite{lee2020diverse} & 67.3 & 90.5 \\ MFormer-L~\cite{patrick2021keeping} & 68.1 & 91.2 \\ MTV-B (320p) \cite{yan2022multiview} & 68.5 & 90.4 \\ % MViT-B~\cite{fan2021multiscale} & 68.7 & 91.5 \\ MViT \cite{li2022mvitv2} & 73.3 & 94.1 \\ MaskFeat \cite{wei2021maskedfeature} & 75.0 & 95.0 \\ VideoMAE \cite{tong2022videomae} & 75.4 & 95.2 \\ \midrule TubeViT-L & \textbf{76.1} & \textbf{95.2} \\ \bottomrule \end{tabular} \caption{Performance on Something-SomethingV2 dataset.} \label{tab:something} } \end{table} \section{Experiments} \label{sec:experiments} We evaluate the approach on several popular datasets: Kinetics 400, Kinetics 600, Kinetics 700 \cite{kay_arxiv_2017,carreira2019short}, and SomethingSomething V2 
\cite{somethingsomething}. These datasets cover a wide variety of video understanding challenges and are well established in the literature. The main results are trained jointly on ImageNet-1k (of 1.2M images) and the video data, please see the supplemental materials for full details. We use standard Top 1 and Top 5 evaluation metrics and report FLOPs of ours and previous works, when available. Our model sizes are \textbf{90M Base (B)}, \textbf{311M Large (L)}. A \textbf{635M Huge (H)} is `created' with Image-to-Video scaling. \subsection{Main results} For the main results, we use 4 tubes with the following configuration (order of $t, h, w$): (1) $8\times 8\times 8$ with a stride of $(16, 32, 32)$; (2) $16\times 4\times 4$ with a stride of $6\times 32\times 32$ and an offset of $(4, 8, 8)$; (3) $4\times 12\times 12$ with a stride of $(16, 32, 32)$ and an offset of $(0, 16, 16)$; and (4) $1\times 16\times 16$ with a stride of $(32, 16, 16)$. For an input of $32\times 224\times 224$, this results in only 559 tokens, significantly less than other approaches. In the supplemental material, we have detailed experiments over many tube configurations, as well as the space-to-depth settings used. We would like to note that with data augmentation such as random spatial and temporal cropping, over multiple training epochs the model will see different parts of the video, even with sparse sampling. \textbf{Comparison to SOTA.} First, we compare our final approach to previous state-of-the-art (SOTA) methods. Tables~\ref{tab:sota_kinetics400},~\ref{tab:sota_kinetics600} and \ref{tab:sota_kinetics700} shows the performance of our model compared to the state-of-the-art on the Kinetics-400 Kinetics-600 and Kinetics-700 datasets. Table~\ref{tab:sota_kinetics400} shows additional information (e.g. views, pre-training datasets) which applies to the other tables as well. These results show our approach outperforms SOTA, both in terms of accuracy and efficiency. 
We also outperform methods on co-training of images and videos, and methods with strong video pre-training. We note that all the sizes of our model perform well, despite the fact that others are much larger or use significantly larger pre-training data (e.g., CoCa with 1B params and 1.8B examples, MerlotReserve has 644M params and uses YT-1B dataset). Table~\ref{tab:something} shows our results on the Something-Something dataset (SSv2). This dataset is often used to evaluate more dynamic activities. Our approach outperforms SOTA on it as well. \textbf{Joint image+video training.} We further explore the effects of co-training on image+video datasets, finding this to be highly effective as also shown above. Table~\ref{tab:im_kin} evaluates this in a side-by-side experiment of using Kinetics (video) only vs Kinetics and ImageNet datasets for pre-training. We see that there is a large gain from the co-training of our approach. We see that two-stage training, i.e., first training on one dataset and then training on a second one, is also weaker than the joint training, as the two datasets cannot interact during training. We also compare to prior methods such as TimeSFormer \cite{bertasius_arxiv_2021} only using dense 2D patches, or using inflated 3D kernels (e.g., ViViT \cite{arnab2021vivit}). In both cases, we see a clear benefit from the proposed approach. We also note that these prior approaches have significantly more FLOPs, due to the large number of tokens from the dense sampling. Our observations that image and video co-training is beneficial are consistent with prior works \cite{zhang2021co,likhosherstov2021polyvit}; here the difference is that we have a single compact model to do that. As a sanity check, we also compare our performance on ImageNet-1k, without any hyperparameter tuning or additions: our ViT-B model only trained on ImageNet has 78.1 accuracy, similar to the ViT-B in \cite{steiner2021augreg}. 
When joint training with Kinetics-600, the model gets 81.4, a gain of 3.4\%, showing the benefits of joint training for image-only tasks too. While other works achieve higher performance on ImageNet, they often use specialized data augmentation, learning schedules, and other tricks which we are not using. Instead, we are purely studying the benefit from using both videos and images. \begin{table}[] \centering \begin{tabular}{l|c} & Kinetics 600 \\ \midrule TubeViT-L Kinetics-only & 85.6 \\ TubeViT-L ImageNet then Kinetics & 90.4 \\ TubeViT-L ImageNet+Kinetics Jointly & \textbf{91.5} \\ \midrule 2D Patches only ImageNet+Kinetics & 87.6 \\ Inflated 3D Patches ImageNet then Kinetics & 88.4 \\ \midrule \end{tabular} \caption{Combining datasets, which TubeViT seamlessly allows, is highly effective, as seen here in these side-by-side results for the Kinetics-600 dataset. The results are based on the ViT-L model.} \label{tab:im_kin} \end{table} \textbf{Scaling video training with sparse video tubes.} In Table \ref{tab:new_scaling} we demonstrate how a small TubeViT model can be adapted leveraging a large and (often independently) pre-trained model on images only. We start by leveraging a large, image-pretrained ViT, here ViT-H. We then take the learned tubes from TubeViT-B and use them along with the ViT-H image tokenizer to generate a set of tokens from a video, same as before. Then these are used as input to ViT-H, and we finetune only the latter parts of the model on the video data. These results suggests that this is an effective way to scale and utilize giant ViT models without needing the high compute cost to fully finetune the model. We also see that the gating in Eq. \ref{eq:gate_scale} is effective. We also found that in this setting, training time was reduced by 43\%, as it has fewer weights to update. 
\begin{table}[] \centering \begin{tabular}{l|c} Models & K600, Accuracy (\%) \\ \midrule TubeViT-H Full Finetune & 91.8 \\ \midrule \multicolumn{2}{l}{Scaling method with different portions trained}\\ Last FC Layer & 85.6 \\ + Last 4 Layers & 86.3 \\ + Last 8 Layers & 86.8 \\ + Last 8 + Gated (Eq. \ref{eq:gate_scale}) & 89.7 \\ \bottomrule \end{tabular} \caption{Image-to-Video Scaling. We take a ImageNet pre-trained ViT-H and use a set of Tubes from TubeViT-B to create the tokens. We then fine-tune different portions of the model to see how we can best take advantage of existing, large pretrained ViT models. Even pretraining of handful of layers can achieve performance approaching the full model training.} \label{tab:new_scaling} \end{table} \textbf{Detrimental Effects of Too Many Tokens.} Next we study the effect of number of tokens used in the model, shown in Figure \ref{fig:num_tokens}. This result is another key insight as to why our approach works so well: with too many tokens, the performance drops, especially when only using Kinetics data. There are a number of possible reasons for why this occurs, for example, the self-attention mechanism could be struggling to learn for longer sequences, or there may not be sufficient data to learn the longer sequences, or perhaps the model is overfitting with longer sequences. This result indicates that for current datasets, the sparse sampling is an effective and efficient way to process videos. Further, it is possible that existing using long, densely sampled sequences are effected by this, and perhaps another reason the factorized attention modules are needed. \begin{figure} \centering \includegraphics[width=\linewidth]{figures/num_tokens.pdf} \caption{Accuracy vs. Number of tokens used in our model. We find that when increasing the tokens above 1500, there is a noticeable drop in performance, especially when only training on Kinetics-600 data. 
Joint training is more robust.} \label{fig:num_tokens} \end{figure} \subsection{Ablations} \label{sec:ablations} In this section, we present a number of ablation studies to determine why this method is effective. For these experiments we use Kinetics 600. \textbf{Main ablations.} First, we study the effect of the choice of position biases (Table \ref{tab:ablation:pos_emb}). We find that adding fixed cosine position embedding performs best and much better than other embeddings. Intuitively, this makes sense, since we are sparsely sampling potentially overlapping tokens, this method is able to best capture the token location. Next in Table \ref{tab:ablation:num_tubes}, we study the number of tubes used. This finding, which is consistent with previous multi-view observations \cite{yan2022multiview}, shows that having a variety of tubes is beneficial to video understanding. Next, in Table \ref{tab:ablation:d2s}, we study the depth-to-space versions of the network. Here, we reduce the channels of the generated tokens from $D//S$, e.g., by a factor of 2 or 4. Then after generating the tokens, we concatenate them along the channel axis. We study both increasing the number of tokens along the spatial and temporal dimensions. We find this to be an effective method, as it enables more dense samples without increasing the number of parameters or tokens. Table \ref{tab:ablation:evaltokens} compares evaluating with more patches than the model was trained with. To do this we reduce the strides of the kernel. Initially this improves results, but after increasing 2x, the performance begins to drop, likely because the evaluation data is too different from the training one. In Table \ref{tab:ablation:interpolate}, we study the ability of the interpolated single kernel. I.e., rather than having $N$ 3D convolutional kernels, one for each tube, we build 1 $8\times 8\times 8$ 3D kernel and use interpolation to generate the different tube shapes. 
Somewhat surprisingly, we find this works fairly well, while also reducing the number of learnable parameters in the network. In Table \ref{tab:ablation:eval_views}, we compare the approach with different number of temporal and spatial crops. We find that even a single crop gives strong performance, and the standard $4\times 3$ performs nearly the same as the $10\times 10$ setting, suggesting that the sparse samples are quite suitable and further information is not as beneficial. \begin{table*}[] \centering \begingroup \captionsetup[subfloat]{width=.25\linewidth,captionskip=5pt} \subfloat[\textbf{Position Embeddings}. Fixed, cosine embeddings with strides is best. \label{tab:ablation:pos_emb}]{\tablestyle{2pt}{1.05} \begin{tabular}{l|c} & K600 \\ \midrule None & 78.6 \\ Learned & 79.2 \\ Relative & 77.5 \\ Fixed Cosine (no stride) & 77.7 \\ Fixed Cosine (Ours) & 84.5 \\ \bottomrule \end{tabular}}\hspace{15mm} \endgroup \begingroup \captionsetup[subfloat]{width=.15\linewidth,captionskip=5pt} \subfloat[\textbf{Number of Tubes}. \label{tab:ablation:num_tubes}]{\tablestyle{2pt}{1.05}\fontsize{9}{10.2}\selectfont \begin{tabular}{l|c|c} & GF & K600 \\ \midrule 1 & 70 & 78.4 \\ 2 & 71 & 81.5 \\ 4 & 72 & 83.4 \\ 8 & 74 & 85.4 \\ \bottomrule \end{tabular}}\hspace{1.8cm} \endgroup \begingroup \captionsetup[subfloat]{width=.25\linewidth,captionskip=3pt} \subfloat[\textbf{Space To Depth}. Applying space-to-depth temporally (T), spatially (S), and spatio-temporally (ST). \label{tab:ablation:d2s}]{\tablestyle{2pt}{1.05} \begin{tabular}{l|c|c} & GF & K600 \\ \midrule Baseline & 72 & 83.4 \\ With D2S x2 T & 72 & 84.7 \\ With D2S x2 S & 72 & 84.5 \\ With D2S x4 T & 72 & 85.1 \\ With D2S x4 S & 72 & 85.4 \\ With D2S x4 ST & 72 & 85.3 \\ \bottomrule \end{tabular}}\vspace{-3mm}\\ \hspace{1cm} \endgroup \begingroup \captionsetup[subfloat]{width=.2\linewidth,captionskip=3pt} \subfloat[\textbf{Eval Tokens}. 
Generating larger number of tokens at eval time than in training, where 559 are used. \label{tab:ablation:evaltokens}]{\tablestyle{2pt}{1.05}\fontsize{8}{9.2}\selectfont \hspace{6mm} \begin{tabular}{l|c} & K600 \\ \midrule Base (559) & 84.5 \\ 768 & 84.9 \\ 1024 & 84.6 \\ 1536 & 83.5 \\ \bottomrule \end{tabular}}\hspace{2.5cm} \endgroup \begingroup \captionsetup[subfloat]{width=.18\linewidth,captionskip=3pt} \subfloat[\textbf{Interpolated Kernel}. Using a single 3D kernel interpolated to different sizes. \label{tab:ablation:interpolate}]{\tablestyle{2pt}{1.05} \begin{tabular}{l|c} & K600 \\ \midrule Interpolated & 83.8 \\ TubeViT & 84.5 \\ \bottomrule \end{tabular}}\hspace{2cm} \endgroup \begingroup \captionsetup[subfloat]{width=.2\linewidth,captionskip=3pt} \subfloat[\textbf{Multi-Crop Evaluation}. $4\times 3$ is used in the paper. \label{tab:ablation:eval_views}]{\tablestyle{2pt}{1.05}\hspace{10mm} \begin{tabular}{l|c} & K600 \\ \midrule $1\times 1$ & 82.8 \\ $4\times 1$ & 83.3 \\ $1\times 3$ & 83.6 \\ $4\times 3$ & 84.5 \\ $10\times 10$ & 84.7 \\ \bottomrule \end{tabular}} \endgroup \caption{Ablation studies on various components of our approach on Kinetics-600, using TubeViT-B.} \label{tab:ablations} \end{table*} \textbf{Factorized attention ablations.} In Table \ref{tab:added_layers}, we further study the effect of adding a new attention layer to an ImageNet pre-trained ViT model. Here, we are using the tube method to tokenize the inputs, but instead of using a factorized attention module, we simply add an additional self-attention layer. This has a similar effect of the factorized attention approaches that add new, uninitialized $K,Q,V$ projections to a pre-trained ViT (e.g., TimeSFormer and ViViT). These results indicate that such methods are not able to best utilize the image pre-trained weights of the network due to these new layers. 
Since the sparse tubes yield few additional tokens, they can directly use the same ViT model without factorized attention and are thus able to better utilize the image trained weights. Note that there are still differences between the works, e.g., the reduced number of tokens, etc. However, we believe this observation holds, and is a possible explanation for why the spatio-temporal attention in ViVit performed better for some datasets. \begin{table}[] \centering \vspace{-0.1cm} \begin{tabular}{l|c} Layers Added & K600 \\ \midrule 0 & 84.23 \\ \midrule 1 & 80.23 \\ 2 & 78.87 \\ 4 & 75.24 \\ 8 & 72.95 \\ \bottomrule \end{tabular} \caption{We find that adding even a single layer to a pretrained image network degrades performance. This suggests that the factorized attention methods are sub-optimal since they cannot fully take advantage of the image-pre-trained networks. Trained for 70k steps.} \label{tab:added_layers} \end{table} \textbf{Model scaling ablations.} Table~\ref{tab:pt_scaling} provides ablations on scaling to create TubeViT Base from a Tiny one. Even just training the final few layers is effective (4 of 12), and can nearly match the performance of full finetuning. This is consistent with our observations in Table~\ref{tab:new_scaling} for ViT-H. \begin{table}[] \centering \begin{tabular}{l|c} Trained & K600 \\ \midrule Last FC Layer & 79.6 \\ + 1 Layer & 80.8 \\ + 4 Layers & 81.1 \\ Whole Model & 81.4 \\ \bottomrule \end{tabular} \caption{Image-to-Video scaling from Tiny to Base. We take a ImageNet pre-trained ViT-Base and the TubeViT corresponding to ViT-Tiny and ImageNet pre-trained ViT-Base to create a larger TubeViT. These models were trained for 50k steps.} \label{tab:pt_scaling} \end{table} Figure~\ref{fig:learned_tube_vis} visualizes the learned 2D patches and 3D tubes. 
\begin{figure} \centering \includegraphics[width=\linewidth]{figures/selected-tubes.pdf} \caption{Visualization of a selected set of 2D patches and tubes.} \label{fig:learned_tube_vis} \end{figure} \section{Conclusion} We proposed sparse video tubes for video recognition. With sparse video tubes, a ViT encoder can be transformed into an efficient video model. The approach is simple, enables seamless joint training with images and videos and improves video recognition across multiple datasets. We also demonstrate an elegant scaling of video models with our proposed method. We conduct extensive ablation experiments to determine why the approach works, finding the a combination of the joint training, reduced tokens, and better utilization of shared image+video weights led to the improvements. We obtain SOTA or above performance. \clearpage \newpage
1,108,101,565,841
arxiv
\section{Background} \subsection{Molecular Crystals} Molecular crystals, ordered periodic arrays of molecules, are known to exhibit a wide range of quantum mechanical phenomena, including unconventional superconductivity, quantum criticality, frustrated anti-ferromagnetism, and quantum spin liquid behaviour \cite{chaikin98, powell11, jacko13dmit, dressel07, seo04, brown15, jacko13tmttf, seo15}. In some cases many of these phases can be found in a single material by tuning an external parameter, such as pressure or magnetic field. Often, one can control which phase is expressed by making subtle physical or chemical changes to the molecules \cite{seo04, dressel07, jacko13tmttf, seo15,brown15}. Along with the flexibility of the interactions within individual molecules, molecular crystals also have a range of intermolecular interactions. It is the subtle competition between the many intra- and inter-molecular interaction energies that brings the wide variety of phases seen in experiments so close together. These crystals tend to have a low effective dimension, and this likely contributes to the close competition between the phases \cite{seo04, dressel07,brown15,coldea10}. Molecular crystals are an exciting testing ground for finding and understanding new emergent states of matter. They often display competition between multiple emergent strongly correlated ground states \cite{dressel07,powell11}. This, and the flexibility of organic chemistry, means that the emergent physics is often tuneable by subtle chemical and physical modifications (making slight variations around a core motif) \cite{seo04, dressel07, jacko13tmttf, seo15,brown15}. In one notable case, a superconducting state in an organic molecular crystal is destroyed by substituting some hydrogen atoms for deuterium, its heavy isotope \cite{taniguchi99}. That this extremely subtle change can have such profound consequences is both exciting and intimidating. 
On one hand, it presents the inviting prospect of creating strongly correlated materials with technologically desirable properties; on the other, the level of detail required to correctly predict the phase of a material can be substantial.
In the case of building models of materials, what is typically applied is a \textit{postdictive} approach; one `knows' that to have a model with the observed behaviour, it should be \textit{this} lattice with \textit{that} type of interaction (e.g. Heisenberg model on a triangular lattice, extended Hubbard model on a square lattice, and so on). Thus, there is limited information gained \textbf{about the system}, whether one finds the behaviour one was searching for in such a model or not. One one hand, one chose the model phenomenologically, so finding the correct phenomenology is not profound. On the other, \textbf{not} finding the expected behaviour could be due to any number of reasons from the profound to the trite. Postdictive approaches can be useful, but one must go beyond them to gain a deeper understanding of the important commonalities and differences within a class of materials. For example, such an approach does not show much promise for describing all of the multitude of phases seen in the Fabre salts. What one ideally would like is a systematic way of constructing an effective many-body Hamiltonian from first principles. Constructing the non-interacting part from first principles is currently emminantly possible: By producing localised `Wannier' orbitals (discussed in more detail later), one can use the results of density functional theory (DFT) to construct a tight-binding lattice without first assuming its form. Here we give a brief overview of the theoretical development of the concept and practical details of using Wannier orbitals. For a much more complete and mathematical account, turn to the excellent review of Marzari \textit{et al.} \cite{marzari12}. 
On a related note, it is worth commenting on the distinction between `\textit{ab initio}' and `first principles'; \textit{ab initio} implies no empirical input, just calculations on the grounds of the many-electron Schr{\"o}dinger equation using the fundamental constants of nature such as Planck's constant, the charge of the electron, etc. On the other hand, first principles allows for empirical parameters. Both density functional theory and Wannier orbital construction are in principle \textit{ab initio}, however particular implementations tend to include empirical parameterisations (specific density functionals, for example) that are properly considered first principles rather than \textit{ab initio}. \subsection{Development of Wannier Orbitals} In 1937 Gregory Wannier introduced the idea of constructing localised sets of wavefunctions by fourier transforming Bloch states \cite{wannier37}. For a Bloch wavefunction for band $n$, $\psi_n(\mathbf{k},\mathbf{r})$, the corresponding Wannier function for band $n$ is \begin{equation}\label{eq:wannier} \Phi_{n,\mathbf{R}}(\mathbf{r}) = \int_{FBZ} d^3 \mathbf{k} e^{-i\mathbf{k}.\mathbf{R}}\psi_n(\mathbf{k},\mathbf{r}), \end{equation} where $\mathbf{R}$ is any combination of the crystal lattice vectors with integer prefactors, $\Phi_{\mathbf{R}}(\mathbf{r})$ is localised in the unit cell located at $\mathbf{R}$, and the integral runs over the first Brillouin zone (FBZ). These new wavefunctions have the advantages of atomic orbitals (such as locality) while enforcing orthogonality. This allows one to treat localised excitations of individual electrons in metallic materials on the same footing as the `bulk' electrons (the delocalised Bloch states). By the 1950's these wavefunctions were widely known as Wannier functions, and of great use in understanding the physics of excitations in crystals. 
In 1953, George Koster introduced two new methods for defining Wannier functions without first having to solve the Schr\"{o}dinger equation, and allowing one to use these orbitals to compute energy bands in crystals \cite{koster53}. Walter Kohn put Wannier functions on a rigorous analytical grounding in 1959, showing that one can always find a unique, real, symmetry-preserving, and exponentially localised function for a given single band \cite{kohn59}. In this work he showed (although not in so many words) that Wannier orbitals were the ideal basis for the recently-developed tight-binding method (closely related to the H\"uckel method used in chemistry) \cite{huckel31,slater54}. (Kohn continued working on Wannier orbitals, and in the mid-90's used the locality of Wannier functions, and the consequence that their interactions should decay exponentially, to propose a density functional theory method that scales linearly with the number of atoms \cite{kohn93, kohn95}.) Jacques Des Cloizeaux further expanded the mathematical grounding of Wannier functions, and identified what would later become known as the disentangling problem: if bands overlap, it is difficult to construct Wannier functions for just one of those bands (requiring one to `disentangle' the target band from the other bands it crosses) \cite{descloizeaux64}. This remains a general challenge in using Wannier functions to this day \cite{marzari12}. Due to the practical difficulty of the disentangling problem, and the extra indeterminacy introduced in the disentangling procedure, Wannier functions were not of significant help in computational electronic structure theory until the 90's, when approaches based on density functional theory were introduced \cite{marzari97}. 
\subsection{Wannier orbitals in Density Functional Theory}\label{sec:wodft} The key breakthrough in the application of Wannier orbitals occurred when Nicola Marzari and David Vanderbilt formulated a generalised approach for generating maximally localised Wannier functions for the case of multiple bands \cite{marzari97}. Not only that, they also described a numerical algorithm to produce such orbitals based on Bloch functions sampled on a mesh of points in $k$-space, such as would be the output of a typical DFT code. This allowed for computations of Wannier orbitals in realistic, non-trivial cases. They also suggested the approach of using Wannier functions to construct effective model Hamiltonians for strongly correlated electron systems. A few years later, Marzari and Vanderbilt, along with Ivo Souza, extended their original approach to allow for entangled bands. Together they introduced an efficient disentangling methodology \cite{souza01} that requires no additional information over a usual Wannier construction, just one additional assumption. That assumption is that the `character' of the Wannier orbitals (the contributions from particular basis functions) should vary as smoothly and slowly as possible; this is enforced via minimising the change in character across the Brillouin zone. These works laid the foundation for the wide-spread computation of Wannier orbitals in DFT codes. In 2008, the code \texttt{wannier90} was released \cite{mostofi08}. Developed by Arash Mostofi, Jonathan Yates, Young-Su Lee, along with Souza, Vanderbilt and Marzari, this code is now widely used, designed to interface with any DFT code to produce Wannier orbitals. It is now used in Wannier orbital construction in FPLO \cite{koepernik99}, WIEN2k \cite{kunes10}, Quantum ESPRESSO \cite{giannozzi09}, ABINIT \cite{gonze09}, and Fleur \cite{freimuth08}, to list just a few of the more popular DFT codes for crystals. 
\section{Separation of energy scales in Molecular Crystals} Here we discuss the separation of energy scales that commonly occurs in molecular crystals and how this aids the construction of a minimal set of Wannier orbitals. Molecular crystals tend to have a separation of energy scales in their non-interacting states, while there is competition amongst many possible strongly correlated ground states in the full many-body treatment. Despite the advances made in disentangling procedures, it remains a highly challenging task to produce high quality, reliable Wannier orbitals from entangled bands. This is particularly important if one is concerned about capturing the fine features of the electronic structure, which can have significant effects on the many-body state, as I will discuss explicitly in the case of crystals based on {Pd(dmit)$_2$\xspace}. Molecular crystals provide an exciting playground for applying Wannier orbital based techniques to their greatest potential. Because one can bypass the difficulty and ambiguity of disentangling procedures, one can determine the significance of the fine features of the electronic structure in determining the rich phase diagrams of these materials. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{./fig2_Mol_vs_crystal.png} \caption{Discrete energy levels of a single molecule (left) (here, TMTTF \textit{sans} hydrogens), and continuum band states of the resulting molecular crystal (right). The interatomic interactions within the molecule set the energy scale for the molecular orbitals, while the inter-molecular coupling determines the width of the bands. 
In typical cases these energy scales are quite different (as illustrated).}\label{fig:molvscrystal} \end{center} \end{figure} The separation of energy scales in molecular crystals is straight-forward to understand: the molecules are held together by (strong) covalent bonds, while the crystal is held together by much weaker intermolecular forces; van der Waals, $\pi$-stacking, and hydrogen bonding. The strong forces within a molecule produce a set of well spaced molecular orbitals (MOs), and these orbitals weakly couple between molecules, as illustrated in Fig. \ref{fig:molvscrystal}, producing bands that are narrow on the scale of the MO energy gaps. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{./fig3_PF6_BSDOS.pdf} \caption{Electronic properties of (TMTTF)$_2$PF$_6$ at $T=4$~K, reproduced from \cite{jacko13tmttf}. a) Band structure and density of states, b) path through $k$-space, c) Fermi surface in the $k_z = 0$ plane. The total density of states is shown with the solid black line, and the partial density of states of the anions ($\times$100) is shown in dashed orange. The partial density of states shows that the two bands at the Fermi level are nearly purely TMTTF, with large energy gaps on either side, demonstrating the separation of energy scales.}\label{fig:bsdos} \end{center} \end{figure} We can understand this more concretely by considering a toy example: a 1D, two orbital tight-binding model. For simplicity we consider a chain of spinless fermions, with orbitals $a$ and $b$ on each site $i$, with nearest neighbour interactions. The Hamiltonian is \begin{equation} \hat{H} = \sum_{i} \left[ \frac{\Delta}{2} (\hat{n}_{a,i}-\hat{n}_{b,i}) + \sum_{\alpha=a,b} \left( t_\alpha \hat{c}_{\alpha,i}^\dagger \hat{c}_{\alpha,i+1} + h.c. \right) \right] \end{equation} where $\Delta$ is the energy difference between the orbitals, and $t_\alpha$ is the inter-site hopping for orbital $\alpha$. 
In the case of molecular crystals, the orbitals are molecular orbitals of single molecules. The energy difference between the molecular orbitals ($\Delta$) comes from the inter-atomic hopping within a molecule, typically a $\pi$-type overlap. A typical energy scale for this difference between molecular orbitals in an organic molecule is a few eV (see for example \cite{hinze71}). The inter-site hopping comes from the overlap of molecular orbitals on different molecules, and is exponentially suppressed by distance. These energy scales are on the order of 10 - 100 meV for nearest neighbour overlaps (see for example \cite{jacko13tmttf}). Thus it is often the case in such systems that $|\Delta| > |t_a| + |t_b|$; the bands resulting from each molecular orbital are narrow enough and well-separated enough that they do not overlap in energy. Thus, depending on filling, one can consider just one orbital or the other as the foundation for an effective model Hamiltonian. In applying the Wannier construction procedure outlined above, we have glossed over the details of limiting the Fourier transform window to some small energy range. In the simplest case of a single band system, it is clear that this originally-infinite window can be truncated to be exactly the bandwidth of the single band without any loss of generality. However, in multi-band systems, extracting a subset of bands can become difficult. To understand why, let us consider again the 1D, two orbital model. For $|\Delta| \leq |t_a| + |t_b|$, there are gapless excitations possible between these bands; the two bands have weight in an overlapping energy range. Thus, even in this simplest case without band crossings or interactions, the presence of a second band makes picking out the states of the first band non-trivial. When these bands cross or hybridise, this further complicates the procedure. This problem is very difficult to solve in general and is known as the disentangling problem. 
Now, the separation of energy scales in molecular crystals comes into play. As discussed above, because of the often quite different energy scales of inter- and intra-molecular interactions, it is typical to find well isolated sets of bands in the band structure of a molecular crystal, as illustrated in Fig. \ref{fig:bsdos}. This property means that one can bypass the difficulty and ambiguity of projective disentangling procedures. Thus minimal input is needed into the WO construction procedure in molecular crystals; one just inputs how many orbitals one would like, spanning what energy window. \section{\textit{ab initio} Model Construction} It is important when modeling these systems that the models we use be as accurate and unbiased as possible; starting with preconceptions of how the model `should' look can limit what one finds. \subsection{Tight-Binding Models} DFT can give us useful information about the non-interacting electronic properties of a system. To utilise this information, we will construct a tight-binding model from the DFT using a rigorous Wannier orbital construction technique. This procedure creates localised orbitals that accurately represent the electronic properties found by DFT for the frontier electrons, those that determine the low-temperature physics. As discussed above, the separation of energy scales makes Wannier orbital construction straightforward in molecular crystals. Once one has local Wannier orbitals one can construct a first principles tight-binding model. \subsubsection{First principles versus Fitting} One might ask why is all of this effort justified? Why not simply write down a perfectly good tight binding model and fit it to a first principles band structure? (As is often done, see \cite{kandpal09,scriven12,jeschke12,seo15} for just a sample.) 
For very simple systems, where the relevant tight-binding model is clear, and only has a few parameters, a first principles method is probably not justified as the band structure is well reproduced by fitting methods \cite{kandpal09,scriven12,jeschke12,seo15}. However, in the much more common situation of a somewhat ambiguous tight-binding model with an unknown number of relevant parameters, a first principles approach is ideal, as I will discuss further below. In fact, in some systems where a quite simple model seemed obvious, it has been shown that a somewhat more nuanced model does a better job of capturing the important many-body physics of the material (as discussed in the context of Pd(dmit)$_2$\xspace, below). John von Neumann is famously claimed to have said ``With four parameters I can fit an elephant, and with five I can make him wiggle his trunk.'' \cite{dysononfermi} (this is more-or-less true, as shown in Fig. \ref{fig:elephant}, after Ref. \cite{mayer10}); this captures the essence of the problem of fitting energy bands to a tight-binding model. With enough parameters in the fit, it is difficult to have a \emph{bad} fit of the band structure; a model with many parameters might reproduce the dispersion without having any connection to a realistic microscopic description of the system. As such, a good fit provides almost no information, especially not about the quality of the microscopic model to which you are fitting. Worse, when there are many parameters, very different values can produce similarly good fits by whatever optimisation metric you are using. Often, these different sets of values have importantly different physical consequences (for example changing the electronic dimerisation of a chain, or the localisation of charge). These differences can lead to importantly different many-body ground states, as will be discussed further below. 
\begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{./fig4_elephant.png} \caption{A four-parameter fit to an elephant, produced as described in \cite{mayer10}. A fifth parameter does indeed allow it to wiggle its trunk. While conforming to the letter of the statement, this implementation somewhat defies its spirit as the parameters are complex numbers, thereby carrying twice the information of four real numbers.}\label{fig:elephant} \end{center} \end{figure} To demonstrate this, I will discuss a particular case of producing a tight-binding model for an organic molecular crystal by fitting and via Wannier orbitals. TMTTF$_2$AsF$_6$ is one of the Fabre salts, a family of organic charge-transfer salts with a rich phase diagram, given in Fig. \ref{fig:fabrephase}. These flat organic molecules $\pi$-stack into one dimensional chains along the crystallographic $a$ direction (shown in Fig. \ref{fig:tmttfstructure}), with some inter-chain coupling in the $a-b$ plane, and minimal coupling in the $c$ direction (where the anionic AsF$_6$ layers introduce a large spacing between TMTTF layers). Thus this system is largely 1D electronically, with some 2D nature introduced by next-nearest neighbour hopping and further terms \cite{nogami05,jacko13tmttf,brown15}. \begin{figure} \begin{center} \includegraphics[width=0.9\columnwidth]{./fig5_tmttf_structure.png} \caption{Crystal structure of TMTTF$_2$AsF$_6$ at room temperature, showing a $\pi$-stacked chain along the $a$ direction, and spacing due to anions in the $c$ direction. Reproduced from \cite{jacko13tmttf}.}\label{fig:tmttfstructure} \end{center} \end{figure} In this family of materials, once one decides to include any 2D hopping terms, one encounters the problem that there are many terms of similar magnitude; to avoid neglecting terms of the order one keeps, one needs to add many parameters to the tight-binding model. 
These many degrees of freedom cause problems for fitting procedures; one finds many local minima with similar optimisation function values, but very different parameters, with important physical consequences. This is the heart of the problem; that such fits are examples of \textit{sloppy models}\cite{transtrum15}: changes in one parameter can be almost completely masked by compensatory changes in other parameters. Figure \ref{fig:tbfitting} shows the results of 5000 runs of a fitting procedure applied to the band structure of TMTTF$_2$AsF$_6$ \cite{jeschkecode, jacko13tmttf}; a pre-defined tight binding lattice is input, and the values of the 8 different hopping integrals (cf. Fig. \ref{fig:tmttfnetwork}) are optimised to provide the best fit to the band structure (quantified by the least-squares error over the set of points the bands are sampled on). Each run starts with a (different) randomised set of fitting parameters and is iterated on. Due to the nature of this method, each of the 5000 fits is basically unique. Rounding each $t$ to the nearest 0.1 meV, there are 26 unique fits; of the 5000 results, 74 have the minimum objective function. A given run has a 1.5\% chance (1/67) of finding this `best' solution! There are many more fits with a just slightly higher value of the objective function, and these fits contain contradictory physical information. For example, these different fits make different predictions about the electronic dimerisation of the system; whether the electronic dimers are on the structural dimers or not. This is an important difference, and can change as a function of pressure for example \cite{jacko13tmttf}. While most of the parameters have positive and negative equivalents, a few parameters are very precisely determined, and with a fixed phase. This means that the relative phases of the hopping integrals cannot be absorbed by a gauge transformation; these different sets of parameters have different physical meanings. 
The number of parameters used in the fit also affects the outcome. Removing some $t$ parameters from the input model will cause the other $t$ values to change. Damningly, in this system the optimal tight-binding fit is quite different to the set of hopping integrals found from Wannier orbitals, as shown in Fig. \ref{fig:tbfitting}. The Wannier and fitted parameters make contradictory predictions about the electronic dimerisation of the system (as seen in the magnitude of the first two $t$'s). In the limit of including all Wannier overlaps out to infinite distance, the set of Wannier tight-binding parameters will exactly reproduce the band structure when the bands are not entangled. Overall, it is hard to take fitting procedures too seriously with more than a couple of parameters in the fit. With small numbers of parameters, fitting becomes more stable, and in certain systems a few parameters is enough to accurately describe the band structure \cite{kandpal09,jeschke12,seo15}. Even then, a fitted model should be considered an effective model that has potentially lost important detail in `integrating out' the full set of parameters. In a sense these are `variational Hamiltonians'; they are optimised by some metric, but there is no assurance that they represent the underlying microscopic physics. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{./fig6a_TB_fitting_AsF6.png} \includegraphics[width=0.95\columnwidth]{./fig6b_TB_fitting_AsF6_hist.png} \caption{Instability of fitting with many parameters. 5000 fits of an 8 $t$ tight-binding model to the band structure of TMTTF$_2$AsF$_6$ produced in \cite{jacko13tmttf}. Each line is a set of parameters resulting from one run of the fitting algorithm. The best fit parameters are shown in red. They are inconsistent with the parameters found from Wannier orbital overlaps (given in green). Only 1.5\% of the runs found the minimal value of the objective function. 
The histogram shows the sets of minimisation function values produced in the 5000 runs. Less than 6\% of runs are in the `best' segment of the histogram, and 1/4 in the best two segments.}\label{fig:tbfitting} \end{center} \end{figure} By producing Wannier orbitals for molecular crystals, and computing a set of $t$'s from those, one finds a single set of parameters that is reliable and robust; one can believe them just as much as one believes the other results of the DFT computation. In the general case of Wannier orbital construction, there is ambiguity involved in disentangling bands to produce the desired set of Wanniers \cite{marzari12}. When the bands one cares about are well-separated from the bulk, this ambiguity is gone. Not only that, but one can gain knowledge by looking at the Wannier orbitals themselves. In the case of TMTTF$_2$AsF$_6$, the Wannier orbitals are localised to single TMTTF molecules, as shown in Fig. \ref{fig:tmttfWF}. Here, the Wannier orbital is very much like the HOMO (highest occupied molecular orbital) of a single TMTTF molecule in vacuum. Having this real-space orbital allows us to do many further computations based on the DFT, as well as producing a single robust parameter set for a tight-binding model (Fig. \ref{fig:tmttfnetwork}). This Wannier based model construction technique is being used more and more in molecular systems \cite{nakamura09,nakamura12,jacko13tmttf,jacko13dmit,altmeyer15}. \begin{figure} \begin{center} \includegraphics[height=0.7\columnwidth, angle=-90]{./fig7_tmttf_WF.png} \caption{Wannier orbital for TMTTF$_2$AsF$_6$. Note that this orbital is localised to a single molecule, and very much like the HOMO of an isolated TMTTF molecule. Having this real-space orbital allows us to do many further computations based on the DFT, such as computing a tight-binding model by taking real space overlaps of such orbitals. 
Reproduced from \cite{jacko13tmttf}.}\label{fig:tmttfWF} \end{center} \end{figure} \begin{figure} \begin{center} \includegraphics[width=0.9\columnwidth]{./fig8_tmttf_network.png} \caption{Tight-binding lattice for TMTTF$_2$AsF$_6$ produced from Wannier orbitals. The thickness of the lines is proportional to the magnitude of the $t$; $t_0 = 175$ meV, $t_1 = 157$ meV, and the rest are $< 25$ meV. Reproduced from \cite{jacko13tmttf}.}\label{fig:tmttfnetwork} \end{center} \end{figure} \subsection{Including Interactions} Correctly parameterising many-body effects is an important and challenging task; it is the competition between energy scales that leads to the interesting physics in many systems, so small relative changes in large parameters can have large effects \cite{jacko10a}. Often, these parameters are estimated without careful consideration of the assumptions involved. For instance, if one considers a Hubbard model on a dimer with an inter-monomer hopping $t$, on-monomer Hubbard repulsion $U_m$, and inter-monomer Hubbard repulsion $V$; in the limit $U_m \rightarrow \infty$, $V \rightarrow 0$, the effective Hubbard repulsion in the dimer orbitals is $U_d = 2 t$ \cite{komatsu96, mckenzie98}. This assumption is often used, since it allows one to estimate many-body parameters from straightforward band structure calculations, or molecular H{\"u}ckel calculations. However, it is not well justified. It has since been shown that in the more realistic case of $U_m \sim V \gg t$, that $U_d = \frac{1}{2}(U_m + V)$ \cite{scriven09b}. Thus, one still needs to be able to correctly compute many-body parameters to estimate the dimer parameters (even before considering screening). Nonetheless, this approximation continues to be used (for example \cite{jeschke12,yoshimi12,tsumuraya13}), often without stating the strong underlying assumptions. 
One might think that, given these real-space Wannier orbitals for a particular system, it must be straightforward to calculate the many-body Coulomb integrals and parameterise a Hubbard model. However, computing these terms by simply evaluating the Coulomb energy for each orbital neglects screening (equivalently, relaxation of the bulk states). Screening can easily suppress the Hubbard $U$ by an order of magnitude \cite{nakamura06}. In classical electromagnetics, there are many techniques for determining the response of a bulk to a perturbing field (analogous to the case here of computing the screening/relaxation of a doubly occupied orbital). The discrete dipole approximation (also called the coupled dipole approximation) is one such technique. In this approximation, one discretises the bulk as a set of polarisable dipoles, and self-consistently solves their response to the perturbing field and to each other \cite{devoe64}. This method is very much like a technique applied to molecular crystals to compute screened Coulomb parameters. By representing each molecule by a set of polarisable (classical) dipoles, and placing a perturbing charge on one lattice site, one can compute the correction to the Hubbard repulsion due to the polarisation of the rest of the molecules in the crystal \cite{cano10b}. This technique, though promising, has only been applied to a single molecular crystal (TTF-TCNQ), with no new applications apparent in the 5 years since the original publication. An alternative approach to computing screened Coulomb parameters from first principles has gained prominence recently, the constrained random phase approximation (cRPA) \cite{aryasetiawan04}. This technique is also based around computing the polarisation of the system, but in this case, the quantum mechanical polarisation function in the random phase approximation. Here we discuss RPA, cRPA, and its application to molecular crystals in more detail. 
Practically, it also relies on having Wannier orbitals for a few relevant bands, and so like the tight-binding model construction it is particularly suitable to molecular crystals. \subsubsection{Constrained Random Phase Approximation} The random phase approximation (RPA) was introduced by David Bohm and David Pines in the 1950's to include the effects of screening into models of electron gases \cite{bohm51,pines52,bohm53,pines53}. Murray Gell-Mann and Keith Brueckner placed this approximation on a firmer footing, showing that the RPA can be derived from a self-consistent series of leading order Feynman diagrams \cite{gellmann57}, an example of which is illustrated in Fig. \ref{fig:rpabubbles}. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{./fig9_RPA_bubbles.png} \caption{Random phase approximation bubble diagrams appropriate for calculating the polarisation function. Bold lines are fully interacting Green's functions, while non-bold are non-interacting \cite{rpabubblewiki}.}\label{fig:rpabubbles} \end{center} \end{figure} Given a basis of occupied and unoccupied states, one can compute an RPA polarisation function \begin{equation} \label{eq:rpapol} \begin{split} &P(\vec{r},\vec{r}',\omega)=\sum^{occ}_{i} \sum^{unocc}_{j} \psi_i(\vec{r}) \psi_i^*(\vec{r}') \psi_j^*(\vec{r}) \psi_j(\vec{r}') \times \\ & \left( \frac{1}{\omega - \varepsilon_j + \varepsilon_i + i0^{+}} - \frac{1}{\omega + \varepsilon_j - \varepsilon_i - i0^{+}} \right), \end{split} \end{equation} where $i$ ($j$) runs over the occupied (unoccupied) single particle states. With this polarisation function one can compute the screening effects of this system. This is exactly what one wants if computing the effects of an impurity in such a system, for example. However, if one wants to include many-body effects in a lattice model, then this constitutes an overcounting of the effect. 
In 2004, Aryasetiawan \textit{et al.} introduced a new, precise method for constructing sets of screened effective model parameters for strongly correlated lattice models \cite{aryasetiawan04}. The constrained random phase approximation (constrained RPA or cRPA) is a systematic way of accounting for screening in the many-body parameters computed for some basis orbitals. The system is divided into two fragments; the active subspace (often labelled `d'), the space spanned by the orbitals of interest; and the rest of the bands (labelled `r'). On a conceptual level, this procedure computes the effects of transitions involving the `r' subspace with RPA (by computing the polarisation function due to these transitions), while leaving the transitions within the `d' subspace to be dealt with in the many-body model that results \cite{aryasetiawan04}. This procedure allows one to generate all the terms resulting from the Coulomb interaction, on- and off-site repulsive and magnetic interactions. Practically, one constrains the sums in Eq. \ref{eq:rpapol} to exclude transitions within the active subspace. The partitioning idea at the core of cRPA works best in the same situation that the Wannier orbital procedure itself works best: a set of relevant bands well separated from the bulk. In the situation of entangled bands; where the natural basis one would like to use mixes with bands due to other states; one can apply the Wannier disentangling procedure to construct a disentangled basis \cite{miyake09}. In inorganic systems, there are difficulties in disentangling the target bands from the bulk. Nonetheless this approach was quickly applied to transition metal systems and simple transition metal oxides \cite{nakamura06,miyake08,nakamura08, miyake09}. This approach has been applied to only a few organic crystals \cite{nakamura09,nakamura12}. In those cases where it has, it sometimes finds importantly different parameter values. 
In the ET charge transfer salt $\kappa$-(ET)$_2$ Cu$_2$ (CN)$_3$, cRPA predicts a value of $U/t$ for the dimer about twice as large as was estimated from a H\"uckel analysis of a dimer (using an optical conductivity estimate of the monomer value, $U_m$), $U_{d}^{cRPA}/t \sim 15$ vs $U_{d}^{Huckel}/t \sim 7$ \cite{komatsu96, nakamura09}. In a simple Hubbard model, this would place this material well into the insulating phase, contrary to the observed metallic behaviour. The cRPA analysis also showed that off-site $V$ terms are significant, $V/U \sim 0.5$, meaning that to properly understand the system one must consider an extended Hubbard model \cite{nakamura09}. While the optical conductivity estimate for the monomer $U_m$ is quite reliable, the assumptions in using this to estimate $U_d$ are not. This Wannier-based approach gives us a reliable first principles estimate of all the Hamiltonian parameters on the same footing. \section{First principles approach finds important differences} Here we discuss particular examples to show that using a first principles approach can give importantly different results and insights than a standard fitting approach; be it caused by subtleties of parameter variations or qualitatively different lattices. \subsection{EtMe$_3$Sb[{\pdmit}]$_2$: Fine details matter} To demonstrate the importance of finding a robust set of model parameters we will turn to the example of {EtMe$_3$Sb[{\pdmit}]$_2$}, a spin-liquid candidate material and part of a family of organic molecular crystals with a rich phase diagram; as well as the spin-liquid phase, these materials have Mott insulating, superconducting, spin density wave and valence bond solid phases \cite{powell11,kobayashi91,seya95,tamura02,itou08}. Constructing a coherent picture of this family of materials and their many phases is highly challenging. 
This effort has been hindered by the fact that, in the usual development of microscopic models, many approximations are made without fully understanding their consequences \cite{jacko13dmit}. The typical approach in {EtMe$_3$Sb[{\pdmit}]$_2$} and the related family of materials is to focus on a dimer model, where the dimers of Pd(dmit)$_2$ sit on a $t-t'$ triangular lattice. Parameters are either fit or mapped to this non-interacting model before many-body effects are considered (see for example Refs. \cite{itou08,scriven12}). It has since been shown that a fully-anisotropic triangular lattice (FATL; $t-t'-t''$) better represents the electronic structure \cite{tsumuraya13, jacko13dmit,rudra14}. Further, it was shown that a FATL allows one to reproduce the observed many-body properties, predicting a spin-liquid ground state for reasonable parameter values in {EtMe$_3$Sb[{\pdmit}]$_2$}, while the $t-t'$ model does not \cite{jacko13dmit}. Fig. \ref{fig:fatlphase} shows the phase diagram for the Hubbard model (as a function of $U/t$) on the isotropic triangular lattice, $t-t'$ triangular lattice, and fully anisotropic triangular lattice (FATL), each with tight-binding parameters consistent with {EtMe$_3$Sb[{\pdmit}]$_2$} (computed with variational quantum Monte Carlo). First principles estimates predict $U/t_{max} \sim 11$ \cite{nakamura12}. The FATL enters the spin-liquid phase at this point, while the $t-t'$ and isotropic lattices would predict an insulating phase, with this value of $U$ very far from the critical value. Generally, the extra anisotropy seems to destabilise the insulating phase relative to the metallic and spin-liquid phases. It is worth noting that these variational quantum Monte Carlo results are not definitive; however, if nothing else, they are indicative of the important consequences that even slight parameter changes can have. 
Such highly anisotropic models have since become increasingly used in investigations of this family of materials \cite{seo15}. Having reliable and believable predictions of the degrees of anisotropy in these materials (where the fine variation in parameter values can be attributed to physics and not a quirk of the particular fit one is applying) will be vital for building an understanding of the whole class of materials. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{./fig10_FATL_phasediagram.png} \caption{Phase diagrams of the Hubbard model on the isotropic triangular lattice, $t-t'$ triangular lattice, and fully anisotropic triangular lattice (FATL), for parameters consistent with {EtMe$_3$Sb[{\pdmit}]$_2$}. Note that the transition to the spin-liquid phase occurs for a much smaller value of $U$ in the FATL. Phase diagram determined with variational quantum Monte Carlo \cite{jacko13dmit}. Reproduced from \cite{jacko13dmit}.}\label{fig:fatlphase} \end{center} \end{figure} \subsection{$\kappa$-(BEDT-TTF)$_2$ salts: Long range terms} The BEDT-TTF (bis(ethylenedithio)-tetrathiafulvalene, or ET) organic charge transfer salts are a family of quasi-2D crystals that exhibit a wide range of strongly correlated phases (such as non-BCS $d$-wave superconductivity) \cite{mayaffre95,kino96,vorontsov07,powell11,jacko13et,brown15}. Understanding this wide range of phases requires a good effective model and good model parameters. It was in these materials that the shortcomings of the $U \sim t_{intra}$ approximation (discussed above) were made clear, showing that it leads to a systematic underestimate of $U$ \cite{scriven09b}. Once a realistically large value of $U$ is used (computed with cRPA and found to be a 50\% - 100\% increase over previous estimates), a straightforward Hubbard model of the dimer lattice does not capture anything but the Mott insulating phase \cite{nakamura09}. 
These cRPA parameter estimates also showed that the nearest neighbour inter-site Coulomb interactions are significant ($V/U \sim 0.5$), and that they decay slowly with distance \cite{nakamura09}. By applying first principles model building techniques, it was found that describing the phases of the ET salts requires models like the extended Hubbard model with significant and long-ranged inter-site interactions. Although this kind of model has more parameters, it has no more \textbf{free} parameters. Additionally, the inclusion of long-range Coulomb terms has important implications for the energetics of ordered phases \cite{nakamura09}. \subsection{{Mo$_3$S$_7$(dmit)$_3$\xspace}: An unexpected lattice} In the previous examples, we showed how details in the model parameters are found to have significant consequences on the predicted ground state. We now turn to a system where, by using a first principles method, one finds a totally different lattice model than any previously considered for this system. {Mo$_3$S$_7$(dmit)$_3$\xspace} is a single component molecular crystal that was designed to be metallic. However, it was found to be an activated insulator with an activation energy of 34 meV \cite{llusar04}. Further, it was found to have no sign of any magnetic order down to very low temperatures ($J/k_B T \sim 50$ \cite{llusar04}); a possible sign of a spin-liquid state. Based on the apparent 1D physical properties, its crystal structure, and initial bandstructure calculations, {Mo$_3$S$_7$(dmit)$_3$\xspace} was modeled with a one-dimensional lattice \cite{llusar04,janani14a,janani14b}. This is the 1D `triangular necklace' lattice, illustrated in Fig. \ref{fig:mo3s7lattices}. The ground state of the Hubbard model on this lattice at $2/3$ filling (as appropriate for {Mo$_3$S$_7$(dmit)$_3$\xspace}) is found to be in the Haldane phase, consistent with the experimental evidence \cite{janani14a, janani14b}. 
\begin{figure} \begin{center} \includegraphics[width=0.9\columnwidth]{./fig11_mo3s7_lattices.png} \caption{Lattices for {Mo$_3$S$_7$(dmit)$_3$\xspace}: the phenomenological `triangular necklace' lattice on the left, and first principles kagomene on the right. While quite different, both have interesting topological properties and can provide insights into the behaviour of {Mo$_3$S$_7$(dmit)$_3$\xspace}.}\label{fig:mo3s7lattices} \end{center} \end{figure} However, Wannier orbital tight-binding model construction based on density functional theory for {Mo$_3$S$_7$(dmit)$_3$\xspace} predicts that at the single electron level, this system is actually 2D with coupling between the 2D layers. The lattice of the 2D layers is an unusual decorated honeycomb lattice, the `kagomene' lattice (illustrated in Fig. \ref{fig:mo3s7lattices}); interpolating between the graphene (honeycomb) and kagom{\'e} lattices \cite{jacko15a}. These lattices have quite different properties, and provide quite different pictures of the physics of {Mo$_3$S$_7$(dmit)$_3$\xspace}. The kagomene lattice has been studied theoretically before \cite{yao07,wen10,ruegg10,tikhonov10,yao13}, but never seen in a real system. This first principles approach found a layered kagomene lattice in {Mo$_3$S$_7$(dmit)$_3$\xspace} quite unexpectedly, demonstrating the novel insights this approach can yield. The microscopic picture produced is quite different from the phenomenological model. The one dimensional behaviour can be understood on the grounds of the kagomene lattice: just like kagom{\'e}, this lattice has exactly localised states \cite{bergman08}, illustrated in Fig. \ref{fig:kagomeneflatband}. Once the 2D kagomene lattice is extended into 3D, these localised states become 1D bands. These emergent 1D states are topological; their degeneracy depends on the boundary conditions of the lattice. 
Thus, despite the hopping integrals having similar magnitudes in every direction (in fact, slightly smaller in the stacking direction), one recovers the 1D behaviour and gains some important insights about the potential topological properties of this system. As a phenomenological model, the necklace lattice does a good job of reproducing the observed magnetic properties of {Mo$_3$S$_7$(dmit)$_3$\xspace} \cite{janani14b}. On the other hand, the kagomene model provides a natural explanation for the quasi-1D behaviour, and highlights the interesting topological flat bands analogous to those seen in the kagom{\'e} lattice \cite{jacko15a}. In addition, one can find a lattice closely related to the necklace model as a limiting behaviour of an interacting model in the kagomene lattice, and the many-body behaviour of this model is very similar to the necklace model \cite{nourse}. One can naturally find new terms to extend the necklace model in a consistent way by introducing higher-order terms in these limits, for example including the chiral next-nearest neighbour terms \cite{jacko15a}. \begin{figure} \begin{center} \includegraphics[width=0.9\columnwidth]{./fig12_Kagomene_flatbands.png} \caption{Localised states on the kagomene lattice, showing two plaquette states, the anti-bonding $A_{\bf R}$ and bonding $B_{{\bf R}'}$, and a topologically non-trivial loop state, which can only exist with periodic boundary conditions. Reproduced from \cite{jacko15a}.}\label{fig:kagomeneflatband} \end{center} \end{figure} \section{Summary} Over the last decade Wannier orbitals have become an important tool for predictive physics. By constructing Wannier orbitals for frontier bands, we can derive effective models that avoid our biases. These models are robust and reliable, and allow us to make detailed comparisons between materials, and start to extract some general behaviours. They can also find models that we might never have expected to see, leading us to new insights. 
By moving to this kind of assumption-free methodology for model construction, we can move to a truly \textbf{pre}dictive approach. We can avoid the dangers of relying on variational Hamiltonians, and allow ourselves to find truly unexpected things. \section{Acknowledgments} I would like to thank Ben Powell, Ross McKenzie, Roser Valent{\'i}, Harald Jeschke, and Klaus Koepernik for many interesting discussions on these topics. I thank Ben Powell and Ross McKenzie for their helpful comments on this manuscript. I was supported by the Australian Research Council (ARC) through Grant No. DP130100757.
1,108,101,565,842
arxiv
\section{Introduction and main results} The study of the multiple ergodic averages \begin{equation}\label{0} \frac{1}{N} \sum_{n=1}^{N}f_1(T^{a_1(n)}x)\cdot ... \cdot f_k(T^{a_k(n)}x) \end{equation}for general sequences $a_1(n),...,a_k(n)$ of integers, where $T$ is an invertible measure preserving map on a probability space $(X,\mathcal{X},\mu)$, has been an active field of research in recent years. Born from Furstenberg's proof of Szemer\'{e}di's theorem \cite{Fu1} with ergodic theoretic tools, mean convergence of the averages \eqref{0} has been established for a wide variety of sequences. In this article, our main result is that if the sequences $a_1,...,a_k$ arise from smooth functions of polynomial growth\footnote{A function $f$ is said to have polynomial growth, if there exists a positive integer $d$, such that the ratio $\frac{f(t)}{t^d}$ converges to 0, as $t\to+\infty$.} belonging to a Hardy field \cite{Hardy 1,Hardy 2} and satisfy certain independence assumptions, then they are jointly ergodic, that is, the $L^2$-limit of the averages in \eqref{0} exists and is equal to the product of the integrals of the functions $f_1,...,f_k$, whenever the underlying system $(X,\mathcal{X},\mu,T)$ is ergodic. Some typical examples of sequences that we study are the polynomial sequences with real coefficients, the sequences $\floor{n^{3/2}}$, $\floor{n\log n}$, $\floor{n\log\log n + \exp(\sqrt{\log(n^2+1)})}$, $\floor{n^{\sqrt{2}}/\log^2 n}$ and, in general, sequences arising from functions in the Hardy field $\mathcal{LE}$ of logarithmico-exponential functions. Our main results also establish a conjecture of Frantzikinakis, namely \cite[Problem 23]{Fraopen} (first appearing in \cite{FraHardy1}), which is the content of Theorem \ref{problem}. Furthermore, it gives a partial answer to \cite[Problem 22]{Fraopen}, which asks for general convergence of averages of functions from a Hardy field and generalizes several known results. 
In the case of weak mixing systems, we can relax our assumptions on the functions $a_1,...,a_k$ even further and establish a Furstenberg type weak-mixing theorem (generalizing the results in \cite{Bergelson}), which gives a positive answer to \cite[Problem 3]{FraHardy1}. \subsection{Statement of the problem and main results}\label{mainresults} In order to state our theorems below, we shall work with Hardy fields $\mathcal{H}$ that contain the Hardy field $\mathcal{LE}$ of logarithmico-exponential functions and are closed under composition and compositional inversion of functions. All the subsequent results in this section will be stated under the above assumption. More background on Hardy fields will be presented in Section \ref{background}, where we also present a Hardy field that satisfies the above property. Before we begin, we present below a theorem concerning the case of single ergodic averages. This is a consequence of Theorems 3.2 and 3.3 in \cite{Boshernitzan2}. More precisely, those two theorems handle the case that the function $a$ below has at least linear growth, but the case that $a$ has sub-linear growth rate follows using the same arguments and the equidistribution results in \cite{Boshernitzan1}. The notions of a (measure-preserving) system and ergodicity are defined in Section \ref{background}. We denote by $C\mathbb{Z}[t]$ the collection of all real multiples of integer polynomials on some variable $t$. \begin{customthm}{1}\cite{Boshernitzan2}\label{ergodic} Let $a\in\mathcal{H} $ be a function of polynomial growth that satisfies the following condition: \begin{equation} \label{P1} \tag{{\bf A}}\ \lim\limits_{t\to +\infty}\frac{|a(t)-p(t)|}{\log t}=+\infty \text{ for any polynomial}\ p\in C\mathbb{Z}[t]. 
\end{equation} Then, for any measure preserving system $(X,\mu,T)$ and function $f\in L^{2}(\mu)$, the averages \begin{equation*} \frac{1}{N}\sum_{n=1}^{N} T^{\floor{a(n)}} f \end{equation*}converge in mean to the conditional expectation $\mathcal{E_{\mu}}(f|I(T))$, where $I(T)$ is the invariant factor of the system $(X,\mu,T)$. \end{customthm} \begin{remark*}The above condition is sufficient, but not necessary for convergence in the single iterate case. However, it encompasses most typical functions in $\mathcal{H}$ that are not rational polynomials. \end{remark*} Following the terminology of \cite{Boshernitzan2}, we give the following definition. \begin{definition} We will call a function $a\in\mathcal{H}$ ergodic, if it satisfies \eqref{P1}. \end{definition} We show that a natural extension of the above condition implies norm convergence in the case of multiple averages. If $a_1,...,a_k$ are general sequences or functions, we will denote by $\mathcal{L}(a_1,...,a_k)\subseteq\mathcal{H}$ the set of non-trivial linear combinations of the functions $a_1,...,a_k$ (here $\mathcal{H}$ is a vector space over $\mathbb{R}$). The following theorem is the main result of this article: \begin{theorem}\label{problem} Let $a_1,...,a_k\in\mathcal{H}$ have polynomial growth and assume that every function in $ \mathcal{L}(a_1,...,a_k)$ is ergodic. 
Then, for any ergodic measure preserving system $(X,\mu,T)$ and functions $f_1,...,f_k\in L^{\infty}(\mu)$, the averages \begin{equation}\label{multiple} \frac{1}{N}\sum_{n=1}^{N} T^{\floor{a_1(n)}} f_1\cdot ...\cdot T^{\floor{a_k(n)}}f_k \end{equation}converge in mean to the product of the integrals $\int f_1\ d\mu \cdot...\cdot \int f_k\ d\mu.$ \end{theorem} \begin{remark*}It is a consequence of our proof that the condition on the linear combinations of the functions $a_1,...,a_k$ can be substituted by the following more general assumption: for any real numbers $t_1,...,t_k\in [0,1)$, not all of them zero, we have \begin{equation*} \lim\limits_{N\to+\infty} \frac{1}{N}\sum_{n=1}^{N}e(t_1\floor{a_1(n)}+\cdots + t_k\floor{a_k(n)})=0. \end{equation*}Actually, this is a necessary and sufficient condition in order to have convergence to the product of the integrals in every ergodic system. \end{remark*} If we do not impose an ergodicity assumption on the system $(X,\mu,T)$, then we can show that the averages in the above theorem converge to the product \begin{equation*} \mathcal{E}_{\mu}(f_1|\mathcal{I}_T)\cdot...\cdot \mathcal{E}_{\mu}(f_k|\mathcal{I}_T), \end{equation*}where $\mathcal{E}_{\mu}(f|\mathcal{I}_T)$ is again the projection of $f$ to the invariant factor of the system. This follows from a standard ergodic decomposition argument, and thus, we will usually assume below that the system $(X,\mu,T)$ is ergodic. This theorem extends known results about ergodic averages of functions from a Hardy field. In the case of real polynomials, Theorem \ref{problem} was established in \cite{Koutsogianniscorrelation}. Theorem \ref{problem} was also proven in \cite{FraHardy1} when all functions $a_1,...,a_k$ have different growth rates and satisfy $t^{N_i+\varepsilon}\ll a_i(t)\prec t^{N_i+1}$ for non-negative integers $N_i$ and some $\varepsilon>0$. In addition, Theorem \ref{problem} was established in \cite{BerMorRic} under a variant of our condition. 
More precisely, an independence condition on the functions $a_1,...,a_k$ and on all of their derivatives was imposed. It was proven, however, that if we use a weaker averaging scheme than Ces\'{a}ro averages, we can establish uniform convergence results for the corresponding multiple ergodic averages\footnote{ In our setting, if we substitute the standard Ces\'{a}ro averages in \eqref{multiple} with uniform ones, then Theorem \ref{problem} is known to fail. This is because of the fact that, if a function $f\in\mathcal{H}$ satisfies $t^k\prec f(t)\prec t^{k+1}$ for some non-negative integer $k$, we can find arbitrarily large intervals, such that $\floor{f(n)}$ takes only odd (or only even) values. Then, this assertion fails for the rotation by $1/2$ on the torus $\mathbb{R}/\mathbb{Z}$.} (cf. \cite{BerMorRic2} for similar arguments and some nice multiple recurrence and combinatorial results). Finally, Theorem \ref{problem} was established recently for linear combinations of tempered functions from a Hardy field and real polynomials in \cite{Frajoint} (for functions $f$ belonging to $\mathcal{H}$, the tempered condition is equivalent to the relation $t^k\log t\prec f(t)\ll t^{k+1}, $ for some non-negative integer $k$). Our result is more general, since for example we can see that it covers even simple collections of functions like $\{t\log t,t^2\log t\}$, for which convergence has not been established in the literature. A variant of Theorem \ref{problem} for commuting transformations was proven in \cite{FraHardy2} (under more restrictive conditions). Our methods fail to extend Theorem \ref{problem} to this case, the main reason being that we cannot establish seminorm estimates for convergence of the averages \eqref{multiple} (even in the case when the iterates are independent integer polynomials, characteristic factors have only been described in some special cases like \cite{Chu}). 
Finally, we also remark that a similar problem regarding tempered functions of different growth that do not necessarily belong to some Hardy field was handled in \cite{Koutsogiannistempered}. \subsection{Characteristic factors and the case of weak-mixing systems} If our only objective is to find characteristic factors for the averages in \eqref{multiple}, we can relax the conditions of Theorem \ref{problem} considerably. More precisely, we have the following theorem which appeared as a conjecture in \cite[Problem 3]{FraHardy1}. The notion of the Host-Kra factor of a system is defined in the following section. \begin{theorem}\label{generalfactors} Assume that the functions $a_1,...,a_k\in\mathcal{H}$ have polynomial growth and satisfy \begin{equation*} \lim\limits_{t\to+\infty}\frac {|a_i(t)|}{\log t}= +\infty \ \ \ \text{ for all }\ 1\leq i\leq k \end{equation*}and \begin{equation*} \lim\limits_{t\to+\infty}\frac {|a_i(t)-a_j(t)|}{\log t}= +\infty \ \ \ \text{ for all }\ \ i\neq j . \end{equation*}Then, there exists a positive integer $s$ such that, for any measure preserving system $(X,\mu,T)$, we have \begin{equation*} \lim\limits_{N\to\infty} \bignorm{\frac{1}{N}\sum_{n=1}^{N}T^{\floor{a_1(n)}} f_1\cdot ...\cdot T^{\floor{a_k(n)}}f_k-\frac{1}{N}\sum_{n=1}^{N} T^{\floor{a_1(n)}} \widehat{f}_1\cdot ...\cdot T^{\floor{a_k(n)}}\widehat{f}_k}_{L^2(\mu)}=0, \end{equation*}where $\widehat{f}_i:=\mathcal{E}_{\mu}(f_i|Z_s(X))$ is the projection of $f_i$ to the $s$-step Host-Kra factor of the system. \end{theorem} The conditions above are necessary (one can consider some weakly-mixing systems that are not strongly-mixing to see this). Since for weak-mixing systems, the Host-Kra factors of any order are trivial, we get the following corollary, which extends the results in \cite[Theorem 1.2]{Bergelson} where the iterates are polynomials taking integer values on the integers, as well as some of the results in \cite{BergelsonHaland} involving tempered functions. 
\begin{corollary} Assume that the functions $a_1,...,a_k\in\mathcal{H}$ have polynomial growth and satisfy \begin{equation*} \lim\limits_{t\to+\infty}\frac {|a_i(t)|}{\log t}= +\infty \ \ \ \text{ for all }\ 1\leq i\leq k \end{equation*}and \begin{equation*} \lim\limits_{t\to+\infty}\frac {|a_i(t)-a_j(t)|}{\log t}= +\infty \ \ \ \text{ for all }\ \ i\neq j . \end{equation*} Then, for any weak-mixing system $(X,\mu,T)$, we have \begin{equation*} \lim\limits_{N\to\infty} \frac{1}{N}\sum_{n=1}^{N} T^{\floor{a_1(n)}} f_1\cdot ...\cdot T^{\floor{a_k(n)}}f_k=\int f_1\ d\mu \cdot...\cdot\int f_k\ d\mu , \end{equation*}where convergence takes place in $L^2(\mu)$. \end{corollary} \begin{remark*}The proof of this restricted form of Theorem \ref{generalfactors} still requires a large portion of the arguments that are used in this article. \end{remark*} \subsection{Combinatorial Applications} As a corollary of Theorem \ref{problem}, we get the following multiple recurrence result. \begin{corollary} Let $a_1,...,a_k$ be functions from a Hardy field $\mathcal{H}$ such that every non-trivial linear combination of the functions is ergodic. Then, for any measure preserving system $(X,\mu,T)$ and any set $A\subset X$ with $\mu(A)>0$, we have \begin{equation*} \lim\limits_{N\to\infty} \frac{1}{N}\sum_{n=1}^{N}\ \mu(A\cap T^{-\floor{a_1(n)}}A\cap\cdots\cap T^{-\floor{a_k(n)}}A)\geq\mu(A)^{k+1}. \end{equation*} \end{corollary} A similar result was established in \cite{BerMorRic} with $\limsup$ in place of the limit, but under more general conditions on the functions $a_1,...,a_k$. Utilizing Furstenberg's correspondence principle, we can deduce a combinatorial result about large sets of integers. First of all, we give a definition of the asymptotic density of a set. Assume $\Lambda\subset \mathbb{N}$. 
Then, we define the upper density of the set $\Lambda$ as the limit \begin{equation*} {\bar{d}}(\Lambda):= \limsup\limits_{N\to\infty} \frac{|\Lambda\cap [1,N]|}{N} \end{equation*}and the lower density $\underline{d}$ is defined similarly with $\liminf$ instead of $\limsup$. If those limits coincide, then we say that the set $\Lambda$ has natural density $d$ equal to the limit. \begin{customthm}{}[Furstenberg's correspondence principle] For any set $\Lambda\subset \mathbb{N}$ with positive upper density, there exists an invertible measure preserving system $(X,\mu,T)$ and a measurable set $A\subset X$, such that $\overline{d}(\Lambda)=\mu(A)$ and for any $r_1,...,r_k\in \mathbb{Z}$, we have \begin{equation*} \bar{d}(\Lambda \cap (\Lambda -r_1)\cap\cdots\cap (\Lambda -r_k))\geq \mu(A\cap T^{-r_1}A\cap\cdots\cap T^{-r_k}A). \end{equation*} \end{customthm} \begin{corollary} Let $\Lambda\subset\mathbb{N}$ have positive upper density and let $a_1,...,a_k$ be functions from a Hardy field $\mathcal{H}$, such that every non-trivial linear combination of these functions is ergodic. Then,\begin{equation*} \liminf\limits_{N\to\infty} \frac{1}{N}\sum_{n=1}^{N} \bar{d}(\Lambda \cap (\Lambda -\floor{a_1(n)})\cap\cdots\cap (\Lambda -\floor{a_k(n)}))\geq (\bar{d}(\Lambda))^{k+1}. \end{equation*} \end{corollary} \subsection{General overview of the proof and organization of the paper} Similarly to the work in \cite{FraHardy2,FraHardy1,BerMorRic}, our approach is to show that the Host-Kra factor introduced in \cite{Host-Kra1} (see also \cite{Host-Kra structures} for a presentation of the general theory) is characteristic for convergence of our averages. This is a technique used extensively in the literature to reduce the problem of convergence in general measure preserving systems to the case where the system is a rotation in a nilpotent homogeneous space. 
However, we shall use a recent result of Frantzikinakis \cite{Frajoint} which roughly asserts that in order to prove Theorem \ref{problem}, we only need to prove that the Host-Kra factor is characteristic for the averages in \eqref{multiple} plus some simple equidistribution results on the torus (which are simple consequences of the equidistribution results in \cite{Boshernitzan1}). This bypasses the usual hassle of proving convergence of the corresponding averages in nilmanifolds. We remark that this technique can only be used when we expect convergence of certain ergodic averages to the product of the integrals of the involved functions, which is the case in this article. Furthermore, there are also several differences between our methods and the methods used in \cite{BerMorRic} and \cite{FraHardy2} to establish seminorm estimates, where a standard PET induction argument was utilized to reduce to the case of functions with sub-linear growth rate. This technique restricts the cases that can be handled, because the van der Corput operation may eventually yield functions that do not satisfy the condition \eqref{P1} (a typical example in this case is the pair of functions $(t\log t, t\log\log t)$, which ``drop'' to functions that have growth rate smaller than $\log t$ after applications of the van der Corput inequality). In order to overcome this, we use the fact that Hardy field functions of polynomial growth behave ``locally'' as polynomials. This observation was used in \cite{FraHardy1} to handle the special case of the family $\{\floor{a(n)},2\floor{(a(n)},...,k\floor{a(n)}\}$, where there is only one Hardy field function. It was shown in this case that the corresponding multiple ergodic averages over small intervals converge to 0, provided that $f_1$ is orthogonal to one particular Host-Kra factor of the system. This technique does not extend to the more general case that we wish to cover here. 
However, we can prove instead that these averages can be bounded by finite ergodic averages, where the iterates are polynomials. We use then a double averaging trick and the asserted asymptotic bounds to show that the Host-Kra factor is, indeed, characteristic for convergence of the multiple averages in our setting. The price to pay is that our argument has to be somewhat finitary in nature and this makes the proof slightly more technical and cumbersome. We also note here that our work concerns finding bounds for ergodic averages involving families of variable polynomials. Some general convergence results regarding multiple ergodic averages of variable polynomials were recently established in \cite{Koutsogiannisvariable}. A general difficulty in the proofs is that functions in $f \in \mathcal{H}$ that satisfy $f(t)\ll t^{\delta}$ for all $\delta>0$ (such as the functions $(\log t)^c$, where $c>1$) behave differently from functions that dominate some fractional power. We describe this difference more clearly in the Appendix, where we also provide several propositions and lemmas that will be used extensively in Sections \ref{sectionfactors} through \ref{reductionestimates}. In addition, we will revisit the ideas discussed above in Section \ref{sectionfactors} and also present some examples that we believe help illustrate the argument of the main proofs. Our results do not cover the case of general convergence (not necessarily to the product of the integrals) of the averages in \eqref{multiple}. In this case, the ergodic assumption on the functions can be relaxed further to include more functions, like the polynomials with integer coefficients. In order to establish this, we need to deal with the case of convergence in nilsystems, which will be done in a subsequent article. 
\subsection{Some open problems } An interesting problem that arose when trying to prove our main result is whether sequences of the form $\floor{a(n)}^{\ell}$ are good for the multiple ergodic theorem, where $\ell$ is a natural number. In the special case $\ell=2$, we present the following problem: \begin{conjecture} Let $c_1,...,c_k$ be distinct positive non-integers. Do the averages \begin{equation*} \frac{1}{N}\sum_{n=1}^{N} T^{\floor{n^{c_1}}^2} f_1\cdot...\cdot T^{\floor{n^{c_k}}^2} f_k \end{equation*}converge in mean? \end{conjecture} Conjecture 1 seems non-trivial even in the case where the fractional powers $n^{c_i}$ are replaced by general (non-integer) real polynomials. In the case $k=1$, we can use the spectral theorem and the equidistribution results presented in \cite{Fraeq} to give a positive answer. In particular, it was proven in the same article that $\floor{a(n)}^k$ is good for the ergodic theorem when $a$ stays logarithmically far from real multiples of integer polynomials (this is condition \eqref{P1}). If we apply the van der Corput inequality, the resulting sequences at each step become very complicated and may oscillate substantially. As a consequence, the classical methods of finding characteristic factors do not seem to yield a result in this case. Furthermore, we expect that the above averages are jointly ergodic for totally ergodic systems: \begin{conjecture} Let $c_1,...,c_k$ be distinct positive non-integers and let $(X,\mu,T)$ be a totally ergodic system. Show that the averages \begin{equation*} \frac{1}{N}\sum_{n=1}^{N} T^{\floor{n^{c_1}}^2} f_1\cdot...\cdot T^{\floor{n^{c_k}}^2} f_k \end{equation*}converge to the product of the integrals $\int f_1\ d\mu \cdot...\cdot \int f_k\ d\mu$ for any functions $f_1,...,f_k\in L^{\infty}(\mu)$. \end{conjecture} The above problem is interesting even for weak-mixing systems. In addition, the problem of multiple recurrence is also open. In its simplest form, we have the following open question. 
\begin{conjecture} Show that any set of positive upper density contains patterns of the form \begin{equation*} \{m,m+\floor{n^a}^2,m+\floor{n^b}^2, \ m,n\in\mathbb{N}\}, \end{equation*}where $a,b>1$ are distinct non-integers. \end{conjecture} In order to establish this, it may be possible to sidestep the more difficult problem of proving convergence of the corresponding ergodic averages (see, for example, the arguments in \cite{Wierdl}). We do not concern ourselves with this here, however. \subsection{Acknowledgement} I would like to thank my advisor Nikos Frantzikinakis for helpful discussions. \subsection*{Notational conventions}We use $\mathbb{N}$ to denote the set of natural numbers, while $\mathbb{Z}^{+}$ denotes the non-negative integers. For two sequences $a_N$ and $b_N$, we say that $b_N$ {\em dominates} $a_N$ and write $a_N\prec b_N$ or $a_N=o(b_N)$, if and only if the fraction $\Big|\frac{a_N}{b_N}\Big|$ tends to 0 as $N\to\infty$ and we write $a_N\sim b_N$, when this limit is a finite non-zero real number. In the latter case, we say that the sequences $a_N$ and $b_N$ have the same growth rate. In addition, we write $a_N\ll b_N$ or $a_N=O(b_N)$ if there exists a constant $C$ such that, $|a_N|\leq C|b_N|$ for all $N\in \mathbb{N}$. When we want to express dependence on some parameters $c_1,...,c_k$ in the above bounds, we will use the notation $a_N=O_{c_1,..,c_k}(b_N)$ instead. We use similar asymptotic notation when we compare growth rates of functions in some real variable $t$. Furthermore, for a real valued function $f$ we will denote by $f^{(k)}$ the $k$-th order derivative of $f$, assuming it is well defined. We will sometimes use bold letters to distinguish between scalar and vector valued quantities. For a positive integer $M$, we will use $[M]$ to denote the set $\{1,2,...,M\}$. 
Given a sequence $a(n)$ and a real number $x\geq 1$, we will use the averaging notation \begin{equation*} \underset{1\leq n\leq x}{\mathbb{E}}a(n):=\frac{1}{\floor{x}}\sum_{n=1}^{\floor{x}}a(n). \end{equation*} Consider a positive integer $s$. We will denote by $[[s]]$ the set $\{0,1\}^s$ of ordered $s$-tuples of zeroes and ones, which contains $2^s$ elements. For elements of the set $[[s]]$, we will use the notation $\underline{\varepsilon}$ instead of bold letters. For convenience, we will write $\underline{0}, \underline{1}$ for the elements $(0,0,...0)$ and $(1,1,...,1)$ of $[[s]]$ respectively. We will also define $|\underline{\varepsilon}|$ to be the sum of elements of $\underline{\varepsilon}$. For a finite set $Y$, we will similarly use the notation $Y^{[[s]]}$ to denote the set $Y^{2^s}$. Each element ${\bf h}\in Y^{[[s]]}$ can be represented as ${\bf h}=(h_{\underline{\varepsilon}},\underline{\varepsilon}\in [[s]])$ where each $h_{\underline{\varepsilon}}$ belongs to $Y$. For complex numbers $z$, we define the operator $\mathcal{C}^kz$, where $\mathcal{C}^kz:=z$, if $k$ is an even number and $\mathcal{C}^kz:=\bar{z}$ otherwise. Finally, we use the notation $e(t):=e^{2\pi it}$ for $t\in \mathbb{R}$. \section{Background material}\label{background} \subsection{Preliminaries on Hardy fields} Let $\mathcal{B}$ denote the set of germs at infinity of real valued functions defined on a half-line $[x,+\infty)$. Then, $(\mathcal{B},+,\cdot)$ is a ring. A sub-field $\mathcal{H}$ of $\mathcal{B}$ that is closed under differentiation is called a Hardy field. We will say that $a(n)$ is a Hardy sequence, if for $n\in \mathbb{N}$ large enough we have $a(n)=f(n)$ for some function $f\in\mathcal{H}$. We will make some small abuse of language and sometimes also refer to sequences of the form $\floor{f(n)}$ as Hardy sequences. An example of a Hardy field is the field $\mathcal{LE}$ of logarithmico-exponential functions. 
These are the functions defined on some half line of $\mathbb{R}$ by a finite combination of the operations $+,-,\cdot,\div,\ \exp$, $\log$ and composition of functions acting on a real variable $t$ and real constants. The set $\mathcal{LE}$ contains functions such as the polynomials $p(t)$, $t^c$ for all real $c>0$, $t\log t $, $t^{(\log t)^2}$ and $e^{\sqrt{t}}/t^2$. The main advantage when working with functions in a Hardy field (instead of just the $C^{\infty}$ functions) is that any two functions $f,g\in\mathcal{H}$ are comparable. That means that the limit \begin{equation*} \lim\limits_{t\to\infty} \frac{f(t)}{g(t)} \end{equation*}exists and thus it makes sense to talk about and compare their growth rates. In addition, since every function in our Hardy field has a multiplicative inverse, we can easily infer that every function in $\mathcal{H}$ is eventually monotone (and, therefore, has constant sign eventually). It will be crucial in the proof to assume that $\mathcal{H}$ is closed under composition and compositional inversion of functions, when defined. More precisely, if $f,g\in \mathcal{H}$ are such that $\lim\limits_{t\to+\infty}g(t)= +\infty$, then we have that $f\circ g\in\mathcal{H}$ and $g^{-1}\in\mathcal{H}$. The Hardy field $\mathcal{LE}$ does not have this property. This can be achieved by working with the Hardy field $\mathcal{P}$ of Pfaffian functions \cite{Kovanski}, which contains $\mathcal{LE}$ and satisfies the previously mentioned assumptions. This field can be defined inductively as follows:\\ i) Let $\mathcal{P}_1$ be the set of the smooth functions satisfying the differential equation $f'=p(t,f)$ for some polynomial $p$ with integer coefficients.\\ ii) Let $\mathcal{P}_k$ be the set of the smooth functions satisfying the differential equation $f'=p(t,f_1,...,f_k)$ for some polynomial $p$ with integer coefficients and $f_i\in P_i$ for $1\leq i\leq k-1$. 
Then $\mathcal{P}$ contains all germs at infinity of the set $\cup_{i=1}^{\infty} \mathcal{P}_i$. From now on, we will assume that $\mathcal{H}$ has all the above properties. In the appendix, we have gathered some lemmas regarding growth rates of functions in $\mathcal{H}$, which will play a crucial role in the approximations in the following sections. Finally, we give some definitions for functions whose growth rate is of particular interest. \begin{definition}We say that a function $f\in\mathcal{H}$ has sub-linear growth rate (or is sub-linear), if $f(t)\prec t$. We say that a function $f\in\mathcal{H}$ has sub-fractional growth rate (or is sub-fractional), if for all $\delta>0$, we have $f(t)\ll t^{\delta}$. \end{definition} Typical examples of sub-linear functions are $\sqrt{t}$, $e^{\sqrt{\log t}}$ and $\log^3(t)$. Among these, the functions $e^{\sqrt{\log t}}$ and $\log^3(t)$ are also sub-fractional, while the first one is not sub-fractional. \begin{definition} We will call a function $f\in\mathcal{H}$ of polynomial growth strongly non-polynomial, if there exists a non-negative integer $d$, such that \begin{equation*} t^{d}\prec f(t)\prec t^{d+1}. \end{equation*} \end{definition} For example, the functions $t^{3/2}$ and $\log^3(t)$ are strongly non-polynomial, while the function $t^2+\sqrt{t}$ is not. \subsection{Background in ergodic theory} \subsubsection{Ergodicity and factors} A measure preserving system is a probability space $(X,\mathcal{X},\mu)$ equipped with an invertible measure preserving transformation $T$. We call a system ergodic, if the only $T$-invariant functions in $L^{\infty}(\mu)$ are the constant ones. The system $(X,\mathcal{X},\mu,T)$ is called weak-mixing, if the product system $(X\times X,\mathcal{X}\times \mathcal{X},\mu\times\mu,T\times T)$ is ergodic. 
We say the system $(Y,\mathcal{Y},\nu,S)$ is a factor of $(X,\mathcal{X},\mu,T)$, if there exist $X'\subset X$, $Y'\subset Y$ of full measure that are invariant under $T$ and $S$ respectively and a map $p:X'\to Y'$ such that $\nu=\mu\circ p^{-1}$ and $p\circ T(x)=S\circ p(x)$ for all $x\in X'$. If $p$ is a bijection, we say that the two systems are isomorphic. A factor of the system $(X,\mathcal{X},\mu,T)$ corresponds to a $T$-invariant sub-$\sigma$-algebra of $\mathcal{X}$ (in the above example this $\sigma$-algebra is $p^{-1}(\mathcal{Y})$). From now on, we will omit the $\sigma$-algebra $\mathcal{X}$ from the quadruple $(X,\mathcal{X},\mu,T)$. \subsubsection{Host-Kra seminorms and factors} Let $(X,\mu,T)$ be an invertible measure preserving system and let $f\in L^{\infty}(\mu)$. We define the Host-Kra uniformity seminorms inductively as follows:\begin{equation*} \nnorm{f}_{0,T}:=\int f \ d\mu \end{equation*}and, for $s\in \mathbb{Z}^{+}$, \begin{equation}\label{uniformitynorms} \nnorm{f}_{s+1,T}^{2^{s+1}}:=\lim\limits_{H\to\infty}\underset{0\leq h\leq H}{\mathbb{E}} \nnorm{\bar{f}\cdot T^h f}_{s,T}^{2^s}. \end{equation} The existence of the limits above was proven in \cite{Host-Kra1} in the ergodic case (for the non-ergodic case, see \cite{Host-Kra structures} for a proof) and it was also established that the $\nnorm{\cdot}_s$ are indeed seminorms (for $s\neq 0)$. More importantly, it was also shown in the same article that the seminorms $\nnorm{f}_{s,T}$ define a factor $Z_{s-1}(X)$ of $X$, which is characterized by the following property:\begin{equation*} f\perp L^2(Z_{s-1}(X))\iff \nnorm{f}_{s,T}=0. \end{equation*} It can be shown that the factors $Z_s(X)$ form an increasing sequence of factors. This follows from the inequality $\nnorm{f}_{s,T}\leq \nnorm{f}_{s+1,T}$, for all non-negative integers $s$. For weak-mixing systems, it can be shown that all the factors $Z_s(X)$ are trivial. 
Furthermore, it is easy to prove that $\nnorm{\bar{f}\otimes f}_{s,T\times T}\leq \nnorm{f}_{s+1,T}^2$, where $\bar{f}\otimes f$ denotes the function $(x,y)\to \overline{f(x)}f(y)$ on $(X\times X,\mu\times \mu,T\times T)$. Finally, when there is no danger of confusion, we will omit the subscript $T$ in the seminorms and write simply $\nnorm{f}_s$. \subsubsection{Joint ergodicity of sequences } Let $a_1(n),...,a_k(n)$ be sequences of integers. Following the terminology in \cite{Frajoint}, we call these sequences {\em jointly ergodic}, if for any ergodic measure preserving system $(X,\mu,T)$ and functions $f_1,...,f_k\in L^{\infty}(\mu)$, we have \begin{equation*} \lim\limits_{N\to+\infty} \frac{1}{N} \sum_{n=1}^{N}T^{a_1(n)}f_1\cdot ... \cdot T^{a_k(n)}f_k =\int f_1\ d\mu \cdot ....\cdot \int f_k\ d\mu, \end{equation*}where convergence takes place in $L^2(\mu)$. We also give the following definitions: \begin{definition}We say that a collection of sequences $a_1,...,a_{k}$ of integers:\\ i) is {\em good for seminorm estimates}, if for every ergodic system $(X,\mu,T)$ there exists an $s\in \mathbb{N}$, such that if $f_1,...,f_k\in L^{\infty}(\mu)$ and $\nnorm{f_{\ell}}_s=0$ for some $\ell\in\{1,...,k\}$, then\footnote{In \cite{Frajoint}, this property is called "{\em very good for seminorm estimates}".}\begin{equation*} \lim\limits_{N\to+\infty }\frac{1}{N} \sum_{n=1}^{N}T^{a_1(n)}f_1\cdot ... \cdot T^{a_k(n)}f_k=0 \end{equation*}in $L^2(\mu)$.\\ ii) is {\em good for equidistribution}, if for all $t_1,...,t_{k}\in [0,1)$, not all of them zero, we have \begin{equation*} \lim\limits_{N\to+\infty} \frac{1}{N} \sum_{n=1}^{N} e(t_1a_1(n)+\cdots+t_ka_k(n))=0. \end{equation*} \end{definition} The main result in \cite{Frajoint}, which we are also going to use is the following: \begin{customthm}{2}\cite[Theorem 1.1]{Frajoint} \label{jointlyergodic} Let $a_1,...,a_k$ be a collection of sequences of integers. 
Then, the following are equivalent:\\ i) The sequences $a_1,...,a_k$ are jointly ergodic.\\ ii) The sequences $a_1,...,a_k$ are good for seminorm estimates and good for equidistribution. \end{customthm} \begin{proof}[Proof that Theorem \ref{problem} follows from Theorem \ref{generalfactors}] Note that every ergodic function dominates the logarithmic function $\log t$. Therefore, if the functions $a_1,...,a_k$ are such that every non-trivial linear combination of them is ergodic, then the hypotheses of Theorem \ref{generalfactors} are satisfied, which means that the sequences $\floor{a_1(n)},...,\floor{a_k(n)}$ are good for seminorm estimates. Therefore, due to Theorem \ref{jointlyergodic} we only need to prove that they are good for equidistribution. This, however, follows from the equidistribution results in \cite{Boshernitzan1} and has been established in \cite[Proposition 6.3]{Frajoint}. \end{proof} \section{Characteristic factors for Hardy sequences}\label{sectionfactors} In this section, we present the main proposition that asserts that the Host-Kra factors of a given system are characteristic for the convergence of the averages \eqref{multiple}. That means that if we substitute the functions $f_i$ by their projections on $Z_{s}(X)$ for some suitable $s\in \mathbb{N}$, then the limiting behavior of the average in \eqref{multiple} remains unchanged. We will also make some small reductions to the original problem and prove some lemmas that will be essential in the proofs in the following sections. We also provide a brief overview of the proof and some examples that we believe present the idea of the proof, while avoiding most of the technicalities. The following proposition will be proven in subsequent sections. \begin{proposition}\label{factors} Assume that the functions $a_1,a_2,...,a_k\in\mathcal{H}$ have polynomial growth and suppose that the following two conditions hold: i) The functions $a_1,...,a_k$ dominate the logarithmic function $\log t$. 
ii) The pairwise differences $a_i-a_j$ dominate the logarithmic function $\log t$ for any $i\neq j$. Then, there exists a positive integer $s$, such that for any measure preserving system $(X,\mu,T)$, functions $f_1\in L^{\infty}(\mu)$ and $f_{2,N}...,f_{k,N}\in L^{\infty}(\mu)$, all bounded by $1$, with $f_1\perp Z_{s}(X)$, the expression \begin{equation}\label{factor} \sup_{|c_{n}|\leq 1} \norm{\underset{1\leq n\leq N}{\mathbb{E}}\ c_{n}\ T^{\floor{a_1(n)}} f_1 \cdot T^{\floor{a_2(n)}}f_{2,N}\cdot ...\cdot T^{\floor{a_k(n)}}f_{k,N}}_{L^2(\mu)} \end{equation}converges to 0, as $N\to+\infty$. \end{proposition} \begin{remark*} It is possible to establish Proposition \ref{factors} under the weaker assumption that only the functions $a_1,a_1-a_2,...,a_1-a_k$ dominate the logarithmic function, but this requires a few more details in the proof and is not required for the proof of Theorem \ref{generalfactors}. \end{remark*} It is obvious that Proposition \ref{factors} implies Theorem \ref{generalfactors} (this follows from a standard telescoping argument). Therefore, all of our remaining results follow if we establish this proposition. The reason that we work with sequences of functions and the bounded sequence $c_n$ is because that will be helpful in some spots to absorb some of the error terms that will appear in the iterates and also allows us to "transform" the sequences in the iterates, so that we can reduce our problem to the case that the first sequence $a_1$ has some specific properties depending on the situation. As an example, we claim that we only need to consider the case when the function $a_1(t)$ has maximal growth in the family $\{a_1,...,a_k\}$. Indeed, suppose that this is not the case. Then, there exists a function $a_i$ for some $ i\in\{1,...,k\}$ with $a_1\prec a_i$. Without loss of generality, assume that the function $a_k$ has maximal growth rate. 
It is sufficient to show that for any sequence of functions $g_N$ with $||g_N||_{L^{\infty}(\mu)}\leq 1$, we have \begin{equation*} \lim\limits_{N\to\infty} \underset{1\leq n\leq N}{\mathbb{E}}c_{n,N}\int g_N\ T^{\floor{a_1(n)}} f_1\cdot ...\cdot T^{\floor{a_k(n)}}f_{k,N}\ d\mu =0. \end{equation*}Then, we can choose the function $g_N$ to be the conjugate of the average \begin{equation*} \underset{1\leq n\leq N}{\mathbb{E}}c_{n,N}\int T^{\floor{a_1(n)}} f_1\cdot ...\cdot T^{\floor{a_k(n)}}f_{k,N} \ d\mu \end{equation*}to get our claim. Composing with $T^{-\floor{a_k(n)}}$ and applying the Cauchy-Schwarz inequality, it is sufficient to show that \begin{equation*} \lim\limits_{N\to+\infty} \sup_{|c_{n}|\leq 1} \norm{\underset{1\leq n\leq N}{\mathbb{E}}\ c_{n}\ T^{-\floor{a_{k}(n)}}g_N\cdot T^{\floor{a_1(n)}-\floor{a_k(n)}} f_1\cdot ...\cdot T^{\floor{a_{k-1}(n)}-\floor{a_k(n)}}f_{k-1,N}}_{L^2(\mu)}=0. \end{equation*}We can write $\floor{a_i(n)}-\floor{a_k(n)}=\floor{a_i(n)-a_k(n)} +e_{i,n}$, where the errors $e_{i,n}$ take values in $\{0,\pm 1\}$. Using Lemma \ref{errors} below, the errors can be absorbed by the supremum outside the average and, therefore, the function that corresponds to $f_1$ is equal to $a_1-a_k$, which now has maximal growth rate among the new family of functions. It is also easy to check that the new family satisfies the conditions of Proposition \ref{factors}. This notion of absorbing the errors that we described above can be made more precise by the next lemma. \begin{lemma}\label{errors} Assume that the integers $e_{i,n,N}$ take values in a finite set $S$. 
Then, for any sequences $a_{i,N}$ of integers, complex numbers $c'_{n,N}$ bounded in magnitude by 1 and any 1-bounded functions $f_{i,N}$, we have \begin{align*} \norm{ \underset{1\leq n\leq N}{\mathbb{E}}\ c'_{n,N}\ T^{a_{1,N}(n)+e_{1,n,N}}f_{1,N}\cdot...\cdot T^{a_{k,N}(n)+e_{k,n,N}}f_{k,N}}_{L^2(\mu)}&\ll_{k,S} \\ \sup_{|c_{n,N}|\leq 1}\ \sup_{||f_2||_{\infty}\leq 1,...,||f_k||_{\infty}\leq 1}^{} \ \norm{\underset{1\leq n\leq N}{\mathbb{E}}\ c_{n,N}\ T^{a_{1,N}(n)}f_{1,N}\cdot T^{a_{2,N}(n)}f_{2} \cdot ...\cdot T^{a_{k,N}(n)}f_{k}}_{L^2(\mu)}&. \end{align*} As a consequence, there exist 1-bounded functions $f'_{i,N}$, such that the original expression is bounded by a constant multiple of the quantity \begin{equation*} \sup_{|c_{n,N}|\leq 1}\norm{ \mathbb{E}_{1\leq n\leq N} \ c_{n,N}\ T^{a_{1,N}(n)}f_{1,N}\ T^{a_{2,N}(n)}f'_{2,N}\cdot ...\cdot T^{a_{k,N}(n)}f'_{k,N}}_{L^2(\mu)} +o_N(1). \end{equation*} \end{lemma} \begin{proof} We partition the integers $n$ into a finite number of sets, in which all the quantities $e_{i,n,N} $ are constant (as $n$ varies). There are at most $|S|^{k}$ such sets. If $A_1,...,A_{|S|^{k}}$ are these sets, then we have \begin{align*} &\norm{ \underset{1\leq n\leq N}{\mathbb{E}} c'_{n,N}\ T^{a_{1,N}(n)+e_{1,n,N}}f_{1,N}\cdot...\cdot T^{a_{k,N}(n)+e_{k,n,N}}f_{k,N}}_{L^2(\mu)}\leq \\ &\sum_{i=1}^{|S|^k} \bignorm{\frac{1}{N}\sum_{n\in A_i}^{} \ c'_{n,N}\ T^{a_{1,N}(n)+e_{1,n,N}}f_{1,N}\cdot...\cdot T^{a_{k,N}(n)+e_{k,n,N}}f_{k,N} }_{L^2(\mu)}\leq \\ &|S|^{k} \max_{1\leq i\leq |S|^k} \bignorm{\frac{1}{N}\sum_{1\leq n\leq N}^{} \ c'_{n,N} \mathbbm{1}_{A_i}(n)\ T^{a_{1,N}(n)}f_{1,N}\cdot...\cdot T^{a_{k,N}(n)+e_{k,n,N}-e_{1,n,N}}f_{k,N} } _{L^2(\mu)} \leq \\ &|S|^{k} \sup_{|c_{n,N}|\leq 1}\ \sup_{||f_2||_{\infty}\leq 1,...,||f_k||_{\infty}\leq 1}^{} \norm{\underset{1\leq n\leq N}{\mathbb{E}} c_{n,N}\ T^{a_{1,N}(n)}f_{1,N}\ T^{a_{2,N}(n)}f_{2}\cdot...\cdot T^{a_{k,N}(n)}f_{k}}_{L^2(\mu)}, \end{align*}which is the required result. 
In the second to last relation, we composed with $T^{-e_{1,n,N}}$, because $e_{1,n,N}$ is constant when $n$ is restricted to the set $A_i$. \end{proof} \begin{remark*}In the following sections, we will encounter situations where we have some error terms in the iterates. The above lemma is not applied verbatim to all cases below. However, the reasoning presented above (i.e. partitioning into sets where the error sequences are constant) can be applied directly every time to remove these error terms. In particular, we can also show (using the same arguments) that a similar statement holds for double averages, that is, if $I_r$ are a sequence of intervals with lengths going to infinity, $d$ is a natural number and the error terms $e_{i,n,R}$ take values on a finite set $S$ of integers, then \begin{multline*} \underset{1\leq r\leq R}{\mathbb{E}}\bignorm{\underset{n\in I_r}{\mathbb{E}} c'_{n,R}\ T^{a_{1,R}(n)+e_{1,n,R}}f_{1,R}\cdot...\cdot T^{a_{k,R}(n)+e_{k,n,R}}f_{k,R}}_{L^2(\mu)}^d\ll_{S,k,d} \\ \sup_{||f_2||_{\infty}\leq 1,...,||f_k||_{\infty}\leq 1}^{} \ \underset{1\leq r\leq R}{\mathbb{E}}\ \sup_{|c_{n,R}|\leq 1}\ \bignorm{\underset{n\in I_r}{\mathbb{E}} c_{n,R}\ T^{a_{1,R}(n)}f_{1,R}\cdot T^{a_{2,R}(n)}f_{2}...\cdot T^{a_{k,R}(n)}f_{k}}_{L^2(\mu)}^d, \end{multline*}where we also use the H\"{o}lder inequality (which gives dependence on the exponent $d$ in the implicit constants). Therefore, instead of using the same argument repeatedly, we will cite this lemma in such instances and add a comment when a modified version is required. \end{remark*} \subsection{Overview of the proof} Our main objective is to reduce our problem to the study of ergodic averages of some variable polynomials. Therefore, we will first study asymptotic bounds for certain polynomial families in Section \ref{PETsection}, since they will be required for the proof of Proposition \ref{factors}. 
This will rely on the van der Corput inequality and an induction argument on the complexity of the family. In Section \ref{sublinearsection}, we will establish bounds for Hardy sequences of a specific form, namely when the involved functions are a sum of a sub-linear function and a polynomial. This will also be required for the general case. In Section \ref{reductionestimates}, we shall finish the proof. The main idea is that we can approximate the given Hardy functions by Taylor polynomials (possibly constant) in suitable smaller intervals (with lengths going to infinity). We shall reduce our problem to proving a statement of the form \begin{equation}\label{expansion0} \lim\limits_{R\to+\infty} \underset{1\leq r\leq R}{\mathbb{E}} \norm{\underset{n\in I_r}{\mathbb{E}}\ c_{n,R}\ T^{\floor{p_{1,r}(n)}} f_{1,r}\cdot ...\cdot T^{\floor{p_{k,r}(n)}}f_{k,r}}_{L^2(\mu)}^{2^t} =0, \end{equation}where the iterates are variable polynomials and $f_{1,r}$ has the form\begin{equation*} f_{1,r}=f_1\cdot T^{\floor{b_1(r)}}h_1\cdot ...\cdot T^{\floor{b_{\ell}(r)}}h_{\ell} \end{equation*}for sub-linear functions $b_1,...,b_{\ell}$ and $h_1,...,h_{\ell}\in L^{\infty}(\mu)$. After this reduction, we bound the innermost average using the results from Section \ref{PETsection}. More precisely, we claim that the inner average can be bounded by a quantity of the form \begin{equation*} \underset{ {\bf m}\in [-M,M]^{t}}{\mathbb{E}} \Big|\int T^{\floor{q_1(r,{\bf m})}}g_{r,1}\cdot...\cdot T^{\floor{q_{\ell}(r,{\bf m})}} g_{r,\ell} \ d\mu \Big| \end{equation*}plus some small error terms, where $M$ is a finite integer (independent from the rest of our parameters) and all the functions $g_{r,i}$ are either $f_r$ or $\overline{f_r}$. In addition, the functions $q_{i}(r,{\bf m})$ in the iterates are such that, for (almost all) ${\bf m}\in \mathbb{Z}^{l}$, they can be written as a sum of a sublinear function plus a polynomial, which is the special case that we discussed above. 
Thus, taking first the limit $R\to+\infty$ to use the bounds established in the special case and then taking the limits $M\to+\infty$, we shall reach our conclusion. The fact that we can reduce our original problem to \eqref{expansion0} is based on the following elementary lemma. \begin{lemma}\label{mainlemma} Let $d$ be a positive integer and consider a two-parameter sequence $\big(A_{R,n}\big)_{R,n\in\mathbb{N}}$ in a normed space such that $\norm{A_{R,n}}\leq 1 $ for all possible choices of $R,n\in\mathbb{N}$. Let $L(t)\in\mathcal{H}$ be an eventually positive function such that $1\prec L(t)\prec t$ and assume that \begin{equation*} \limsup\limits_{R\to+\infty} \underset{1\leq r\leq R}{\mathbb{E}} \ \bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} A_{R,n} }^{d} \leq C \end{equation*}for some $C>0$. Then, we also have \begin{equation*} \limsup\limits_{R\to+\infty} \bignorm{ \underset{1\leq r\leq R}{\mathbb{E}} A_{R,n} }\leq C^{1/d}. \end{equation*} \end{lemma} \begin{proof} Combining the power mean inequality and the triangle inequality, we can easily deduce that \begin{equation*} \underset{1\leq r\leq R}{\mathbb{E}} \ \bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} A_{R,n} }^{d}\geq \bignorm{\underset{1\leq r\leq R}{\mathbb{E}} \big( \underset{r\leq n\leq r+L(r)}{\mathbb{E}} A_{R,n} \big) }^d. \end{equation*}Therefore, our result will follow if we show that \begin{equation*} \bignorm{ \underset{1\leq r\leq R}{\mathbb{E}} \big( \underset{r\leq n\leq r+L(r)}{\mathbb{E}} A_{R,n}\big) -\underset{1\leq r\leq R}{\mathbb{E}} A_{R,n} }=o_R(1). \end{equation*} Let $u$ be the compositional inverse of the function $t+L(t)$. Our assumptions on the Hardy field $\mathcal{H}$ imply that $u\in\mathcal{H}$. In addition, it is easy to check that $\lim\limits_{t\to+\infty} u(t)/t=1$. 
Now, we have \begin{equation*} \underset{1\leq r\leq R}{\mathbb{E}} \big(\ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} A_{R,n} \big) = \frac{1}{R} \big( \sum_{n=1}^{R}p_R(n)A_{R,n} + \sum_{n=R+1}^{R+L(R)} p_R(n)A_{R,n}\big) \end{equation*}for some real numbers $p_R(n)$. Assuming that $n$ (and thus $R$) is sufficiently large (so that $u(n)$ is positive) we can calculate $p_R(n)$ to be equal to \begin{equation*} p_R(n)= \frac{1}{L(\floor{u(n)})+1}+\cdots +\frac{1}{L(n)+1} +o_n(1), \end{equation*}since the number $A_{R,n}$ appears on the average $\underset{r\leq n\leq r+L(r)}{\mathbb{E}}$ if and only if $u(n)\leq r\leq n$. Note that $p_R(n)$ is actually independent of $R$ (for $n$ large enough) and therefore, we will denote it simply as $p(n)$ from now on. We claim that \begin{equation}\label{p(n)limit} \lim_{n\to +\infty } p(n)=1. \end{equation}Let us first see how this finishes the proof. Since for $n$ large enough we must have $p(n)\leq 2$, we can easily deduce that \begin{equation*} \frac{1}{R} \sum_{n=R+1}^{R+L(R)} p(n)A_{R,n}=o_R(1). \end{equation*}Here, we used the fact that $L(t)\prec t$. In addition, we have \begin{equation*} \bignorm{ \frac{1}{R}\sum_{n=1}^{R}p(n)A_{R,n}-\frac{1}{R}\sum_{n=1}^{R}A_{R,n} }\leq \frac{1}{R}\sum_{n=1}^{R}|p(n)-1|, \end{equation*}which is also $o_{R}(1)$. Combining the above we reach the desired conclusion. In order to establish \eqref{p(n)limit}, we observe that $L(t)$ is eventually strictly increasing, and therefore, we can easily get \begin{equation*} \int_{\floor{u(n)}}^{n+1} \frac{1}{L(t)+1}\ dt \leq p(n)\leq \int_{\floor{u(n)}-1}^{n}\frac{1}{L(t)+1} \ dt. \end{equation*}Thus, it suffices to show that the integrals on both sides of the above inequality converge to 1. It is straightforward to check that each of these integrals is $o_n(1)$ close to the integral \begin{equation*} I_n= \int_{u(n)}^{n} \frac{1}{L(t)+1}\ dt. \end{equation*}Therefore, we only need to prove that $I_n\to 1$. 
Using the mean value theorem, we can find a real number $h_n\in[u(n),n]$ such that, \begin{equation*} I_n=\frac{n-u(n)}{L(h_n)+1}=\frac{L(u(n))}{L(h_n)+1}. \end{equation*}The last equality follows easily from the definition of $u$. Since $L$ is eventually strictly increasing, we conclude that $I_n$ is smaller than $L(u(n))/(L(u(n)) +1)\leq 1$. In addition, we also have \begin{equation*} I_n\geq \frac{L(u(n))}{L(n)+1}. \end{equation*}The result follows if we show (note that the function $u^{-1}$ is onto in a half line of $\mathbb{R}$) \begin{equation*} \lim\limits_{t\to +\infty} \frac{L(t)}{L(u^{-1}(t))+1}=1. \end{equation*}However, \begin{equation*} \frac{L(t)}{L(u^{-1}(t))+1}=\frac{L(t)}{L(t+L(t))+1}=\frac{L(t)}{L(t+L(t))}+o_t(1). \end{equation*}Using the mean value theorem, we can write \begin{equation*} L(t+L(t))=L(t)+L(t)L'(x_t)\ , \end{equation*}where $x_t\in [t,t+L(t)]$. Thus, \begin{equation*} \frac{L(t+L(t))}{L(t)}=1+L'(x_t)=1+o_t(1)\ , \end{equation*}since $L'(t)\ll L(t)/t\prec 1$. The result follows. \end{proof} \subsection{Two examples} a) Whenever we use $\ll$ without indices in this example, we imply that the constants are absolute. Assume that $a(t)=t\log t+\log^3 t $, $b(t)=t\log t$ and $c(t)=\sqrt{t}$. We want to show that there exists $s\in \mathbb{N}$, such that, if $\nnorm{f}_s=0$, then \begin{equation*} \underset{1\leq n\leq N}{\mathbb{E}} T^{\floor{n\log n+\log^3 n}}f\cdot T^{\floor{n\log n}} g_1\cdot T^{\floor{\sqrt{n}}} g_2 \end{equation*}converges to 0 in $L^2$ as $N\to+\infty$. Here, $g_1$ and $g_2$ are arbitrary 1-bounded functions in $L^{\infty}(\mu)$. 
In view of Lemma \ref{mainlemma}, it suffices to show that \begin{equation}\label{example0} \underset{1\leq r\leq R}{\mathbb{E}} \bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} T^{\floor{n\log n+\log^3 n}}f\cdot T^{\floor{n\log n}} g_1\cdot T^{\floor{\sqrt{n}}}g_2 }_{L^2(\mu)}^{2^d}=\underset{1\leq r\leq R}{\mathbb{E}} A_r \end{equation}converges to 0 as $R\to+\infty$, for some sub-linear function $L(t)\in \mathcal{H}$ and an integer $d$, both of which we will choose later. \subsection*{ Step 1: Reduction to averages of variable polynomials.} We observe that \begin{equation*} A_r = \bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}}\ T^{\floor{ (r+h)\log (r+h) +\log^3(r+h)}} f\cdot T^{\floor{(r+h)\log (r+h)}}g_1 \cdot T^{\floor{\sqrt{r+h}}} g_2 }_{L^2(\mu)}^{2^d}. \end{equation*} Now, we can use the Taylor expansion to write \begin{equation*} (r+h)\log (r+h)=-\frac{h^3}{6x_h^2} + \frac{h^2}{2r}+h(\log r+1)+r\log r, \ \text{ for some} \ \ \ x_h\in[r,r+h], \end{equation*}and \begin{equation*} \sqrt{r+h}=-\frac{h^2}{8(x'_{h})^{3/2}}+\frac{h}{2\sqrt{r}} +\sqrt{r}, \ \text{ for some} \ \ \ x'_h\in[r,r+h], \end{equation*} for every $0\leq h\leq L(r)$. Since \begin{equation*} \Big|\frac{h^3}{6x_h^2} \Big|\leq \frac{L(r)^3}{r^2} \end{equation*} and \begin{equation*} \Big|\frac{h^2}{8(x'_h)^{3/2}}\Big|\leq \frac{L^2(r)}{8r^{3/2}}, \end{equation*}we conclude that these two last terms are both $o_r(1)$, provided that we choose the function $L(t)$ to satisfy $L(t)\prec t^{2/3}$. We also choose $L(t)\succ t^{1/2}$, so that both the 2-degree term in the expansion of $(r+h)\log (r+h)$ and the 1-degree term in the expansion of $\sqrt{r+h}$ are not bounded (for $h $ taking values in the range $[0,L(r)]$). In addition, under the above assumptions, we can also show that \begin{equation*} \max_{0\leq h\leq L(r)} |\log^3(r+h)-\log^3(r)|=o_r(1) \end{equation*}using the mean-value theorem. 
Therefore, we have\footnote{In this example, we split and combine the integer parts freely, which is not true in general. In our main proof, we explain this argument using Lemma \ref{errors}.} \begin{multline}\label{example} A_r\simeq \bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}}\ T^{\floor{\frac{h^2}{2r}+h(\log r+1) +r\log r+\log^3 r }} f\cdot T^{\floor{\frac{h^2}{2r}+h(\log r+1) +r\log r}}g_1\cdot T^{\floor{\frac{h}{2\sqrt{r}} +\sqrt{r}}}g_2 }_{L^2(\mu)}^{2^d}=\\ \bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}}\ T^{\floor{\frac{h^2}{2r}+h(\log r+1) +r\log r }} (g_1\cdot T^{\floor{\log^3 r}}f) \cdot T^{\floor{\frac{h}{2\sqrt{r}} +\sqrt{r}}}g_2 }_{L^2(\mu)}^{2^d}, \end{multline}which is an average where the iterates are polynomials in $h$. The fact that the $o_r(1)$ terms can be discarded follows from Lemma \ref{errors} and will be explained more thoroughly in the formal proof. Note that the iterates have now become polynomials in the variable $h$. \begin{remark*} In the proof of Proposition \ref{factors} in Section \ref{reductionestimates}, we will choose the function $L(t)$ in order to have a common polynomial expansion as above. Although in this example this is easily done by hand, this will be accomplished in the general case using some lemmas and propositions that are proven in the appendix. \end{remark*} We will use the van der Corput inequality (Lemma \ref{vdc}):\begin{equation*} |\underset{1\leq n\leq N}{\mathbb{E}}\ a_n|^{2^d}\ll_d \frac{1}{M}+ \underset{| m|\leq M}{\mathbb{E}} | \underset{1\leq n\leq N}{\mathbb{E}} \langle a_{n+m},a_m \rangle|^{2^{d-1}} +o_N(1)\ , \end{equation*}which holds as long as $ M=o (N)$. We will deal with a simpler case here, since \eqref{example} requires many applications of the van der Corput inequality and the estimates are quite complicated. 
We shall find a bound for the average \begin{equation*} \underset{1\leq r\leq R}{\mathbb{E}} \bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}} T^{\floor{\frac{h^2}{2r}}}f_r }^4=\underset{1\leq r\leq R}{\mathbb{E}} A_r^4\ , \end{equation*}where $f_r =g_1\cdot T^{\floor{\log^3(r)} }f$. \subsection*{Step 2: A change of variables trick and bounds for the polynomial averages} First of all, we can write $h=k\floor{\sqrt{2r}}+s$, where the integers $k,s$ satisfy $0\leq k\leq L(r)/\floor{\sqrt{2r}}$ and $0\leq s\leq \floor{\sqrt{2r}}-1$. Then, we have \begin{equation*} \frac{h^2}{2r}=\frac{k^2\floor{\sqrt{2r}}^2}{2r}+\frac{2k\floor{\sqrt{r}}s}{2r}+\frac{s^2}{2r}. \end{equation*}Note that \begin{equation*} \Big|\frac{k^2\floor{\sqrt{2r}}^2}{2r}-k^2\Big|\leq 2k^2\frac{\{\sqrt{2r}\}}{\sqrt{2r}} \leq 2\frac{L^2(r)}{\floor{\sqrt{2r}}^2 \sqrt{2r}}. \end{equation*}If we choose $L(t)$ to satisfy the additional hypothesis $L(t)\prec t^{3/4}$, then we get that the above quantity is $o_r(1)$. In this example, we can take $L(t)=t^{3/5}$ as our sub-linear function (observe that all of the restrictions we imposed above are satisfied). Therefore, we can use the power mean inequality to deduce that\begin{equation}\label{poiuy} A_r^4\leq \underset{0\leq s\leq \floor{\sqrt{2r}}-1}{\mathbb{E}} \ \bignorm { \underset{1\leq k\leq \frac{L(r)}{\floor{\sqrt{2r}}}}{\mathbb{E}} T^{\floor{k^2+p_{s,r}(k)}} f_r }^4 \end{equation}for some linear polynomials $p_{s,r}(k)$. Denote by $A_{s,r}$ the innermost average in the above relation. We fix a positive integer parameter $M$. Applying the van der Corput inequality twice, we deduce that \begin{equation*} A_{s,r}^4\ll \frac{1}{M} +\underset{|m_1|,|m_2|\leq M}{\mathbb{E}} \Big|\int \bar{f_r} \cdot T^{2m_1m_2} f_r\ d\mu \Big| +o_r(1) , \end{equation*}where the implied constant is absolute (and, in particular, independent of $M$). 
We omitted the routine computations here (the general case is more complicated than this and is handled in Section \ref{PETsection}). This bound holds regardless of the choice of the polynomial $p_{s,r}(k)$. Using this bound in \eqref{poiuy} we deduce that \begin{equation*} A_r^4\ll \frac{1}{M}+\underset{|m_1|,|m_2|\leq M}{\mathbb{E}} \Big|\int \overline{(g_1\cdot T^{\floor{\log^3 r}}f )} \cdot T^{2m_1m_2} (g_1\cdot T^{\floor{\log^3 r}}f )\ d\mu \Big| +o_r(1). \end{equation*}Therefore, the quantity in \eqref{example0} is $\ll$ \begin{multline}\label{afterstep2} \frac{1}{M}+\underset{1\leq r\leq R}{\mathbb{E}}\ \underset{|m_1|,|m_2|\leq M}{\mathbb{E}} \Big|\int \overline{(g_1\cdot T^{\floor{\log^3 r}}f )} \cdot T^{2m_1m_2} (g_1\cdot T^{\floor{\log^3 r}}f )\ d\mu \Big| +o_R(1)=\\ \frac{1}{M}+ \underset{|m_1|,|m_2|\leq M}{\mathbb{E}} \ \underset{1\leq r\leq R}{\mathbb{E}} \Big|\int (\bar g_1 \cdot T^{2m_1m_2}g_1)\cdot T^{\floor{\log^3 r}}(\bar{f}\cdot T^{2m_1m_2}f) \ d\mu \Big| +o_R(1). \end{multline} \begin{remark*} In the proof of the general case, instead of the sub-linear function $\floor{\log^3(r)}$ in the iterates in \eqref{afterstep2}, we may also have functions of the form $\floor{u(r)}^{k}$, where $u\in\mathcal{H}$ is a sub-linear function and $k\in \mathbb{Z}^{+}$ (like $\floor{\sqrt{r}}^3$ and $\floor{r^{2/3}}^5$). For instance, assume we want to study the limit of the averages \begin{equation*} \underset{1\leq n\leq N}{\mathbb{E}} T^{\floor{\sqrt{n}+n^3}}f\cdot T^{\floor{\sqrt{n}}}g. \end{equation*}Using Lemma \ref{mainlemma}, it suffices to show that \begin{equation*} \underset{1\leq r\leq R}{\mathbb{E}}\bignorm{\underset{0\leq h\leq L(r)}{\mathbb{E}} T^{\floor{\sqrt{r+h}+(r+h)^3}}f\cdot T^{\floor{\sqrt{r+h}}}g }_{L^2(\mu)}^{2^d} \end{equation*}for some $d\in \mathbb{N}$ and some sub-linear function $L(t)\in \mathcal{H}$. 
If we choose $L(t)$ appropriately, then we can write \begin{equation*} \sqrt{r+h}=\sqrt{r}+\frac{h}{2\sqrt{r}}+o_r(1) \end{equation*}for $0\leq h\leq L(r)$. Now, using the change of variables $h=k\floor{2\sqrt{r}}+s$, we observe that the leading coefficient of the polynomial $(r+h)^3$ in the iterates becomes $\floor{2\sqrt{r}}^3$. If we proceed similarly as in step 2 above using repeated applications of the van der Corput inequality, we will arrive at a similar bound as the one in \eqref{afterstep2}, but now the term $\floor{2\sqrt{r}}^3$ will appear in the iterates. In order to combat this situation, we need another intermediate step in our proof (this is Step 7 in Section \ref{reductionestimates}). We shall use a lemma that allows us to replace the sub-linear function $2\sqrt{r}$ by the identity function $a(r)=r$. As an example, suppose we want to bound the limit of the averages \begin{equation*} \underset{1\leq r\leq R}{\mathbb{E}} T^{\floor{\sqrt{r}}}f\cdot T^{\floor{\sqrt{r}}^{3}+\floor{r^{2/5}} }g \end{equation*}as $R\to+\infty$. We rewrite this expression as a function of $\sqrt{r}$ \begin{equation*} \underset{1\leq r\leq R}{\mathbb{E}} T^{\floor{\sqrt{r}}}f\cdot T^{\floor{\sqrt{r}}^{3}+\floor{({\sqrt{r}})^{4/5}} }g. \end{equation*}Then, we can prove that \begin{equation*} \limsup\limits_{R\to+\infty}\bignorm{\underset{1\leq r\leq R}{\mathbb{E}} T^{\floor{\sqrt{r}}}f\cdot T^{\floor{\sqrt{r}}^{3}+\floor{({\sqrt{r}})^{4/5}} }g}_{L^2(\mu)}\leq C \limsup\limits_{R\to+\infty}\bignorm{\underset{1\leq r\leq R}{\mathbb{E}} T^{r}f\cdot T^{r^{3}+\floor{r^{4/5}} }g}_{L^2(\mu)} \end{equation*}for some positive real number $C$. Now the functions in the iterates are sub-linear functions and polynomials, which we are now able to handle (this is the content of Section \ref{sublinearsection}). 
\end{remark*} \subsection*{Step 3: Dealing with the sub-linear function.} In this step we show that the quantity in \eqref{afterstep2} goes to 0, if we take $R\to +\infty$ and then $M\to+\infty$. While steps 1 and 2 of this example correspond to parts of the proof in Sections \ref{PETsection} and \ref{reductionestimates}, this step corresponds to the proofs in Section \ref{sublinearsection}. We observe that the function $\log^3(r)$ in the iterates is a sub-linear function. We will show that \begin{equation}\label{expl} \lim\limits_{R\to+\infty} \underset{1\leq r\leq R}{\mathbb{E}} \Big|\int (\bar g \cdot T^{2m_1m_2}g) \cdot T^{\floor{\log^3 r}}(\bar{f}\cdot T^{2m_1m_2}f) d\mu \Big|\ll \nnorm{\bar f\cdot T^{2m_1m_2} f}_3. \end{equation} In addition, the implicit constants do not depend on $m_1,m_2$. Assuming that \eqref{expl} holds, we take the limit as $M\to +\infty$ (this can be done because all implied asymptotic constants do not depend on $m_1,m_2$) and we need to show that \begin{equation*} \lim\limits_{M\to +\infty} \underset{|m_1|,|m_2|\leq M}{\mathbb{E}} \nnorm{ \bar{f}\cdot T^{2m_1m_2}f }_3=0. \end{equation*}Applying the H\"{o}lder inequality, we are left with showing that \begin{equation*} \lim\limits_{M\to +\infty} \underset{|m_1|,|m_2|\leq M}{\mathbb{E}} \nnorm{ \bar{f}\cdot T^{2m_1m_2}f }_3^8=0. \end{equation*} Using the definition of the Host-Kra seminorms, this relation reduces to an ergodic average with polynomial iterates, which is well known to converge to $0$ under our hypothesis on the function $f$ (namely, that $\nnorm{f}_s=0$ for some suitable $s\in \mathbb{N}$). We now establish \eqref{expl}. It suffices to show that \begin{equation*} \lim\limits_{R\to+\infty} \underset{1\leq r\leq R}{\mathbb{E}} \Big|\int g\cdot T^{\floor{\log^3 r}}f d\mu \Big|\ll \nnorm{f}_{3,T} \end{equation*}for any 1-bounded functions $f \text{ and }g$, where the implied constant is absolute. 
We square the above expression and apply the Cauchy-Schwarz inequality to bound it by \begin{equation*} \underset{1\leq r\leq R}{\mathbb{E}} \int {G} \cdot S^{\floor{\log^3(r)}}F \ d(\mu\times\mu), \end{equation*}where $F:=\overline{f}\otimes f$, $G:=\overline{g}\otimes g$ and $S:=T\times T$. Then, \eqref{expl} follows if we show \begin{equation*} \bignorm{ \underset{1\leq r\leq R}{\mathbb{E}} S^{\floor{\log^3(r)}}F }_{L^2(\mu\times \mu)}\ll \nnorm{f}_{3,T}^2. \end{equation*}We use Lemma \ref{mainlemma} once more: it suffices to show that \begin{equation*} \limsup\limits_{r\to+\infty} \bignorm{\underset{r\leq n\leq r+L(r)}{\mathbb{E}} S^{\floor{\log^3(n)}}F }_{L^2(\mu\times \mu)}\ll \nnorm{f}_{3,T}^2\ , \end{equation*}where $L(t)\in\mathcal{H}$ is sub-linear. Using the Taylor expansion, we can write \begin{equation*} \log^3(r+h)=\log^3(r)+\frac{3\log^2 r}{r}h-\frac{6\log x_h-3\log^2 x_h }{2x_h^2}h^2, \end{equation*}where $0\leq h\leq L(r)$ and $x_h\in [r,r+h]$. If we choose the function $L(t)$ so that \begin{equation*} \frac{t}{\log^2 t}\prec L(t)\prec \frac{t}{\log t}, \end{equation*}we can then deduce that the last term in the above expansion is $o_r(1)$. Our problem reduces to \begin{equation*} \limsup\limits_{r\to+\infty} \bignorm{\underset{0\leq h\leq L(r)}{\mathbb{E}} S^{\floor{\log^3(r)+ \frac{3\log^2 r}{r}h }}F}_{L^2(\mu\times \mu)}\ll \nnorm{f}_{3,T}^2. \end{equation*} We have again reduced our problem to finding a bound for an ergodic average with (variable) polynomials. In order to finish the proof, we work similarly as in the previous steps, using the change of variables trick and one application of the van der Corput inequality (we also need to use the inequality $\nnorm{ F}_{2,T\times T} \leq \nnorm{f}_{3,T}^2$). b) In this second example we describe the strategy that will be used in the special case that we discussed above, that is when our functions are sums of sublinear functions and polynomials. 
This case is covered in full generality in Section \ref{sublinearsection}. We consider the triplet of functions in $\mathcal{H}$ $(t+\log^3 t, t, \log^2 t) $ and we shall show that there exists $s\in \mathbb{N}$ so that, if $\nnorm{f}_s=0$, then \begin{equation*} \underset{1\leq n\leq N}{\mathbb{E}} T^{\floor{n+\log^3 n}}f\cdot T^{{n}} g_1\cdot T^{\floor{\log^2 n}}g_2 \end{equation*}converge to $0$ in mean ($g_1,g_2$ are again arbitrary 1-bounded functions). \subsection*{Step 1: Reducing to the case when all iterates have sub-linear growth.} We start by using Lemma \ref{mainlemma} to reduce our problem to \begin{equation} \limsup\limits_{R\to+\infty}\underset{1\leq r\leq R}{\mathbb{E}} \bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} T^{\floor{n+\log^3 n}}f\cdot T^{{n}} g_1\cdot T^{\floor{\log^2 n}}g_2 }_{L^2(\mu)}^{2}=0 \end{equation}for some sub-linear function $L(t)\in\mathcal{H}$. In this example, we will choose the function $L(t)$, so that \begin{equation*} \max_{r\leq n\leq r+L(r)} |\log^3(n)-\log^3(r)|=o_r(1) \ \ \ \ \text{ and }\ \ \ \max_{r\leq n\leq r+L(r)} |\log^2(n)-\log^2(r)|=o_r(1). \end{equation*} For instance, the function $L(t)=\sqrt{t}$ can easily be checked to satisfy the above. Therefore, if $r$ is very large, we can write \begin{multline*} \bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} T^{\floor{n+\log^3 n}}f\cdot T^{{n}} g_1\cdot T^{\floor{\log^2 n}}g_2 }_{L^2(\mu)}=\\ \bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} T^{n+\floor{\log^3 r}+e_{1,n}}f\cdot\ T^{n} g_1\cdot\ T^{\floor{\log^2 r}+e_{2,n}}g_2 }_{L^2(\mu)} , \end{multline*}where $e_{1,n},e_{2,n}\in \{0,\pm 1\}$. We assume here that all the error terms are zero (in the main proof, we will invoke Lemma \ref{errors} to remove the error terms). 
Therefore, we want to show that \begin{equation*} \limsup\limits_{R\to+\infty}\underset{1\leq r\leq R}{\mathbb{E}}\bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} T^{n+\floor{\log^3 r}}f\cdot\ T^{n} g_1\cdot \ T^{\floor{\log^2 r}}g_2 }_{L^2(\mu)}^{2}=0. \end{equation*}Since $\norm{g_2}_{\infty}\leq 1$, we reduce our problem to \begin{equation*} \limsup\limits_{R\to+\infty}\underset{1\leq r\leq R}{\mathbb{E}}\bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} T^n (g_1\cdot T^{\floor{\log^3 r}}f )}_{L^2(\mu)}^{2}=0. \end{equation*}Note that the inner average is a polynomial average in the variable $n$. We fix a positive integer $M$ and use the van der Corput inequality to deduce that \begin{equation*} \bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} T^n (g_1\cdot T^{\floor{\log^3 r}}f )}_{L^2(\mu)}^{2}\ll \frac{1}{M} +\underset{|m|\leq M}{\mathbb{E}}\Big|\int \overline{(g_1\cdot T^{\floor{\log^3(r)}}f)} \cdot T^m (g_1\cdot T^{\floor{\log^3(r)}}f) \ d\mu \Big| +o_r(1)\ , \end{equation*}where the implied constant is absolute. Thus, we want to show that \begin{equation*} \frac{1}{M} +\underset{|m|\leq M}{\mathbb{E}}\ \underset{1\leq r\leq R}{\mathbb{E}} \Big|\int (\overline{g_1}\cdot T^m g_1) \cdot T^{\floor{\log^3(r)}}(\overline{f}\cdot T^m f) \ d\mu \Big|+o_R(1) \end{equation*}goes to $0$, as $R\to+\infty$ and then as $M\to+\infty$. \subsection*{Step 2: Dealing with the sub-linear functions.} Our problem follows by taking the limit as $R\to +\infty$ and then using the bound \begin{equation}\label{lst} \limsup\limits_{R\to+\infty}\underset{1\leq r\leq R}{\mathbb{E}} \Big|\int (\overline{g_1}\cdot T^m g_1) \cdot T^{\floor{\log^3(r)}}(\overline{f}\cdot T^m f) \ d\mu \Big|\ll \nnorm{\overline{f} \cdot T^m f}_{3,T}. \end{equation}This was established in the previous example. Using this relation and taking the limit $M\to+\infty$ (note that our asymptotic constants do not depend on $m$), we reach the conclusion. 
Since \eqref{lst} follows from the previous example, we will describe our arguments for a more representative case. We shall prove that \begin{equation}\label{eld} \limsup\limits_{N\to+\infty} \bignorm{ \underset{1\leq n\leq N}{\mathbb{E}} T^{\floor{\log^3 n+\log^2 n}}f\cdot T^{\floor{\log^3 n}} g_1\cdot T^{\floor{\log^2 n}}g_2 }_{L^2(\mu)}\ll \nnorm{f}_4, \end{equation}where the implied constant is absolute. Using Lemma \ref{mainlemma}, it suffices to show that \begin{equation*} \limsup\limits_{R\to+\infty} \underset{1\leq r\leq R}{\mathbb{E}}\bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} T^{\floor{\log^3 n+\log^2 n}}f\cdot T^{\floor{\log^3 n}} g_1\cdot T^{\floor{\log^2 n}}g_2 }_{L^2(\mu)}^2\ll \nnorm{f}_4^2 \end{equation*}for some sub-linear function $L(t)\in\mathcal{H}$. We choose $L(t)=t(\log t)^{-3/2}$. Using similar approximations as in the first example, we can show that for any $0\leq h\leq L(r)$ \begin{equation*} \log^3(r+h)=\log^3 r+h\frac{3\log^2 r}{r}+o_r(1), \end{equation*}while \begin{equation*} \log^2(r+h)=\log^2 r+o_r(1) \end{equation*}for all $0\leq h \leq L(r)$. Disregarding the error terms $o_r(1)$ in this example, it suffices to show that \begin{equation*} \limsup\limits_{R\to+\infty} \underset{1\leq r\leq R}{\mathbb{E}} \bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}} T^{\floor{\log^3 r +h\frac{3\log^2 r}{r}}}\big( T^{\floor{\log^2 r}}f\cdot g_1 \big)\cdot T^{\floor{\log^2 r}}g_2 }_{L^2(\mu)}^2\ll \nnorm{f}_4^2. \end{equation*}Since $g_2$ is bounded by 1, the above bound follows from \begin{equation*} \limsup\limits_{R\to+\infty}\underset{1\leq r\leq R}{\mathbb{E}} \bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}} T^{\floor{\log^3 r +h\frac{3\log^2 r}{r}}}\big( T^{\floor{\log^2 r}}f\cdot g_1 \big)}_{L^2(\mu)}\ll \nnorm{f}_4^2. \end{equation*} This is an average where the iterates are variable polynomials. 
Working similarly to the previous example, we can show that \begin{multline*} \bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}} T^{\floor{\log^3 r +h\frac{3\log^2 r}{r}}}\big( T^{\floor{\log^2 r}}f\cdot g_1 \big)}_{L^2(\mu)}^2\ll \\ \frac{1}{M}+\underset{|m|\leq M}{\mathbb{E}} \Big| \int \overline{\big(T^{\floor{\log^2 r}}f\cdot g_1\big)} \cdot T^m\big( T^{\floor{\log^2 r}}f\cdot g_1 \big)\ d\mu \Big| +o_r(1). \end{multline*}Thus, it suffices to show that \begin{equation*} \limsup\limits_{M\to+\infty} \underset{|m|\leq M}{\mathbb{E}} \limsup\limits_{R\to+\infty}\underset{1\leq r\leq R}{\mathbb{E}}\Big| \int (\bar{g_1}\cdot T^m g_1)\cdot T^{\floor{\log^2 r}} (\bar{f}\cdot T^mf) \ d\mu\Big|\ll \nnorm{f}_4^2. \end{equation*}Note that we started with three sub-linear functions in the iterates and now we have an average with only one sub-linear function (our argument in the general case is based on this induction scheme). The result follows by working similarly to step 3 in the previous example. \section{Bounds of polynomial averages}\label{PETsection} Our main goal in this section is to establish Proposition \ref{PET} below. Before stating that proposition, we will first give some definitions. \subsection{Families of variable polynomials} Assume we are given a family $P_N=\{p_{1,N},...,p_{k,N}\}$ of essentially distinct (i.e. their pairwise differences are non-constant polynomials) variable polynomials, such that the degrees of the polynomials in $P_N$ and of their pairwise differences are independent of $N$. Then, we can assign to $p_{1,N}$ its own vector $(v_{1,N},...,v_{k,N})$, where $v_{1,N}$ is the leading coefficient of $p_{1,N}$ and $v_{j,N}$ is the leading coefficient of $p_{1,N}-p_{j,N}$ for $j\neq 1$. We symbolize this by $\mathcal{S}(p_{1,N})$ and call this the {\em leading vector} of the family $P_N$ corresponding to $p_{1,N}$. 
We similarly define $\mathcal{S}(p_{i,N})$ for every $i\in \{1,...,k\}$ and call it the {\em leading vector} corresponding to $p_{i,N}$. Let us remark that the leading vector has no elements equal to 0, because we have assumed that the polynomials are essentially distinct. Finally, we call $P_N$ {\em ordered}, if the degrees of the polynomials $p_{i,N}$ are non-increasing. In this case, the polynomial $p_{1,N}$ has maximal degree and we call it the {\em leading polynomial}. The {\em leading vector} of an ordered polynomial family is defined as the leading vector corresponding to its leading polynomial. \subsection{Types of polynomial families} We define the {\em type} $(d,w_d,...,w_1)$ of the polynomial family, where $d$ is the largest degree appearing in the polynomials of $P_N$ and $w_i$ is the number of distinct leading coefficients of the members of $P_N$ with degree exactly $i$ among all polynomials in the family. Note that for families of variable polynomials, the value of this vector may depend on the variable $N$. We order the types by the value of $d$ and then order types of the same degree lexicographically. We observe that a decreasing sequence of types must eventually be constant. The type of a family is a classical quantity used in the literature when an induction scheme on polynomial families is required. \subsection{Good sequences and nice polynomial families} Now, we define the notion of a nice polynomial family. Namely, we will deal with polynomials whose coefficients are well-behaved sequences. Our arguments fail to work in the general case where the coefficients can be arbitrary sequences. \begin{definition}\label{good sequence} a) A sequence $(a_n)_{n\in\mathbb{N}}$ of real numbers is called ``good'', if there exists a function $f\in\mathcal{H}$ with $\lim\limits_{t\to+\infty}f(t)\neq 0$ such that \begin{equation*} \lim\limits_{n\to+\infty} \frac{a_n}{f(n)}=1. \end{equation*} b) Let $P_N=\{p_{1,N},...,p_{k,N}\}$ be a collection of polynomials. 
The family $P_N$ is called nice, if all the degrees of the polynomials $p_{i,N}$ and $p_{i,N}-p_{j,N}$ are independent of $N$ for $N$ large enough and their leading coefficients are good sequences, for all admissible values of the $i,j$.\\ \end{definition} Note that any good sequence has a limit (possibly infinite). An example of a good sequence that is not a Hardy sequence is the sequence $\frac{\floor{N^{2/3}}}{\sqrt{N}}$, which is asymptotically close to $N^{1/6}$. In particular, all sequences of the form $\floor{f(n)}$, where the function $f\in\mathcal{H}$ does not converge to $0$ (as $t\to+\infty$), are good sequences, while, for example, $\floor{\frac{1}{\log n}}$ is not a good sequence. \begin{lemma}\label{type} The type of a nice polynomial family is well-defined (independent of $N$) for $N$ large enough. \end{lemma} \begin{proof} This is fairly straightforward. Indeed, assume that the polynomials $p_{i,N}$ and $p_{j,N}$ of the given family have the same degree $s$. Let $a_i(N),a_{j}(N), a_{ij}(N)$ be the leading coefficients of $p_{i,N},p_{j,N}$ and $p_{i,N}-p_{j,N}$, which are all good sequences. The degree of the polynomial $p_{i,N}-p_{j,N}$ does not depend on $N$. Then, we have either one of the following:\\ i) If the polynomial $p_{i,N}-p_{j,N}$ has degree equal to $s$, then for $N$ large enough, $a_{ij}(N)=a_i(N)-a_j(N)\neq 0$ and therefore the polynomials $p_{i,N},p_{j,N}$ have distinct leading coefficients eventually.\\ ii) If the polynomial $p_{i,N}-p_{j,N}$ has degree smaller than $s$, then that means that, for $N$ large enough, we have $a_{i}(N)-a_j(N)= 0$ and the polynomials $p_{i,N},p_{j,N}$ have equal leading coefficients eventually.\\ The claim easily follows. \end{proof} \subsection{The van der Corput inequality} We shall rely heavily on the following variant of the van der Corput inequality in our proofs. 
\begin{lemma} For a sequence $u_n$ in a Hilbert space with $\norm{u_n}\leq 1$ and a quantity $M=o(N)$, we have\begin{equation*} \bignorm{\frac{1}{N}\sum_{n=0}^{N-1} u_n}^{2^d}\ll_d \frac{1}{M} + \underset{-M\leq m\leq M}{\mathbb{E}}\ \Big|\underset{0\leq n\leq N-1}{\mathbb{E}}\langle u_{n+m},u_{n}\rangle\Big|^{2^{d-1}}+o_N(1). \end{equation*} \end{lemma} \begin{proof} This follows from the basic van der Corput inequality \begin{equation*} \bignorm{\frac{1}{N}\sum_{n=0}^{N-1} u_n}\ll \frac{1}{M^{1/2}} +\big(\underset{-M\leq m\leq M}{\mathbb{E}} \Big|\underset{0\leq n\leq N-1}{\mathbb{E}}\langle u_{n+m},u_{n}\rangle\Big|\big)^{1/2}+\frac{M^{1/2}}{N^{1/2}} \end{equation*} by successively squaring and applying the Cauchy-Schwarz inequality. \end{proof} We will use this inequality to derive asymptotic bounds for multiple ergodic averages involving polynomials. The above inequality holds, in particular, when $M$ is a fixed positive integer. We state here the equivalent result for variable sequences, since this is more consistent with the notation used in the proof below. \begin{lemma}\label{vdc} For sequences $(u_{n,N})_{n,N\in \mathbb{N}}$ in a Hilbert space with $\norm{u_{n,N}}\leq 1$ and a quantity $M=o(N)$, we have\begin{equation*} \bignorm{\frac{1}{N}\sum_{n=0}^{N-1} u_{n,N}}^{2^d}\ll_d \frac{1}{M} + \underset{ |m|\leq M}{\mathbb{E}}\ \Big|\underset{0\leq n\leq N-1}{\mathbb{E}}\langle u_{n+m,N},u_{n,N}\rangle\Big|^{2^{d-1}}+o_N(1). \end{equation*} \end{lemma} \subsection{Bounds of polynomial averages} The remainder of the section will be dedicated to establishing the following proposition: \begin{proposition}\label{PET} Let $k, d$ be positive integers and let $M$ be a positive integer parameter. Suppose ${\bf W}=(d,w_d,...,w_1)$ is a $(d+1)$-tuple of positive integers that is also a type for some polynomial family. 
Then, there exist positive integers $t=t(d,k,{\bf W})$, $s=s(d,k,{\bf W})$, a finite set $Y=Y(d,k,{\bf W})$ of integers and integer polynomials in $t$ variables $p_{\underline{\varepsilon},j},\text{ with } \underline{\varepsilon}\in [[s]]$ and $1\leq j\leq k$, that are at most linear in each variable\footnote{This means that when regarded as polynomials only in one variable, then they are linear. Examples are $p_1(m_1,m_2)=m_1-2m_2$ and $p_2(m_1,m_2,m_3)=m_1m_2-3m_3$.}, such that for any nice ordered family of non-constant, essentially distinct polynomials \begin{equation*} P_N=\{p_{1,N},...,p_{k,N}\} \end{equation*}of degree $d$ with leading vector $\mathcal{S}(P_N)=\{u_{1,N},...,u_{k,N}\}$, any increasing sequence $L_N\to \infty$, any measure preserving system $(X,\mu,T)$ and sequences of 1-bounded functions $f_{1,N},...,f_{k,N}$, we have \begin{multline}\label{asdfghjkl} \sup_{|c_{n,N}| \leq 1}\bignorm{ \underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n,N} \prod_{i=1}^{k} T^{\floor{p_{i,N}(n)}}f_{i,N}}_{L^2(\mu)}^{2^t}\ll_{d,k,{\bf W}} \\ \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon},N}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1,N}) \ d\mu \Big|+ o_N(1), \end{multline}where \begin{equation*} A_{\underline{\varepsilon},N}({\bf m})=\sum_{1\leq j\leq k} \ p_{\underline{\varepsilon},j}({\bf m})u_{j,N} \end{equation*}are real polynomials in ${\bf m}$. 
In addition, we have the following:\\ i) For $\underline{\varepsilon}\neq \underline{0}$, we have that the polynomial $A_{\underline{\varepsilon},N}({\bf m})$ is non-constant.\\ ii) The polynomials $A_{\underline{\varepsilon},N}({\bf m}),\ \underline{\varepsilon}\in [[s]]$ are pairwise essentially distinct.\\ iii) We have the relation $$A_{\underline{\varepsilon},N}({\bf m})+A_{\underline{\varepsilon}^c,N}({\bf m})=A_{\underline{1},N}({\bf m})$$ for any $\underline{\varepsilon}\in[[s]]$.\\ iv) For any $\underline{\varepsilon} \in [[s]]$, we have that if \begin{equation*} c_1p_{\underline{\varepsilon},1}({\bf m})+...+c_k p_{\underline{\varepsilon},k}({\bf m}) \end{equation*}is the zero polynomial for some $c_1,...,c_k\in\mathbb{R}$, then we have $c_i=0$ or $p_{\underline{\varepsilon},i}({\bf m})$ is the zero polynomial, for every $1\leq i\leq k$. \end{proposition} \begin{comment*} The $\sum\limits_{{\bf h}\in Y^{[[s]]}}^{}$ means that we take the sum for all choices of ${\bf h}=(h_{\underline{\varepsilon}},\ \underline{\varepsilon} \in [[s]])$ where $h_{\underline{\varepsilon}}\in Y$. In addition, we will make a small abuse of notation and write $\underset{{\bf m}\in[-M,M]^t}{\mathbb{E}}$ to denote the average over all $ {\bf m}\in \mathbb{Z}^t\cap [-M,M]^t$. \end{comment*} \begin{remarks*} i) The polynomials $p_{\underline{\varepsilon},j}$ are independent of the leading vector $\{u_{1,N},...,u_{k,N}\}$ and are, more importantly, independent of the variable $N$.\\ ii) The existence of the errors $h_{\underline{\varepsilon}}$ is merely technical and arises from the floor function in the last expression inside the integral, since we cannot use Lemma \ref{errors} to remove the error terms in this case. This will be more easily understood in the proof of the case of linear polynomials that follows. \\ iii) The quantity $o_N(1)$ depends of course on the values of $d $ and $k$. It also depends on the value of the fixed number $M$. 
However, this dependence plays no role in the arguments of the following sections (where we will usually take limits first as $N\to+\infty$ and, then, as $M\to+\infty$). For ease of notation, we will omit all other subscripts for the term $o_N(1)$.\\ iv) The final condition $iv)$ above implies that, for a fixed $\underline{\varepsilon}\in[[s]]$, if we exclude all the constant polynomials among the $p_{\underline{\varepsilon},j}$, the remaining polynomials are linearly independent. \end{remarks*} Ignoring the technical parts of the statement, the above proposition asserts that when working with multiple averages on some polynomials that vary with $N$, we can instead bound them by the averages of a polynomial correlation sequence of only the function $f_{1,N}$. Even though the new polynomials $A_{\underline{\varepsilon},N}$ have several variables, they only depend on the sequences $u_{1,N},...,u_{k,N}$ and, assuming they have good limiting behavior, we can take the limits first as $N\to+\infty$ and then as $M\to+\infty$ to get some nice bounds for the original averages. For instance, in the case where we have a fixed function $f_{1,N}=f_1$ and the sequences $u_{i,N}$ converge to non-zero real numbers, the above statement can be used to prove that the $\limsup$ of the ergodic averages in the left-hand side of \eqref{asdfghjkl} can be bounded by a power of $\nnorm{f_{1}}_s$ for some suitable positive integer $s$. This last assertion follows from minor modifications to the argument present in \cite{Leibmanseveral} (to cover the case of real polynomials instead of just integer polynomials). Finally, we observe that the statement of Proposition \ref{PET} requires that the family $P_N$ is ordered, which is equivalent to demanding that $p_{1,N}$ has maximal degree. However, we can always reduce to this case. To see this, we can use the same argument as in Section \ref{sectionfactors} in the discussion after the statement of Proposition \ref{factors}. 
Therefore, when applying Proposition \ref{PET}, we do not have to assume that $p_{1,N}$ has maximal degree. \begin{proof}[Proof in the linear case] Firstly, we shall establish Proposition \ref{PET} in the case where all the polynomials have degree 1. Thus, assume that $p_{i,N}(t)= a_{i,N}t+b_{i,N}$ where $a_{i,N}, b_{i,N}\in \mathbb{R}$ so that the variables $a_{i,N}$ are (eventually) non-zero. The assumption that our polynomials are essentially distinct implies that the numbers $a_{i,N}$ and $a_{j,N}$ are distinct. The leading vector of $P_{N}$ is the set \begin{equation*} \{a_{1,N}, a_{1,N}-a_{2,N},...,a_{1,N}-a_{k,N}\} \end{equation*}and these are good sequences. We induct on $k$. For $k=1$, we apply the van der Corput inequality to get \begin{multline*} \bignorm{ \underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n,N} T^{\floor{a_{1,N}n +b_{1,N}}}f_{1,N}}_{L^2(\mu)}^2 \ll \\ \frac{1}{M}+ \underset{ |m|\leq M}{\mathbb{E}} \Big| \underset{0\leq n\leq L_N}{\mathbb{E}} \overline{c_{n,N}}c_{n+m,N} \int \overline{f_{1,N}} \cdot T^{\floor{a_{1,N} n+b_{1,N} +m a_{1,N}}-\floor{a_{1,N}n+b_{1,N}}} f_{1,N} \ d\mu \Big| +o_N(1). \end{multline*}We rewrite the last quantity as \begin{equation*} \frac{1}{M}+ \underset{|m|\leq M}{\mathbb{E}} \Big| \underset{0\leq n\leq L_N}{\mathbb{E}} \overline{c_{n,N}}c_{n+m,N} \int \overline{f_{1,N}} \cdot T^{\floor{ma_{1,N}}+e_{n,m,N}} f_{1,N} \ d\mu \Big| +o_N(1), \end{equation*} where $e_{n,m,N}\in \{0,\pm 1\}$ (the implied constant is independent of all variables in the above relation). Let $A_{z,m,N}=\{n\in\mathbb{Z}^{+}{:}\; 0\leq n\leq L_N \text{ and } e_{n,m,N}=z \}$ for $z\in\{0,\pm 1\}=Y$. 
Then, the innermost average can be rewritten as \begin{multline*} \Big|\frac{1}{L_N} \sum_{z\in Y}^{} \sum_{n\in A_{z,m,N}} \overline{c_{n,N}}c_{n+m,N} \int \overline{f_{1,N}} \cdot T^{\floor{ma_{1,N}}+z} f_{1,N} \ d\mu \Big| \leq \sum_{z\in Y}^{} \Big|\int \overline{f_{1,N}} \cdot T^{\floor{ma_{1,N}}+z} f_{1,N}\ d\mu\Big|, \end{multline*}which, combined with the above, gives the desired result (for constants $t=1$ and $s=1$, polynomials $p_1(m)=m$ and $p_0(m)=0$ and set $Y=\{0,\pm 1\}$). Now assume that we have proven the result for $k-1$ ($k\geq 2$), with the constants of the proposition given by $t=k-1$ and $s=k-1$. Then, we use the van der Corput inequality to get \begin{multline*} \bignorm{ \underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n,N} \prod_{i=1}^{k} T^{\floor{a_{i,N}n +b_{i,N}}}f_{i,N}}_{L^2(\mu)}^{2^k}\ll_k \frac{1}{M}+o_N(1)+\\ \underset{|m|\leq M}{\mathbb{E}} \Big| \underset{0\leq n\leq L_N}{\mathbb{E}} \overline{c_{n,N}}c_{n+m,N} \int \prod_{i=1}^{k} T^{\floor{a_{i,N}n+b_{i,N}} +\floor{m a_{i,N}}+e_{i,m,n,N}} f_{i,N}\ T^{\floor{a_{i,N}n+b_{i,N}}}\overline{f_{i,N}} \ d\mu \Big|^{2^{k-1}}, \end{multline*} which is smaller than \begin{multline}\label{23} \underset{|m|\leq M}{\mathbb{E}} \Big| \underset{0\leq n\leq L_N}{\mathbb{E}}\ \overline{c_{n,N}}c_{n+m,N}\ \int \prod_{i=1}^{k} T^{\floor{a_{i,N}n+b_{i,N}}-\floor{a_{k,N}n+b_{k,N}} +\floor{m a_{i,N}}+e_{i,m,n,N}} f_{i,N}\cdot \\ T^{\floor{a_{i,N}n+b_{i,N}}-\floor{a_{k,N}n+b_{k,N}}}\overline{f_{i,N}} \ d\mu \Big|^{2^{k-1}} + 1/M +o_N(1), \end{multline}where we again have $e_{i,m,n,N}\in\{0,\pm 1\}$. In the last step, we composed with $T^{-\floor{a_{k,N}n+b_{k,N}}}$ inside the integral. We have \begin{equation*} \floor{a_{i,N}n+b_{i,N}}-\floor{a_{k,N}n+b_{k,N}} =\floor{{(a_{i,N}-a_{k,N})n+b_{i,N}-b_{k,N}}}+e'_{i,n,N} \end{equation*}where $e'_{i,n,N}\in\{0,\pm 1\}$. 
Therefore, we can rewrite the last expression in \eqref{23} as \begin{multline*} \frac{1}{M}+ \underset{|m|\leq M}{\mathbb{E}} \ \Big| \underset{0\leq n\leq L_N}{\mathbb{E}}\ \overline{c_{n,N}}c_{n+m,N} \\ \prod_{i=1}^{k} \int T^{\floor{(a_{i,N}-a_{k,N})n+b_{i,N}-b_{k,N}}+e'_{i,n,N}} \big( \overline{f_{i,N}} \cdot T^{\floor{ma_{i,N}}+e_{i,m,n,N}}f_{i,N} \big) \ d\mu \Big|^{2^{k-1}}+o_N(1). \end{multline*} Then, using the Cauchy-Schwarz inequality and the argument in Lemma \ref{errors}, we can bound the innermost average in the above expression by $O_k(1)$ times the quantity \begin{equation*} A_{m,N}=\sup_{|c_{n,N}|\leq 1} \bignorm{ \underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n,N}\prod_{i=1}^{k-1}\ T^{\floor{(a_{i,N}-a_{k,N})n+(b_{i,N}-b_{k,N})}}(\overline{f_{i,N}}\cdot T^{\floor{m a_{i,N}}+e_{i,m,n,N} }f_{i,N}) }_{L^2(\mu)}^{2^{k-1}}. \end{equation*}Now, we use the argument of Lemma \ref{errors} again to deduce that $A_{m,N}$ is bounded by $O_k(1)$ times \begin{equation*} \sum_{\underset{1\leq i\leq k-1}{z_i\in\{0,\pm 1\}} }^{} \sup_{|c_{n,N}|\leq 1} \bignorm{ \underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n,N}\prod_{i=1}^{k-1}\ T^{\floor{(a_{i,N}-a_{k,N})n+(b_{i,N}-b_{k,N})}}(\overline{f_{i,N}}\cdot T^{\floor{m a_{i,N}}+z_i }f_{i,N}) }_{L^2(\mu)}^{2^{k-1}}. \end{equation*} We fix some ${\bf z}=(z_1,...,z_{k-1})\in \{0,\pm 1\}^{k-1}$. If we take the polynomial that corresponds to $\overline{f_{1,N}}\cdot T^{\floor{m a_{1,N}}+z_1}f_{1,N}$ to be the new leading polynomial, then the new leading vector is the set \begin{equation*} \{a_{1,N}-a_{k,N},a_{1,N}-a_{2,N},...,a_{1,N}-a_{k-1,N}\}. 
\end{equation*} By the induction hypothesis, there exists a finite set $Y_{k-1}$, for which\begin{multline*} \sup_{|c_{n,N}|\leq 1} \bignorm{ \underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n,N}\prod_{i=1}^{k-1}\ T^{\floor{(a_{i,N}-a_{k,N})n+(b_{i,N}-b_{k,N})}}(\overline{f_{i,N}}\cdot T^{\floor{m a_{i,N}}+z_i }f_{i,N}) }_{L^2(\mu)}^{2^{k-1}} \ll_k \\ \frac{1}{M} + \sum_{{\bf h}\in [[Y_{k-1}]]}^{} \underset{|m_1|,...,|m_{k-1}|\leq M}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[k-1]]}^{} T^{\floor{\sum_{1\leq j\leq k-1} \ p_{\underline{\varepsilon},j}(m_1,...,m_{k-1})(a_{1,N}-a_{j,N})}+h_{\underline{\varepsilon}}}\\ \mathcal{C}^{|\underline{\varepsilon}|}(\overline{f_{1,N}}\cdot T^{\floor{m a_{1,N}}+z_1}f_{1,N}) \ d\mu \Big|+o_N(1). \end{multline*} Using the identification $[[k]]=[[k-1]]\times \{0,1\}$, we can write an $\underline{\varepsilon}\in[[k]]$ as $\underline{\varepsilon}=(\underline{\varepsilon_1},\varepsilon_2)$ where $\underline{\varepsilon_1}\in [[k-1]]$ and $\varepsilon_2\in \{0,1\}$. We also write ${\bf m}=(m,m_1,...,m_{k-1})$. 
Combining the integer parts, we rewrite the last integral as \begin{equation*} \int \prod_{\underline{\varepsilon}\in [[k]]} T^{\floor{ \sum_{1\leq j\leq k-1} \ p'_{\underline{\varepsilon},j}(m_1,...,m_{k-1})(a_{1,N}-a_{j,N})+ p'_{\underline{\varepsilon},k}(m)a_{1,N}} +h'_{\underline{\varepsilon},{\bf m}}}\ \mathcal{C}^{|\underline{\varepsilon}|}f_{1,N} \ d \mu\ , \end{equation*} where \begin{enumerate} \item $p'_{\underline{\varepsilon},j}$ is the polynomial $p_{\underline{\varepsilon}_1,j}$ for $1\leq j \leq k-1$, \item the polynomial $p'_{\underline{\varepsilon},k}$ is equal to $m$ when $\varepsilon_2=0$ and is zero otherwise, and \item $h'_{\underline{\varepsilon},{\bf m}}=h_{\underline{\varepsilon}_1}+h_{2,\underline{\varepsilon},{\bf m}}$, where \footnote{ In particular, $h_{2,\underline{\varepsilon},{\bf m}}$ is the sum of $z_1$ plus the error term appearing by combining $\floor{ma_{1,N}}$ with the other integer part, whenever they both appear. Otherwise, it is zero. Thus, it takes values on a finite set of integers.} $h_{2,\underline{\varepsilon},{\bf m}}\in \{0,\pm 1,\pm 2\}$. More importantly, $h'_{\underline{\varepsilon},{\bf m}}$ takes values in a finite set $Y_k$. \end{enumerate}We observe that \begin{multline*} \Big|\int \prod_{\underline{\varepsilon}\in [[k]]} T^{\floor{ \sum_{1\leq j\leq k-1} \ p'_{\underline{\varepsilon},j}(m_1,...,m_{k-1})(a_{1,N}-a_{j,N})+ p'_{\underline{\varepsilon},k}(m)a_{1,N}} +h'_{\underline{\varepsilon},{\bf m}}}\ \mathcal{C}^{|\underline{\varepsilon}|}f_{1,N} \ d \mu \Big|\leq \\ \sum_{{\bf h}\in [[Y_k]]}^{} \Big|\int \prod_{\underline{\varepsilon}\in [[k]]} T^{\floor{ \sum_{1\leq j\leq k-1} \ p'_{\underline{\varepsilon},j}(m_1,...,m_{k-1})(a_{1,N}-a_{j,N})+ p'_{\underline{\varepsilon},k}(m)a_{1,N}} +h_{\underline{\varepsilon}}}\ \mathcal{C}^{|\underline{\varepsilon}|}f_{1,N} \ d \mu \Big|. 
\end{multline*} Averaging over $m,m_1,...,m_{k-1}$ and summing over ${\bf z}\in \{0,\pm 1\}^{k-1}$, we have that for the finite set $Y_k$ above, the original expression is bounded by $O_k(1)$ times \begin{equation*} \frac{1}{M}+ \sum_{{\bf h}\in [[Y_k]]}^{} \ \underset{{\bf m}\in [-M,M]^k}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[k]]}^{} T^{\floor{\sum_{1\leq j\leq k} \ p'_{\underline{\varepsilon},j}({\bf m})u_{j,N}}+h_{\underline{\varepsilon}}}\ (\mathcal{C}^{|\underline{\varepsilon}|}f_{1,N}) \ d \mu \Big|+ o_N(1), \end{equation*}where $u_{1,N}=a_{1,N}$ and $u_{j,N}=a_{1,N}-a_{j,N}$. The conclusion follows. \end{proof} \begin{remark*} It follows from the above proof that the polynomials $A_{\underline{\varepsilon},N}$ in the statement of Proposition \ref{PET} have the following form: \begin{equation*} A_{\underline{\varepsilon},N}(m_1,...,m_k)= \underline{\varepsilon} \cdot (m_1u_{1,N},...,m_k u_{k,N}) \end{equation*}where ``$\cdot$'' denotes here the standard inner product on $\mathbb{R}^k$. Thus, it is straightforward to check that the polynomials $A_{\underline{\varepsilon},N}$ satisfy the conditions $i),ii)$, $iii)$ and $iv)$ of Proposition \ref{PET}. Note that all these polynomials have degree 1. This will not be the case when working with polynomials of higher degree, where we may have higher degree terms (like products of the form $m_1m_2$), but they will be linear in each variable separately. \end{remark*} \subsection{The PET induction.} For a polynomial $p_N$, a family $P_N$ and $h\in\mathbb{N}$, we define the {\em van der Corput} operation (or vdC operation), where we form the family \begin{equation*} \{ p_{1,N}(t+h)-p_{N}(t),...,p_{k,N}(t+h)-p_N(t),\ p_{1,N}(t)-p_{N}(t),...,p_{k,N}(t)-p_N(t) \} \end{equation*}and remove polynomials of degree 0. We denote this new family by $(p_N,h)^{*}P_N$. 
At first glance, it is not obvious that this operation is well defined, because the constant polynomials that we discard may be different for different values of $N$. We will see that this is not the case for nice polynomial families below. We will use the vdC operation successively to reduce the "complexity" of a polynomial family. Our main observation is that the leading vector of a polynomial family is well behaved under the vdC operation. Consider a family of variable polynomials $P_N=\{p_{1,N},...,p_{k,N}\}$ and let the leading vector of $P_{N}$ corresponding to $p_{1,N}$ be \begin{equation*} \mathcal{S}(P_{N}) =\{u_{1,N},...,u_{k,N}\}. \end{equation*}Fix any $1\leq i_0\leq k$, as well as the polynomial $p_{i_0,N}$, which we symbolize as $p_N$ from now on for convenience. Consider the new polynomial family $P'_{N,h}=(p_N,h)^{\star} P_N$ that arises from the van der Corput operation. Here, $h$ ranges over the non-zero integers. \begin{lemma}\label{form} Assume that the family $P_N$ of degree $d$ is nice and let $(u_{1,N},...,u_{k,N})$ be its leading vector corresponding to $p_{1,N}$. For every choice of polynomial $p_N$ above and the value of $h\in\mathbb{Z}^{*}$, we have that the elements of the leading vector of $P'_{N,h}$ corresponding to the new polynomial $p_{1,N}(t+h)-p_N(t)$ have the following form:\begin{itemize} \item They are equal to one of the $u_{i,N}$. \item They have the form $d u_{1,N}h$. \item They are the sum $d u_{1,N}h+u_{i,N}$ for some $u_{i,N}$ with $i\neq 1$. \end{itemize} \end{lemma} \begin{proof} Without loss of generality, we will assume that we have taken $p_N=p_{k,N}$ (the case $p_N=p_{1,N}$ is very similar). We want to study the leading vector corresponding to the polynomial $p_{1,N}(t+h)-p_{k,N}(t)$. 
Therefore, it is sufficient to find the leading coefficients of the polynomials \begin{align*} \big(p_{1,N}(t+h)-p_{k,N}(t)\big) &-\big(p_{1,N}(t)-p_{k,N}(t) \big)\\ \big(p_{1,N}(t+h)-p_{k,N}(t)\big)&-\big( p_{i,N}(t+h)-p_{k,N}(t) \big)\\ \big(p_{1,N}(t+h)-p_{k,N}(t)\big)&-\big( p_{i,N}(t)-p_{k,N}(t) \big) \end{align*}for $2\leq i\leq k$. The leading coefficient of the first polynomial is always $dhu_{1,N}$ and that satisfies our required property. The leading coefficient of the second polynomial is always equal to the leading coefficient of $p_{1,N}(t+h)-p_{i,N}(t+h)$ and this is always equal to the leading coefficient of $p_{1,N}(t)-p_{i,N}(t)$ which belongs to the leading vector. Finally, the leading coefficient of the third polynomial is equal to the leading coefficient of $p_{1,N}(t+h)-p_{i,N}(t)$. If $p_{1,N}$ and $p_{i,N}$ have distinct degrees, then that coefficient is equal to $u_{1,N}$ or the leading coefficient of $p_{i,N}$ (which in this case would be equal to $u_{i,N}$). If the two polynomials have the same degree but distinct leading coefficients, then the required leading coefficient is equal to the leading coefficient of $p_{1,N}(t)-p_{i,N}(t)$, which is $u_{i,N}$. In the final case, assume that the two polynomials have the same degree and equal leading coefficients. If $u_{i,N}$ is the leading coefficient of $p_{1,N}(t)-p_{i,N}(t)$, we can easily check that the required coefficient is either equal to $dhu_{1,N}$ or $dhu_{1,N}+u_{i,N}$. \end{proof} Observe that the particular form each element of the new leading vector has does not depend on the value of $N$ (i.e. it cannot have the first form for one value of $N$ and then the second form for some other value of $N$). This follows from the fact that the type of the original family is independent of $N$, if $N$ is large enough. We will now use this lemma to study how the van der Corput operation affects the type of the original family. 
\begin{corollary}\label{typecorollary} Let $P_N,p_N$ be as above and let $d$ be the degree of the family $P_N$. Then, there exists a set of integers $Y$ with at most $O_{k,d}(1)$ elements such that, for every $h\notin Y$, the polynomial family $P'_{N,h}=(p_N,h)^{\star} P_N$ that arises from the van der Corput operation is nice and its type is independent\footnote{ The type depends only on which polynomial of the initial family we choose to be the polynomial $p_N$, as well as the type of the original family.} of the value of $h$. \end{corollary} \begin{proof} We denote by $u_{ii,N}$ the leading coefficient of $p_{i,N}$, while $u_{ij,N}$ denotes the leading coefficient of $p_{i,N}-p_{j,N}$ for $i\neq j$. These are all good sequences by the definition of a nice family. Using Lemma \ref{form}, we can prove that the leading coefficients of all the polynomials in $P'_{N,h}$ and of their differences can take one of the following forms:\\ i) they are equal to some $u_{ij,N}$,\\ ii) they have the form $ru_{ii,N}h$ for some $1\leq r\leq d$ or\\ iii) they have the form $ru_{ii,N}h+u_{ij,N}$ for some $1\leq r\leq d$. We prove that these sequences are good for all except $O_{d,k}(1)$ values of $h$. For all values of $1\leq i,j\leq k$ and $1\leq r\leq d$, we consider the set $A(i,j,r)$ of all possible sequences of the above three forms (not all of them appear as leading coefficients, but this does not affect our argument), where $h$ is some fixed non-zero integer. There are only finitely many such sets. Note that (for $h\neq 0$), the sequences of the first two forms are always good. Now consider a sequence of the form $ru_{ii,N}h+u_{ij,N}$. There exist functions $f_1,f_2\in\mathcal{H}$, not converging to 0, such that $|u_{ii,N}/f_1(N)|=1+o_N(1)$ and $|u_{ij,N}/f_2(N)|=1+o_N(1)$. The function $rhf_1(t)+f_2(t)$ is obviously an element of $\mathcal{H}$. 
In addition, for our fixed $r$, the relation\begin{equation*} \lim\limits_{t\to+\infty }rhf_1(t)+f_2(t)=0 \end{equation*}can hold only for at most one possible value of $h\in\mathbb{Z}$, which we call a "bad value". Then, if $h$ is not a bad value, we have \begin{equation*} \Big|\frac{ru_{ii,N}h+u_{ij,N}}{rhf_1(N)+f_2(N)}\Big|=1+o_N(1). \end{equation*}Indeed, this follows easily because the functions $f_1$ and $f_2$ are comparable, which also means that all the involved sequences are comparable. Thus, dividing the numerator and denominator of the above fraction by either $f_1(N)$ or $f_2(N)$, we easily get the result. In conclusion, the sequence $ru_{ii,N}h+u_{ij,N}$ is a good sequence for all non-bad values of $h$. Now, if we take all possible values of the $i,j,r$, we conclude that there are at most $O_{d,k}(1)$ bad values of $h$. We have shown that for every non-bad value of $h$, the family $P'_{N,h}$ is a nice polynomial family and, therefore, has a fixed type (independent of $N$). We show that its type does not depend on $h$. Therefore, consider two polynomials $q_1,q_2$ of $P'_{N,h}$ of the same degree. We consider some possible cases:\\ a) If $q_1$ and $q_2$ have the form $p_{i,N}(t)-p_N(t)$, then whether or not their leading coefficients are equal depends only on the type of the original family and the choice of $p_N$ (and not on $h$).\\ b) If $q_1$ has the form $p_{i,N}(t+h)-p_N(t)$, while $q_2$ has the form $p_{j,N}(t)-p_N(t)$, then their leading coefficients can be equal in only two possible cases: if the polynomial $p_N$ has degree strictly larger than the degree of both $p_{i,N}$ and $p_{j,N}$ (this depends only on the choice of $p_N$, not on $h$), or if the polynomials $p_{i,N}(t+h)$ and $p_{j,N}(t)$ have the same degree (bigger than or equal to the degree of $p_N$) and equal leading coefficients. 
In the second case, we must have that $p_{i,N}(t)$ and $p_{j,N}(t)$ have equal leading coefficients, which depends only on the type of the original family and not on $h$.\\ c) If $q_1$ and $q_2$ both have the form $p_{i,N}(t+h)-p_N(t)$, then the result follows similarly as in the case a). The fact that the degrees of the polynomials of the new family and of their differences do not depend on $N$ and $h$ can also be established easily using the preceding arguments. We omit the details. \end{proof} \begin{proposition}\label{induction} If $P_N=\{p_{1,N},...,p_{k,N}\}$ is an ordered polynomial family, then there exists a polynomial $p_N\in P_N$, such that for all, except at most one value of $h\in \mathbb{Z}$, the polynomial family $P'_{N,h}=(p_N,h)^{*} P_N$ has type strictly smaller than the type of $P_N$ and its leading polynomial is the polynomial $p_{1,N}(t+h)-p_N(t)$. \end{proposition} \begin{proof} We describe the operation that reduces the type. At each step, we choose a polynomial $p_{N}\in P_N$ that has minimal degree in the family. For an $h\in\mathbb{Z}$, apply the van der Corput operation. This forms a polynomial family \begin{equation}\label{P} P'_N=\{ p_{1,N}(t+h)-p_{N}(t),...,p_{k,N}(t+h)-p_N(t),\ p_{1,N}(t)-p_{N}(t),...,p_{k,N}(t)-p_N(t) \} \end{equation} and choose $p_{1,N}(t+h)-p_{N}(t)$ to be the new leading polynomial. We distinguish between some cases: a) Assume that the polynomials $p_{1,N}$ and $p_{k,N}$ have distinct degrees. Then, choose $p_N=p_{k,N}$, which by the ``ordered'' assumption has minimal degree. We notice that the polynomial $p_{1,N}(t+h)-p_N(t)$ has maximal degree in the polynomial family. We check that the type of the polynomial family is reduced. Indeed, if the degree of $p_{k,N}(t)$ is $d'$, then the number $w_{d'}$ is reduced, while all the numbers $w_i$ are left unchanged for $i>d'$. b) Suppose the polynomials $p_{1,N}$ and $p_{k,N}$ have the same degree and not all leading coefficients in the family $P_N$ are equal.
In particular, we may assume, without loss of generality, that this holds for the polynomials $p_{1,N}$ and $p_{k,N}$. Again, choose $p_{N}=p_{k,N}$. Then, the polynomial $p_{1,N}(t+h)-p_{N}(t)$ has maximal degree in the new polynomial family. In addition, the number $w_{d}$ is reduced, which means that the new family has smaller type than the original. c) Finally, assume that all polynomials have the same degree and the same leading coefficient. We choose again $p_N=p_{k,N}$. The polynomial $p_{1,N}(t+h)-p_{N}(t)$ has maximal degree equal to $d-1$ in $P'_N$, except possibly for one value of $h\in\mathbb{Z}$ (to see this, we can work similarly as in the proof of Corollary \ref{typecorollary}). Also, the family $P'_N$ has smaller type than $P_N$, since it has degree at most $d-1$. \end{proof} To summarize all of the above, we have: \begin{corollary}\label{all in one} Let $P_N$ be a nice polynomial family of degree $d$, with $k$ polynomials and with type ${\bf W}$. Then, there exists a $p_N\in P_N$, such that the family $P'_N=(p_N,h)^{*}P_N$ is nice and has (fixed) type smaller than ${\bf W}$ for all, except at most $O_{d,k}(1)$ values of $h$. \end{corollary} We are now ready to finish the proof of Proposition \ref{PET}: \begin{proof}[Proof of the higher degree case] We can assume that $\text{deg }(p_{1,N}(n))\geq 2$. Let ${\bf W}=(d,w_d,...,w_{1})$ be the type of the given polynomial family, and assume that the claim holds for all families of polynomials with type smaller than ${\bf W}$. Fix a natural number $M$.
For a $t_0>0$ to be chosen later (that will depend only on $d,k,{\bf W}$), we apply the van der Corput inequality to get \begin{multline}\label{25} \bignorm{ \underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n,N} \prod_{i=1}^{k} T^{\floor{p_{i,N}(n)}}f_{i,N}}_{L^2(\mu)}^{2^{t_0}} \ll_{t_0} \frac{1}{M} +\\ \underset{|m|\leq M}{\mathbb{E}}\Big|\underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n+m,N}\overline{c_{n,N}} \int \prod_{i=1}^{k}\ T^{\floor{p_{i,N}(n+m)}}f_{i,N}\cdot T^{\floor{p_{i,N}(n)}}\overline{f_{i,N}}\ d\mu \Big|^{2^{t_0 -1}} +o_N(1). \end{multline} Let $p_N$ be the polynomial given by Corollary \ref{all in one}. Without loss of generality, assume that $p_N=p_{k,N}$ (the case where $p_N=p_{1,N}$ is similar). We compose with $T^{-\floor{p_{N}(n)}}$ in the above integral, so that \begin{align*} &\underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n+m,N}\overline{c_{n,N}}\int \prod_{i=1}^{k}\ T^{\floor{p_{i,N}(n+m)}}f_{i,N}\cdot T^{\floor{p_{i,N}(n)}}\overline{f_{i,N}}\ d \mu =\\ &\underset{0\leq n\leq L_N}{\mathbb{E}}\ c_{n+m,N}\overline{c_{n,N}}\int \prod_{i=1}^{k}\ T^{\floor{p_{i,N}(n+m)}-\floor{p_N(n)}}f_{i,N}\cdot T^{\floor{p_{i,N}(n)}-\floor{p_N(n)}}\overline{f_{i,N}}\ d\mu=\\ &\underset{0\leq n\leq L_N}{\mathbb{E}} c_{n+m,N}\overline{c_{n,N}}\int \prod_{i=1}^{k}\ T^{\floor{p_{i,N}(n+m)-p_N(n)}+e_{1,n,i,m,N}}f_{i,N}\cdot T^{\floor{p_{i,N}(n)-p_N(n)}+e_{2,n,i,N}}\overline{f_{i,N}}\ d\mu, \end{align*}where the numbers $e_{1,n,i,m,N} \text{ and } e_{2,n,i,N}$ take values in the set $\{0,\pm 1\}$.
We use the Cauchy-Schwarz inequality and then use Lemma \ref{errors} to bound the absolute value of the last quantity by a constant (depending only on $k$) multiple of the expression \begin{multline*} \sup_{|c_{n,N}|\leq 1}\bignorm{ \underset{0\leq n\leq L_N }{\mathbb{E}} c_{n,N} \big(\prod_{i=1}^{k-1} T^{\floor{p_{i,N}(n+m)-p_N(n)}}f'_{i,N}\cdot T^{\floor{p_{i,N}(n)-p_N(n)}}\overline{f'_{i,N}}\big) \\ T^{\floor{p_{k,N}(t+m)-p_{k,N}(t)}} f'_{k,N} }_{L^2{(\mu)}}+o_N(1) \end{multline*}for some 1-bounded functions $f'_{1,N}=f_{1,N}, f'_{2,N},...,f'_{k,N}$. Recall that we chose $p_N=p_{k,N}$. The family of polynomials \begin{equation*} P'_{N,m}=\{ p_{1,N}(t+m)-p_{k,N}(t),...,p_{k,N}(t+m)-p_{k,N}(t),\ p_{1,N}(t)-p_{k,N}(t),...,p_{k-1,N}(t)-p_{k,N}(t) \} \end{equation*}is nice and has (fixed) type ${\bf W}'<{\bf W} $ independent of $m$ for all, except at most $O_{d,k}(1)$ values of $m\in\mathbb{N}$. Let $Q$ be this finite set of "bad" values of $m$ and let \begin{equation*} \mathcal{S}(P'_{N,m})=\{u'_{1,m,N},...,u'_{k',m,N}\} \end{equation*} be the leading vector of $P'_{N,m}$, where $k'\leq 2k-1$. 
For all $m\notin Q$, we use the induction hypothesis to deduce that \begin{multline}\label{uiop} \sup_{|c_{n,N}|\leq 1}\bignorm{\underset{0\leq n\leq L_N }{\mathbb{E}} c_{n,N} \big(\prod_{i=1}^{k-1} T^{\floor{p_{i,N}(n+m)-p_N(n)}}f'_{i,N}\cdot T^{\floor{p_{i,N}(n)-p_N(n)}}\overline{f'_{i,N}}\big)\cdot \\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ T^{\floor{p_{k,N}(t+m)-p_{k,N}(t)}} f'_{k,N} }_{L^2{(\mu)}}^{2^t} \ll_{k,d,{\bf W}'} \\ \frac{1}{M}+\ \sum_{{\bf h}\in [[Y_0]]}^{} \underset{(m_1,...,m_t)\in [-M,M]^t}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{\sum_{1\leq j\leq k'} \ p_{\underline{\varepsilon},j}(m_1,...,m_t)u'_{j,m,N}}+h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1,N}) \ d\mu \Big| + o_N(1) \end{multline}for some natural numbers $t$, $s$, a positive constant and a finite set $Y_0$ that all depend only on $d, {\bf W}' \text{ and } k'$ (i.e. $d,{\bf W}$ and $k$). We can now choose our original number $t_0$ (see \eqref{25}) in the exponents to be equal to $t+1$ (therefore, it depends only on $d,k,{\bf W}$). We also observe that our induction imposes that the polynomials \begin{equation*} A_{\underline{\varepsilon},N}(m_1,...,m_t) = \sum_{1\leq j\leq k'} \ p_{\underline{\varepsilon},j}(m_1,...,m_t)u'_{j,m,N} \end{equation*}are non-constant and pairwise essentially distinct for any (non-zero) values of the leading vector $\{u'_{1,m,N},..., u'_{k',m,N}\}$ and that all the polynomials $p_{\underline{\varepsilon},j}$ are at most linear in each variable. In addition, we claim that \begin{equation}\label{e^c} A_{\underline{\varepsilon},N}(m_1,...,m_t) +A_{\underline{\varepsilon^c},N}(m_1,...,m_t)=A_{\underline{1},N}(m_1,...,m_t). \end{equation} (we have seen that all of the above are true in the linear case). These are the properties i)-iii) in Proposition \ref{PET}. All the $u'_{j,m,N}$ have the form described by Lemma \ref{form}. 
Therefore, we can write \begin{equation}\label{essential} p_{\underline{\varepsilon},j}(m_1,...,m_t)u'_{j,m,N}=p'_{1,\underline{\varepsilon},\ell}(m,m_1,...,m_t)u_{\ell ,N} +p'_{2,\underline{\varepsilon},\ell'}(m_1,...,m_t)u_{\ell',N}. \end{equation}In order to describe the form of the new polynomials $p'_{1,\underline{\varepsilon},\ell},p'_{2,\underline{\varepsilon},\ell'}$, we split into cases depending on the form of $u'_{j,m,N}$ (cf. Lemma \ref{form}):\\ a) If $u'_{j,m,N}$ is equal to some $u_{\ell,N}$ for $1\leq \ell\leq k$, then we have $p'_{2,\underline{\varepsilon},\ell'}=0$ and $$p'_{1,\underline{\varepsilon},\ell}(m,m_1,...,m_t)=p_{\underline{\varepsilon},j}(m_1,...,m_t)$$ (thus $p'_{1,\underline{\varepsilon},\ell}(m_1,...,m_t)$ is constant as a polynomial in $m$).\\ b) If $u'_{j,m,N}$ is equal to $dmu_{1,N}$ ($\ell=1$), then we have again $p'_{2,\underline{\varepsilon},\ell'}=0$ and \begin{equation*} p'_{1,\underline{\varepsilon},1}(m,m_1,...,m_t)=dm p_{\underline{\varepsilon},j}(m_1,...,m_t). \end{equation*} c) In the final case that $ u'_{j,m,N}=dmu_{1,N} +u_{\ell',N} $ for some $\ell'\neq 1$, then we have $p'_{2,\underline{\varepsilon},\ell'}=p_{\underline{\varepsilon},j}(m_1,...,m_t)$ and \begin{equation*} p'_{1,\underline{\varepsilon},1}(m,m_1,...,m_t)=dm p_{\underline{\varepsilon},j}(m_1,...,m_t). \end{equation*} Therefore, the new polynomials $p_{1,\underline{\varepsilon},\ell}$ and $p_{2,\underline{\varepsilon},\ell'}$ are at most linear in each of the variables $m_1,...,m_t$, as well as the new variable $m$. By grouping the terms corresponding to the same $u_{\ell,N}$, we can rewrite \begin{equation*} \sum_{1\leq r\leq k'}^{} \ p_{\underline{\varepsilon},r}(m_1,...,m_t)u'_{r,N} =\sum_{1\leq r\leq k}^{} q_{\underline{\varepsilon},r}(m,m_1,....,m_t)u_{r,N} \end{equation*} for some new polynomials $q_{\underline{\varepsilon},r}$. 
\begin{claim} The new polynomials $\sum_{1\leq r\leq k}^{} q_{\underline{\varepsilon},r}(m,m_1,....,m_t)u_{r,N} $ satisfy conditions i), ii), iii) and iv) of Proposition \ref{PET}, for any (non-zero) values of the $u_{r,N}$. \end{claim} \begin{proof}[Proof of the Claim]The fact that they are non-constant is trivial, since otherwise one of the polynomials \begin{equation*} \sum_{1\leq r\leq k'}^{} \ p_{\underline{\varepsilon},r}(m_1,...,m_t)u'_{r,N} \end{equation*}would be constant, which is at odds with the induction hypothesis. Assume that condition ii) fails for two $\underline{\varepsilon_1}, \underline{\varepsilon_2} \in [[s]]$. Regarding these two polynomials as polynomials only in $ (m_1,...,m_t)$, \eqref{essential} would give that the polynomials \begin{equation*} \sum_{1\leq r\leq k'}^{} p_{\underline{\varepsilon_1},r}(m_1,...,m_t)u'_{r,m,N}\ \text{ and }\ \sum_{1\leq r\leq k'}^{} p_{\underline{\varepsilon_2},r}(m_1,...,m_t) u'_{r,m,N}, \end{equation*}are not essentially distinct, which is false by the induction hypothesis. Therefore, we have established both i) and ii). Now, we want to prove an analogue of \eqref{e^c} for our new polynomials. But this follows trivially by \eqref{essential} (the new polynomials are just a rewritten form of the $A_{\underline{\varepsilon},N}$). This establishes that the new polynomials satisfy condition iii) in the statement of Proposition \ref{PET}. Finally, we are going to prove that the new polynomials $q_{\underline{\varepsilon},j}$ satisfy condition iv) of Proposition \ref{PET}. Fix an $\underline{\varepsilon}\in [[s]]$. We will assume that all $q_{\underline{\varepsilon},j}$ are non-zero and we will show that they are linearly independent (if there are identically zero polynomials among the $q_{\underline{\varepsilon},j}$, we proceed similarly by ignoring these polynomials). 
It suffices to show that if $a_1,...,a_k$ are real numbers, such that \begin{equation*} a_1q_{\underline{\varepsilon},1}(m,m_1,...,m_t)+...+a_kq_{\underline{\varepsilon},k}(m,m_1,...,m_t) \end{equation*}is the zero polynomial, then all the numbers $a_i$ are zero. Recalling the form of the $q_{\underline{\varepsilon}, r}$, this becomes a linear combination of the form \begin{equation}\label{bigpoly} a_1P_{1,\underline{\varepsilon}}(m,m_1,...,m_t)+\sum_{i\in I_1}^{}b_i p_{\underline{\varepsilon},i}(m_1,...,m_t) \end{equation}for some $I_1\subset \{1,2,...,k'\}$ and $b_i\in\{a_2,...,a_k\}$. In addition, the polynomial $P_{1,\underline{\varepsilon}}$ has the form \begin{equation*} dm\sum_{i\in I_2}^{}p_{\underline{\varepsilon},i} +\sum_{i\in I_3}^{}p_{\underline{\varepsilon},i} \end{equation*}for some $I_2,I_3\subset \{1,2,...,k'\} $ with $I_1\cap I_2=\emptyset$ and $I_1\cap I_3=\emptyset$. We argue by contradiction. For $m=0$, the polynomial in \eqref{bigpoly} must be identically zero and this easily yields that all the $b_i$ must be zero and that $a_1\sum_{i\in I_3}^{}p_{\underline{\varepsilon},i}$ is also the zero polynomial. The first relation implies that $a_2=...=a_k=0$ by the induction hypothesis, while the second implies that either $a_1=0$ (in which case we are done), or $I_3=\emptyset$ (since the $p_{\underline{\varepsilon},i}$ are linearly independent by the induction hypothesis). If $I_3=\emptyset$, then \eqref{bigpoly} implies that the polynomial \begin{equation*} a_1dm\sum_{i\in I_2}^{}p_{\underline{\varepsilon},i} \end{equation*}is the zero polynomial. This implies that $a_1=0$ or $I_2=\emptyset.$ However, we cannot have $I_2=I_3=\emptyset$, because that would imply that the polynomial $q_{\underline{\varepsilon},1}$ is identically zero, which is absurd (since we assumed that we have already discarded the zero polynomials among the $q_{\underline{\varepsilon},i}$). Our claim follows. 
\end{proof} Combining all of the above, we rewrite \eqref{uiop} as \begin{multline*} \sup_{|c_{n,N}|\leq 1}\bignorm{ \underset{0\leq n\leq L_N }{\mathbb{E}} c_{n,N} \big(\prod_{i=1}^{k-1} T^{\floor{p_{i,N}(n+m)-p_N(n)}}f_{i,N}\cdot T^{\floor{p_{i,N}(n)-p_N(n)}}\overline{f_{i,N}}\big)\cdot \\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ T^{\floor{p_{k,N}(t+m)-p_{k,N}(t)}} f_{k,N} }_{L^2{(\mu)}}^{2^{t}} \ll_{d,k,{\bf W}} \\ \frac{1}{M}+ \sum_{{\bf h}\in [[Y_0]]}^{} \underset{ |m_1|,...,|m_t|\leq M}{\mathbb{E}}\ \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{\sum_{1\leq r\leq k}^{} q_{\underline{\varepsilon},r}(m,m_1,...,m_t)u_{r,N}}+ h_{\underline{\varepsilon}}} (\mathcal{C}^{|\underline{\varepsilon}|}f_{1,N}) d\mu \Big|+ o_N(1). \end{multline*} We use the above bounds for all $-M\leq m\leq M$ in \eqref{25}. The possible error coming from the bad values of the set $Q$ can be absorbed by an $O_{d,k}({1}/ {M})$ term. Finally, we get \begin{multline*} \bignorm{ \underset{0\leq n\leq L_N}{\mathbb{E}} c_{n,N} \prod_{i=1}^{k} T^{\floor{p_{i,N}(n)}}f_{i,N}}_{L^2(\mu)}^{2^{t+1}} \ll_{d,k,{\bf W}} \\ \frac{1}{M} +\ \sum_{{\bf h}\in [[Y_0]]}^{} \underset{|m|,|m_1|,...,|m_t|\leq M}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{\sum_{1\leq r\leq k}^{} q_{\underline{\varepsilon},r}(m,m_1,...,m_t)u_{r,N}}+ h_{\underline{\varepsilon}}} (\mathcal{C}^{|\underline{\varepsilon}|}f_{1,N}) d\mu \Big|+ o_N(1), \end{multline*} which is what we wanted to show. \end{proof} \section{The sub-linear plus polynomial case}\label{sublinearsection} In this section, we establish a particular case of Proposition \ref{factors}, which we shall also use in the general case in the next section.
Let $\mathcal{S}$ denote the subset of $\mathcal{H}$ that contains the functions with sub-linear growth rate and let $\mathcal{P}\subseteq \mathcal{H}$ denote the collection of polynomials with real coefficients. Then, we let $\mathcal{S+P}$ denote the collection of functions that can be written as a sum of a function in $\mathcal{S}$ and a function in $\mathcal{P}$ (or equivalently, linear combinations of functions in $\mathcal{S}$ and $\mathcal{P}$). Let $a_1,...,a_k$ be a collection of functions in $\mathcal{S+P}$. Then, we can write $a_i=u_i+p_i$, where $u_i\in\mathcal{S}$ and $p_i$ is a polynomial. We will also define the {\em degree} and {\em type} of the collection $a_1,a_2,...,a_k$ using a similar notion to the degree and type of a polynomial family defined in the previous section. More precisely, since we do not impose that the polynomials $p_1,...,p_k$ are essentially distinct, we choose a maximal subset of the polynomials $p_i$ consisting of non-constant and essentially distinct polynomials and we define the degree and type of the collection $a_1,...,a_k$ to be the degree and type of this new subfamily of polynomials, respectively. Similarly, we define the leading vector of $a_1,...,a_k$ as the leading vector of the maximal subfamily that we defined above. We can always choose this maximal subset to contain the polynomial $p_1$. We define the cardinality of this new maximal subset to be the {\em size} of the collection $a_1,...,a_k$. \begin{proposition}\label{sublinearseminorm} Let $M$ be a positive integer and let $a_1,...,a_k$ be a collection of functions in $\mathcal{S+P}$ with degree $d$, type ${\bf W}$ and size $k'\leq k$. Let $(c_1,...,c_{k'})$ be the leading vector of the family $\{a_1,...,a_k\}$. In addition, assume that $a_1(t)\succ \log t$ and $a_1(t)-a_j(t)\succ \log t$ for $j\neq 1$.
Then, there exist positive integers $s,t$, a finite set $Y$ of integers and real polynomials $p_{\underline{\varepsilon},j}$ in $t$ variables, where $\underline{\varepsilon}\in [[s]]$ and $1\leq j\leq k$, all depending only on $d,k',{\bf W}$, such that, for any measure preserving system $(X,\mu,T)$ and function $f_1\in L^{\infty}(\mu)$ bounded by 1, we have \begin{multline}\label{61} \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1 } \sup_{|c_n|\leq 1 }\bignorm{ \underset{1\leq n\leq N}{\mathbb{E}} c_n\ T^{\floor{a_1(n)}}f_1\cdot...\cdot T^{\floor{a_k(n)}}f_k }_{L^2(\mu)}^{2^t}\ll_{d,k,k',{\bf W}} \\ \frac{1}{M} +\sum_{{\bf h}\in Y^{[[s]]} }^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \nnorm{ \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon}}}f_1 }_{2k+1}+o_N(1) \end{multline}where \begin{equation*} A_{\underline{\varepsilon}}({\bf m})=\sum_{j=1}^{k'} p_{\underline{\varepsilon},j}({\bf m})c_j \end{equation*} are pairwise essentially distinct polynomials. \end{proposition} Observe that the iterates inside the seminorm in \eqref{61} are real polynomials in several variables. We can take $M\to+\infty$ and expand these seminorms to arrive at an iterated limit of polynomial averages. It is possible to bound these averages by a suitable seminorm of the function $f_1$ using the results in \cite{Leibmanseveral} and get a simpler bound in \eqref{61}. This necessitates that we substitute the $O_{d,k,k',{\bf W}}(1)$ implicit constant by an $O_{a_1,...,a_k}(1)$ constant and this is insufficient for our purposes in the next section, where we will have to apply Proposition \ref{sublinearseminorm} for several collections of functions simultaneously.
However, in view of the above discussion, we deduce the following: \begin{corollary}\label{sublinearcorollary} Let $a_1,...,a_k$ be a collection of functions in $\mathcal{S}+\mathcal{P}$ such that $a_1(t)\succ \log t$ and $a_1(t)-a_j(t)\succ \log t$ for $j\neq 1$. Then, there exists a positive integer $s$ such that, for any measure preserving system $(X,\mu,T)$ and 1-bounded function $f_1\perp Z_s(X)$, we have \begin{equation*} \lim\limits_{N\to+\infty} \ \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1 } \ \sup_{|c_n|\leq 1 }\bignorm{ \underset{1\leq n\leq N}{\mathbb{E}} c_n\ T^{\floor{a_1(n)}}f_1\cdot...\cdot T^{\floor{a_k(n)}}f_k }_{L^2(\mu)}=0. \end{equation*} \end{corollary} We analyze the conditions imposed on the functions $a_1,...,a_k$ more closely: write each function $a_i$ in the form $a_i(t)=u_i(t)+p_i(t)$, where $u_i\in \mathcal{S}$ and $p_i\in \mathcal{P}$. The condition $a_1(t)\succ \log t $ implies that either $u_1(t)\succ \log t$ or $p_1(t)$ is a non-constant polynomial. Similarly, the second condition implies that either $u_1(t)-u_j(t)\succ \log t $ or $p_1(t)-p_j(t)$ is a non-constant polynomial. Furthermore, we can make one more reduction. Writing again $a_i(t)=u_i(t)+p_i(t)$ as above and using the same argument as in Section \ref{sectionfactors} (see the discussion following the statement of Proposition \ref{factors}), we may assume that the function $u_1$ has the largest growth rate among the functions $u_i$. In order to establish the main result of this section, we will also use the following proposition, which is a special case of Proposition \ref{sublinearseminorm}. The proof of Proposition \ref{sublinearseminorm} corresponds to Step 1 in example b) of Section \ref{sectionfactors}, while Proposition \ref{sublinearonly} corresponds to Step 2 of the same example.
\begin{proposition}\label{sublinearonly} Let $ a_1,..., a_k$ be sub-linear functions in $\mathcal{H}$ and assume that all the functions $a_1,a_1-a_2,...,a_1-a_k$ dominate $\log t$. Then, for any measure preserving system $(X,\mu,T)$ and function $f_1\in L^{\infty}(\mu)$ bounded by 1, we have \begin{equation}\label{62} \limsup\limits_{N\to+\infty} \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1 } \sup_{|c_n|\leq 1 }\ \bignorm{ \underset{1\leq n\leq N}{\mathbb{E}} c_n\ T^{\floor{a_1(n)}}f_1\cdot...\cdot T^{\floor{a_k(n)}}f_k }_{L^2(\mu)}\ll_k \ \nnorm{f_1}_{2k}. \end{equation} \end{proposition} \begin{proof}[Proof that Proposition \ref{sublinearonly} implies Proposition \ref{sublinearseminorm}] First of all, we write each $a_i(t)$ in the form $u_i(t)+p_i(t)$ as we discussed above. Our main tool will be to use Lemma \ref{mainlemma} in order to reduce our problem to studying averages on small intervals, where the sublinear functions $u_i$ will have a constant integer part. Suppose that not all of the polynomials $p_1(t),...,p_k(t)$ are constant, since the other case follows from Proposition \ref{sublinearonly} (that means the family has degree $\geq 1$). We can assume, without loss of generality, that $p_i(0)=0$ for all $i$ (the constant terms can be absorbed by the functions $u_i$). Therefore, let $L(t)\in \mathcal{H}$ be a sub-linear function to be chosen later. In addition, we choose functions $f_{2,N},...,f_{k,N}$ so that the average in the left-hand side of \eqref{61} is $1/N$ close to the supremum. 
We want to bound \begin{equation*} \underset{1\leq r\leq R}{\mathbb{E}}\ \sup_{|c_{n,r}|\leq 1 }\ \bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}}\ c_{n,r} \ T^{\floor{u_1(n)+p_{1}(n) }}f_1\cdot...\cdot T^{\floor{u_k(n)+p_k(n)}}f_{k,R} }_{L^2(\mu)}^{2^t} \end{equation*}for some integer parameter $t$, which we will choose later to depend only on the quantities $d,k',{\bf W}$ (thus, when applying Lemma \ref{errors} below to remove the error terms in the iterates, we will always have that the implicit constant depends only on $d,k',{\bf W}$). Recall that we have reduced our problem to the case that the function $u_1$ has the largest growth rate among the functions $u_i$. Now, we want to choose the sub-linear function $L(t)\in \mathcal{H}$ so that the functions $u_i(n)$ restricted to the interval $[r,r+L(r)]$ become very close to the value $u_i(r)$. To achieve this, it suffices to take $L(t)\in\mathcal{H}$ such that \begin{equation*} 1\prec L(t)\prec (u_1'(t))^{-1}. \end{equation*}To see that such a function exists, we only need to show that $(u_1'(t))^{-1}\succ 1$ which follows easily from the fact that $u_1(t)\prec t$. Observe that for every $i\in\{1,2,...,k\}$ we must have $L(t)\prec (u_i'(t))^{-1}$, since $u_1$ has maximal growth among the functions $u_i$. For every $n\in [r,r+L(r)]$, we observe that \begin{equation*} |u_i(n)-u_i(r)|\leq (n-r)\max_{x\in [r,r+L(r)]}|u_i'(x)|. \end{equation*}Since $|u_i'(t)|\searrow 0$, we have that for $r$ large enough \begin{equation*} |u_i(n)-u_i(r)|\leq L(r)u_i'(r)=o_r(1), \ \ \ \ \ \ n\in [r,r+L(r)]. \end{equation*}Therefore, for $r$ sufficiently large we have \begin{equation*} \floor{u_i(n)+p_i(n)}=\floor{u_i(r)}+\floor{p_i(n)}+e_{i,n}, \ \ \ \ \ \ \ n\in[r,r+L(r)], \end{equation*}where $e_{i,n}\in\{0,\pm 1,\pm 2\}$. 
Therefore, our original problem reduces to bounding the quantity \begin{equation}\label{don} \underset{1\leq r\leq R}{\mathbb{E}}\ \sup_{|c_{n,r}|\leq 1 }\bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}}\ c_{n,r}\ T^{\floor{u_1(r)}+\floor{p_{1}(n) }+e_{1,n}}f_1\cdot...\cdot T^{\floor{u_k(r)}+\floor{p_k(n)}+e_{k,n}}f_{k,R} }_{L^2(\mu)}^{2^t}. \end{equation}Using Lemma \ref{errors}, we may reduce to the case that the error terms $e_{i,n}$ in the iterates are all equal to zero. Let $S$ be the set of those $i\in\{1,...,k\}$ for which the polynomial $p_i(t)$ is equal to the polynomial $p_1(t)$. Reordering, if necessary, we may assume that $S=\{1,...,k_0\}$ for some $k_0\leq k$. Note that the original condition then implies that $u_1(t)-u_i(t)\succ \log t$ for each $2\leq i\leq k_0$. We rewrite \eqref{don} as \begin{multline}\label{buf} \underset{1\leq r\leq R}{\mathbb{E}}\ \sup_{|c_{n,r}|\leq 1 }\bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}}\ c_{n,r}\ T^{\floor{p_1(n)}} (\prod_{i=1}^{k_0} T^{\floor{u_i(r) }}f_{i,R} ) \prod_{i=k_0+1}^{k} T^{\floor{u_i(r)}+\floor{p_i(n)}}f_{i,R} }_{L^2(\mu)}^{2^t}=\\ \underset{1\leq r\leq R}{\mathbb{E}}\ \sup_{|c_{h,r}|\leq 1 }\bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}}\ c_{h,r} \ T^{\floor{p_1(r+h)}} (\prod_{i=1}^{k_0} T^{\floor{u_i(r) }}f_{i,R} ) \prod_{i=k_0+1}^{k} T^{\floor{u_i(r)}+\floor{p_i(r+h)}}f_{i,R} }_{L^2(\mu)}^{2^t}\leq \\ \underset{1\leq r\leq R}{\mathbb{E}}\ \sup_{\norm{f_{k_0+1}}_{\infty},...,\norm{f_{k}}_{\infty}\leq 1 }\ \sup_{|c_{h,r}|\leq 1 }\bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}}\ c_{h,r} \ T^{\floor{p_1(r+h)}} (\prod_{i=1}^{k_0} T^{\floor{u_i(r) }}f_{i,R} ) \prod_{i=k_0+1}^{k} T^{\floor{p_i(r+h)}}f_{i} }_{L^2(\mu)}^{2^t}, \end{multline}where $f_{1,R}=f_1$. We also write $F_{r,R}:= \prod_{i=1}^{k_0} T^{\floor{u_i(r) }}f_{i,R}$ for brevity. We can assume that the polynomials $p_i(r+h) $ are non-constant (otherwise, we just ignore the corresponding iterate in the last average). 
In addition, we may assume that they are pairwise essentially distinct, because if two polynomials are equal, we can combine both of these iterates into a single iterate (this operation does not change the type or leading vector of the given collection of functions). Note that under these assumptions the family of polynomials \begin{equation*} P_r= \{p_1(r+h),p_{k_0+1}(r+h),...,p_k(r+h) \} \end{equation*}is a nice family of polynomials in the variable $h$ (the leading coefficients of the polynomials and their pairwise differences are all constant sequences) and has type and leading vector equal to that of the original collection $\{p_1,...,p_k\}$. Therefore, we can apply Proposition \ref{PET}: there exist positive integers $t_0$ and $s$, a finite set $Y$ of integers and polynomials $p_{\underline{\varepsilon},j}$ where $\underline{\varepsilon}\in [[s]]$ and $1\leq j\leq k$ such that \begin{multline} \sup_{\norm{f_{k_0+1}}_{\infty},...,\norm{f_{k}}_{\infty}\leq 1 }\ \sup_{|c_{h,r}|\leq 1 } \bignorm{ \underset{0\leq h\leq L(r)}{\mathbb{E}}\ c_{h,r}\ T^{\floor{p_1(r+h)}} F_{r,R} \prod_{i=k_0+1}^{k} T^{\floor{p_i(r+h)}}f_{i} }_{L^2(\mu)}^{2^{t_0}}\ll_{d,k',{\bf W}}\\ \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^{t_0}}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_{r,R}) \ d\mu \Big|+ o_r(1), \end{multline}where \begin{equation*} A_{\underline{\varepsilon}}({\bf m})=\sum_{1\leq j\leq k'} \ p_{\underline{\varepsilon},j}({\bf m})c_{j} \end{equation*}and $(c_1,...,c_{k'})$ is the leading vector of the initial family (here we have $k'\leq k-k_0+1$). 
Using this in \eqref{buf} with $t={t_0}$ (which depends only on $d,k',{\bf W}$ as we claimed in the beginning), we deduce that our original average is bounded by $O_{d,k,k',{\bf W}}(1)$ times \begin{equation*} \frac{1}{M} +\underset{1\leq r\leq R}{\mathbb{E}} \ \sum_{{\bf h}\in Y^{[[s]]}}^{} \ \underset{{\bf m}\in [-M,M]^{t_0}}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_{r,R}) \ d\mu \Big|+ o_R(1). \end{equation*}Using the definition of $F_{r,R}$, we rewrite this as \begin{multline*} \frac{1}{M} +\underset{1\leq r\leq R}{\mathbb{E}}\ \sum_{{\bf h}\in Y^{[[s]]}}^{} \ \underset{{\bf m}\in [-M,M]^{t_0}}{\mathbb{E}} \Big| \int T^{\floor{u_1(r)}}\big(\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1})\big)\\ \prod_{i=2}^{k_0} T^{\floor{u_i(r)}}\big(\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{i,R})\big) \ d\mu \Big|+ o_R(1). \end{multline*} Now, we consider two cases:\\ \underline{Case 1}: Firstly, assume that $k_0=1$. 
Then, the above quantity can be rewritten as \begin{multline*} \frac{1}{M} +\underset{1\leq r\leq R}{\mathbb{E}} \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^{t_0}}{\mathbb{E}} \Big| \int T^{\floor{u_1(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1})) \ d\mu \Big|+ o_R(1)=\\ \frac{1}{M} + \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^{t_0}}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1}) \ d\mu \Big|+ o_R(1). \end{multline*} The result follows immediately, since \begin{equation*} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1}) \ d\mu \Big|\leq \nnorm{ \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1}) }_{2k+1}. \end{equation*} \underline{Case 2}: Assume that $k_0>1$ and we want to bound \begin{multline}\label{ssa} \frac{1}{M} +\underset{1\leq r\leq R}{\mathbb{E}} \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^{t_0}}{\mathbb{E}} \Big| \int T^{\floor{u_1(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1}))\\ \prod_{i=2}^{k_0} T^{\floor{u_i(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{i,R})) \ d\mu \Big|+ o_R(1). \end{multline}Our original hypothesis implies that the functions $u_1-u_i$ (where $2\leq i\leq k_0$) dominate $\log t$. 
Since $u_1$ was assumed in the beginning to have the biggest growth rate among the functions $u_i$, we must also have $u_1(t)\succ \log t$. We take the limit as $R\to+\infty$ and rewrite the quantity in \eqref{ssa} as \begin{multline*} \frac{1}{M}+\sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \limsup\limits_{R\to+\infty} \underset{1\leq r\leq R}{\mathbb{E}}\Big| \int T^{\floor{u_1(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1}))\\ \prod_{i=2}^{k_0} T^{\floor{u_i(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{i,R})) \ d\mu \Big|. \end{multline*}Applying the Cauchy-Schwarz inequality, we deduce that \begin{multline*} \underset{1\leq r\leq R}{\mathbb{E}}\Big| \int T^{\floor{u_1(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1})) \prod_{i=2}^{k_0} T^{\floor{u_i(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{i,R})) \ d\mu \Big|\leq\\ \Big( \underset{1\leq r\leq R}{\mathbb{E}} \int S^{\floor{u_1(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} S^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_{1}))\\ \prod_{i=2}^{k_0} S^{\floor{u_i(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} S^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_{i,R})) \ d(\mu\times \mu) \Big)^{1/2}, \end{multline*}where $S=T\times T$, $F_1=\overline{f_1}\otimes f_1$ and $F_{i,R}=\overline{f_{i,R}}\otimes f_{i,R}$. 
A final application of the Cauchy-Schwarz inequality bounds the last quantity by \begin{multline*} \bignorm{\underset{1\leq r\leq R}{\mathbb{E}} S^{\floor{u_1(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} S^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_{1})) \prod_{i=2}^{k_0} S^{\floor{u_i(r)}}(\prod_{\underline{\varepsilon}\in [[s]]}^{} S^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_{i,R})) }_{L^2(\mu\times \mu)}^{1/2}. \end{multline*}Applying Proposition \ref{sublinearonly}, we deduce that the $\limsup$ of this last average is bounded by $O_{k_0}(1)$ (which is $O_k(1)$) times \begin{equation*} \nnorm{\prod_{\underline{\varepsilon}\in [[s]]}^{} S^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_{1})}_{2k_0,T\times T}\leq \nnorm{\prod_{\underline{\varepsilon}\in [[s]]}^{} S^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_{1})}_{2k,T\times T}. \end{equation*}Our original problem reduces to bounding \begin{equation*} \frac{1}{M}+\sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \nnorm{\prod_{\underline{\varepsilon}\in [[s]]}^{} S^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_{1})}_{2k,T\times T}^{1/2}+o_R(1), \end{equation*}which is smaller than \begin{equation*} \frac{1}{M}+\sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \nnorm{\prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon}}({\bf m})}+ h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{1})}_{2k+1,T}+o_R(1) \end{equation*}and the conclusion follows. 
\end{proof} \begin{proof}[Proof of Proposition \ref{sublinearonly}] Using the arguments after the statement of Proposition \ref{factors}, we may reduce to the case that $a_1(t)$ has maximal growth rate among $a_1,...,a_k$. We induct on $k$. In the base case of the induction, we want to show that \begin{equation*} \limsup\limits_{N\to+\infty} \ \sup_{|c_n|\leq 1} \ \bignorm{\underset{1\leq n\leq N}{\mathbb{E}} c_n T^{\floor{a_1(n)}} f_1 }_{L^2(\mu)}\ll \nnorm{f_1}_2. \end{equation*}Due to Lemma \ref{mainlemma}, it suffices to show that \begin{equation}\label{sngl} \limsup\limits_{N\to+\infty} \sup_{|c_{n,N}|\leq 1}\bignorm{\underset{N\leq n\leq N+L(N)}{\mathbb{E}} c_{n,N} T^{\floor{a_1(n)}} f_1 }_{L^2(\mu)}\ll \nnorm{f_1}_2 \end{equation}for some suitable sub-linear function $L(t)\in\mathcal{H}$. Since $a_1(t)\succ \log t$, we conclude that \begin{equation*} |a'_1(t)|^{-1}\prec |a_1''(t)|^{-1/2} \end{equation*}by Proposition \ref{growth}. We choose the function $L(t)$ to satisfy \begin{equation*} |a'_1(t)|^{-1}\prec L(t) \prec |a_1''(t)|^{-1/2}. \end{equation*} Therefore, for every $n\in [N,N+L(N)]$, we can write \begin{equation*} a_1(n)=a_1(N)+(n-N)a_1'(N)+o_N(1), \end{equation*}which in turn implies that, for $N$ sufficiently large, we can write \begin{equation*} \floor{a_1(n)}=\floor{a_1(N)+(n-N)a_1'(N)}+e_{n,N}, \end{equation*}where $e_{n,N}\in \{0,\pm 1\}$. Substituting this in \eqref{sngl}, we want to prove that \begin{equation*} \limsup\limits_{N\to+\infty} \sup_{|c_{n,N}|\leq 1}\bignorm{\underset{N\leq n\leq N+ L(N)}{\mathbb{E}} c_{n,N} T^{\floor{a_1(N)+(n-N)a_1'(N)}+e_{n,N}} f_1 }_{L^2(\mu)}\ll \nnorm{f_1}_2. \end{equation*} Using Lemma \ref{errors}, we can reduce our problem to \begin{equation*} \limsup\limits_{N\to+\infty} \sup_{|c_{h,N}|\leq 1}\bignorm{\underset{0\leq h\leq L(N)}{\mathbb{E}} c_{h,N} T ^{\floor{a_1(N)+ha_1'(N)}} f_1 }_{L^2(\mu)}\ll \nnorm{f_1}_2. 
\end{equation*}This bound can be proven using the change of variables trick that we have seen in the first example in Section \ref{sectionfactors}. However, we will establish our assertion with a slightly quicker argument below. We shall apply the van der Corput inequality. We fix a positive integer $M$ and choose the quantity $M_N= \floor{|M/a_1'(N)|} $. It is easy to check that $M_N\prec L(N)$, since $L(N)|a'_1(N)|\to+\infty$. Therefore, we can apply the van der Corput inequality to deduce that \begin{multline*} \bignorm{\underset{0\leq h\leq L(N)}{\mathbb{E}} c_{h,N} T^{\floor{a_1(N)+ha_1'(N)}} f_1 }_{L^2(\mu)}^2\ll \\ \frac{1}{M_N} +\underset{|m|\leq M_N}{\mathbb{E}}\Big|\underset{0\leq h \leq L(N)}{\mathbb{E}} \overline{c_{h,N}}c_{h+m,N}\int T^{\floor{a_1(N)+ha_1'(N)}}\overline{f_1}\cdot T^{\floor{a_1(N)+(h+m)a_1'(N)}}f_1 \ d\mu \Big| +o_N(1), \end{multline*}where the implied constant is absolute (and does not depend on $M$). We write \begin{equation*} \floor{a_1(N)+(h+m)a_1'(N)}=\floor{a_1(N)+ha_1'(N)}+\floor{ma_1'(N)}+e_{m,h,N}, \end{equation*}where $e_{m,h,N}\in \{0,\pm 1\}$. We rewrite the double average in the middle as \begin{multline*} \underset{|m|\leq M_N}{\mathbb{E}}\Big|\underset{0\leq h \leq L(N)}{\mathbb{E}} \overline{c_{h,N}}c_{h+m,N}\int \overline{f_1}\cdot T^{\floor{ma_1'(N)}+e_{m,h,N} }f_1 \ d\mu \Big| \leq \\ \sum_{z\in \{0,\pm 1\}}^{} \underset{|m|\leq M_N}{\mathbb{E}} \Big|\int \overline{f_1}\cdot T^{\floor{ma_1'(N)} +z}f_1\ d\mu \Big|. \end{multline*}However, note that $|ma_1'(N)|\leq M_N|a_1'(N)|\leq M$. Thus, for any $z\in \{0,\pm 1\}$, we have \begin{equation*} \underset{|m|\leq M_N}{\mathbb{E}} \Big|\int \overline{f_1} \cdot T^{\floor{ma_1'(N)} +z}f_1\ d\mu\Big|=\frac{2M+1}{2M_N+1}\underset{|m'|\leq M}{\mathbb{E}}\ p_N(m') \Big|\int \overline{f_1}\cdot T^{m' +z}f_1\ d\mu\Big|, \end{equation*}where $p_N(m')=\#\{ m\in \mathbb{N}{:}\; \floor{ma_1'(N)}=m' \}$. 
Since $a_1'(N)\to 0$, we can easily see that for $N$ large enough, we must have \begin{equation*} p_N(m')\leq \Big|\frac{1}{a_1'(N)}\Big|. \end{equation*}Therefore, we have \begin{multline*} \frac{2M+1}{2M_N+1}\underset{|m'|\leq M}{\mathbb{E}}\ p_N(m') \Big|\int \overline{f_1}\cdot T^{m' +z}f_1\ d\mu\Big|\leq \frac{(2M+1)}{(2M_N+1)|a_1'(N)|}\ \underset{|m'|\leq M}{\mathbb{E}}\ \Big|\int \overline{f_1}\cdot T^{m' +z}f_1\ d\mu\Big|\ll\\ \underset{|m'|\leq M}{\mathbb{E}}\ \Big|\int \overline{f_1}\cdot T^{m' +z}f_1\ d\mu\Big|. \end{multline*} Thus, the square of our original average is $\ll$ \begin{equation*} \sum_{z\in\{0,\pm 1\}} \underset{|m'|\leq M}{\mathbb{E}}\ \Big|\int \overline{f_1}\cdot T^{m' +z}f_1\ d\mu\Big|+o_N(1) \end{equation*}for some implied constant that does not depend on the original integer $M$. Therefore, we take first $N\to +\infty$ and then $M\to+\infty$ and use the Cauchy-Schwarz inequality to easily reach the conclusion. This establishes the base case of the induction. Now assume the claim has been established for all positive integers less than or equal to $ k-1$ (for some $k\geq 2$). We prove that it holds for $k$ as well. Since we have assumed that $a_1$ has maximal growth rate, we may reorder the given functions so that we have $a_1(t)\gg \dots \gg a_k(t)$. Let $k_0\leq k$ be the largest integer, such that the function $a_{k_0}$ has the same growth rate as $a_1(t)$. This means that all the functions $a_1,...,a_{k_0}$ have the same growth rate. 
We rewrite our average in \eqref{62} as \begin{multline} \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1 }\ \sup_{|c_n|\leq 1 }\bignorm{ \underset{1\leq n\leq N}{\mathbb{E}} c_n \ \prod_{i=1}^{k_0} T^{\floor{(a_i(n) -a_{k_0}(n)) +a_{k_0}(n)}}f_i\cdot \prod_{i=k_0+1}^{k} T^{\floor{a_i(n)}}f_i }_{L^2(\mu)}=\\ \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1 }\ \sup_{|c_n|\leq 1 }\bignorm{ \underset{1\leq n\leq N}{\mathbb{E}} c_n \ T^{\floor{a_{k_0}(n)}}( \prod_{i=1}^{k_0} T^{\floor{(a_i(n) -a_{k_0}(n))} +e_{i,n}}f_i)\ \prod_{i=k_0+1}^{k} T^{\floor{a_i(n)}}f_i }_{L^2(\mu)} \end{multline}for some $e_{i,n}\in\{0,\pm 1\}$. Using Lemma \ref{errors}, we may reduce our problem to the case that all the error terms $e_{i,n}$ are zero. Note that the function $a_{k_0}(n)$ dominates each one of the functions $a_1-a_{k_0},...,a_{k_0-1}-a_{k_0}$, as well as the functions $a_i, i\geq k_0$. Now, we choose sequences of functions $f_{2,N},...,f_{k,N}$ so that the above average is $1/N$ close to the supremum (we also write $f_{1,N}=f_1$). In addition, we invoke Lemma \ref{mainlemma} to deduce that it is sufficient to show that \begin{multline} \limsup\limits_{R\to+\infty}\ \underset{1\leq r\leq R}{\mathbb{E}}\ \sup_{|c_{n,r}|\leq 1}\ \bignorm{ \underset{r\leq n\leq r+L(r)}{\mathbb{E}} c_{n,r}\ T^{\floor{a_{k_0}(n)}}( \prod_{i=1}^{k_0} T^{\floor{(a_i(n) -a_{k_0}(n))}}f_{i,R})\\ \prod_{i=k_0+1}^{k} T^{\floor{a_i(n)}}f_{i,R} }_{L^2(\mu)} \ll_k \nnorm{f_1}_{2k} \end{multline}for a sub-linear function $L(t)\in\mathcal{H}$ that we shall choose momentarily. Namely, we choose the function $L\in\mathcal{H}$ to satisfy \begin{equation*} |a_{k_0}'(t)|^{-1} \prec L(t)\prec |a_{k_0}''(t)|^{-1/2} \end{equation*}and \begin{equation*} L(t)\prec (\psi'(t))^{-1} \end{equation*}for all the functions $\psi$ of the set $\mathcal{A}=\{a_1-a_{k_0},...,a_{k_0-1}-a_{k_0},a_{k_0+1},...,a_k\}$. 
To see that such a function exists, we only need to prove that for any function $\psi\in\mathcal{A}$, we have \begin{equation*} (a_{k_0}'(t))^{-1}\prec (\psi'(t))^{-1} \end{equation*}and \begin{equation*} (a_{k_0}'(t))^{-1} \prec |a_{k_0}''(t)|^{-1/2}. \end{equation*}The first relation follows easily from the fact that $a_{k_0}$ dominates all functions in $\mathcal{A}$ and L'Hospital's rule. The second relation follows from Proposition \ref{growth}, since $\log t\prec a_{k_0}(t)\prec t$. Using similar approximations as in the proof of Proposition \ref{sublinearseminorm}, we deduce that for $r$ sufficiently large, we can write \begin{equation*} \floor{\psi(n)}=\floor{\psi(r)}+e_{\psi,n} \ \ \text{ for }\ \ n\in [r,r+L(r)] \end{equation*}for every $\psi\in\mathcal{A}$, where $e_{\psi,n}\in \{0,\pm 1\}$. In addition, we can write \begin{equation*} \floor{a_{k_0}(n)}=\floor{a_{k_0}(r)+(n-r)a'_{k_0}(r)}+e_{a_{k_0},n}\ \ \text{ for } \ \ \ n\in [r,r+L(r)], \end{equation*}where $e_{a_{k_0},n}\in \{0,\pm 1\}$. Using the argument in Lemma \ref{errors} once more to remove the error terms, our original problem reduces to showing \begin{multline} \limsup\limits_{R\to+\infty} \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1 }\underset{1\leq r\leq R}{\mathbb{E}} \ \sup_{|c_{h,r}|\leq 1}\bignorm{\underset{0\leq h\leq L(r)}{\mathbb{E}} c_{h,r}T^{\floor{a_{k_0}(r)+ha'_{k_0}(r) }}( \prod_{i=1}^{k_0} T^{\floor{(a_i(r) -a_{k_0}(r))}}f_{i})\\ \prod_{i=k_0+1}^{k} T^{\floor{a_i(r)}}f_{i} }_{L^2(\mu)}\ll_k \nnorm{f_1}_{2k}. \end{multline}Since the functions $f_i$ are bounded by 1, the last relation follows if we prove that \begin{multline*} \limsup\limits_{R\to+\infty} \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1 } \underset{1\leq r\leq R}{\mathbb{E}} \ \sup_{|c_{h,r}|\leq 1}\bignorm{\underset{0\leq h\leq L(r)}{\mathbb{E}} c_{h,r}T^{\floor{a_{k_0}(r)+ha'_{k_0}(r) }}( \prod_{i=1}^{k_0} T^{\floor{(a_i(r) -a_{k_0}(r))}}f_{i}) }_{L^2(\mu)}\\ \ll_k \nnorm{f_1}_{2k}. 
\end{multline*}We choose functions $f_{2,R},...,f_{k_0,R}$ so that the corresponding average is $1/R$ close to the supremum. Write $F_{r,R}:= \prod_{i=1}^{k_0} T^{\floor{(a_i(r) -a_{k_0}(r))}}f_{i,R}$. We also fix a positive integer $M$. Repeating the same argument as in the base case, we can show that \begin{multline}\label{label} \sup_{|c_{h,r}|\leq 1}\bignorm{\underset{0\leq h\leq L(r)}{\mathbb{E}} c_{h,r}\ T^{\floor{a_{k_0}(r)+ha'_{k_0}(r) }}F_{r,R} }_{L^2(\mu)}^2\ll\\ \frac{1}{M}+ \sum_{z\in \{0,\pm 1\} }^{} \underset{|m|\leq M}{\mathbb{E}}\ \Big|\int F_{r,R}\ \cdot T^{m+z}F_{r,R}\ d \mu\Big|+o_r(1). \end{multline}Therefore, we have \begin{multline}\label{sw} \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1 } \underset{1\leq r\leq R}{\mathbb{E}} \ \sup_{|c_{h,r}|\leq 1}\bignorm{\underset{0\leq h\leq L(r)}{\mathbb{E}} c_{h,r}\ T^{\floor{a_{k_0}(r)+ha'_{k_0}(r) }}( \prod_{i=1}^{k_0} T^{\floor{(a_i(r) -a_{k_0}(r))}}f_{i}) }_{L^2(\mu)}^2\ll \\ \frac{1}{M}+\underset{1\leq r\leq R}{\mathbb{E}} \sum_{z\in \{0,\pm 1\} }^{} \underset{|m|\leq M}{\mathbb{E}}\ \Big|\int F_{r,R} \cdot T^{m+z}F_{r,R}\ d \mu\Big|+ o_R(1) \end{multline}and we want to bound this last quantity by $O_k(1)$ times $\nnorm{f_1}_{2k}^2$. 
For a fixed $m\in [-M,M]$ and $z\in\{0,\pm 1\}$, we apply the Cauchy-Schwarz inequality to get \begin{multline*} \underset{1\leq r\leq R}{\mathbb{E}}\Big|\int F_{r,R} \cdot T^{m+z}F_{r,R}\ d \mu\Big|\leq \Big( \underset{1\leq r\leq R}{\mathbb{E}}\Big|\int F_{r,R} \cdot T^{m+z}F_{r,R}\ d \mu\Big|^2 \Big)^{1/2}=\\ \Big( \int \underset{1\leq r\leq R}{\mathbb{E}} (\overline{F_{r,R}}\otimes F_{r,R}) \ (T\times T)^{m+z} (F_{r,R}\otimes \overline{F_{r,R}}) \ d (\mu\times \mu) \Big)^{1/2}=\\ \Big(\int \underset{1\leq r\leq R}{\mathbb{E}} \ \prod_{i=1}^{k_0} (T\times T)^{\floor{a_i(r)-a_{k_0}(r)}}\big(( \overline{f_{i,R}}\otimes f_{i,R}) \cdot (T\times T)^{m+z} (f_{i,R} \otimes \overline{f_{i,R}}) \big) \ d(\mu\times \mu) \Big)^{1/2}\leq \\ \bignorm{ \underset{1\leq r\leq R}{\mathbb{E}} \ \prod_{i=1}^{k_0-1} (T\times T)^{\floor{a_i(r)-a_{k_0}(r)}}\big( (\overline{f_{i,R}}\otimes f_{i,R}) \cdot (T\times T)^{m+z} (f_{i,R} \otimes \overline{f_{i,R}}) \big) }_{L^2(\mu\times \mu)}^{1/2} \end{multline*}where $f_{1,R}=f_1$. Note that the functions $a_1-a_{k_0},...,a_{k_0-1}-a_{k_0}$ satisfy the hypotheses of Proposition \ref{sublinearonly}. Therefore, we can apply the induction hypothesis (for $k_0-1<k$) to conclude that \begin{multline*} \bignorm{ \underset{1\leq r\leq R}{\mathbb{E}} \ \prod_{i=1}^{k_0-1} (T\times T)^{\floor{a_i(r)-a_{k_0}(r)}}\big( (\overline{f_{i,R}}\otimes f_{i,R}) \cdot (T\times T)^{m+z} (f_{i,R} \otimes \overline{f_{i,R}}) \big) }_{L^2(\mu\times \mu)}^{1/2} \ll_{k_0}\\ \nnorm{(\overline{f_{1}}\otimes f_{1}) \cdot (T\times T)^{m+z} (f_{1} \otimes \overline{f_{1}})}_{2k_0-2,T\times T}^{1/2} \end{multline*}and the last quantity is smaller than $ \nnorm{\overline{f_1} \cdot T^{m+z} f_1}_{2k_0-1,T}$. 
Putting this in \eqref{sw}, we get \begin{multline*} \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1 } \underset{1\leq r\leq R}{\mathbb{E}} \ \sup_{|c_{h,r}|\leq 1}\bignorm{\underset{0\leq h\leq L(r)}{\mathbb{E}} c_{h,r}\ T^{\floor{a_{k_0}(r)+ha'_{k_0}(r) }}( \prod_{i=1}^{k_0} T^{\floor{(a_i(r) -a_{k_0}(r))}}f_{i}) }_{L^2(\mu)}^2\ll_k\\ \frac{1}{M}+\sum_{z\in \{0,\pm 1\}}^{}\ \underset{|m|\leq M}{\mathbb{E}}\nnorm{\overline{f_1} \cdot T^{m+z} f_1}_{2k_0-1,T} +o_R(1)\leq\\ \frac{1}{M}+\sum_{z\in \{0,\pm 1\}}^{}\ \underset{|m|\leq M}{\mathbb{E}}\nnorm{\overline{f_1} \cdot T^{m+z} f_1}_{2k-1,T} +o_R(1), \end{multline*}since $k_0\leq k$. Taking $R\to+\infty$ and then $M\to+\infty$, we get that it suffices to show that \begin{equation*} \limsup\limits_{M\to+\infty} \underset{|m|\leq M}{\mathbb{E}}\nnorm{\overline{f_1} \cdot T^{m+z} f_1}_{2k-1} \leq \nnorm{f_1}_{2k}^2 \end{equation*}for any $z\in \{0,\pm 1\}$. This follows easily by raising to the $2^{2k-1}$-th power and using the power mean inequality, as well as the definition of the Host-Kra seminorms. \end{proof} \section{The general case of Proposition \ref{factors} }\label{reductionestimates} In this section we aim to prove the general case of Proposition \ref{factors}. We maintain the notation of Proposition \ref{factors} and we also assume that the function $a_1$ has maximal growth rate among the functions $a_1,...,a_k$ (and is at least super-linear). We also consider the set of functions \begin{equation*} S=\{a_1(t),a_1(t)-a_2(t),...,a_1(t)-a_k(t) \} \end{equation*} Functions in $S$ dominate $\log t$ by our hypothesis. Finally, we assume that not every one of the involved functions has the form $p(t)+g(t)$, where $p\in\mathbb{R}[t]$ and $g\in \mathcal{H}$ is sub-fractional, since this case was covered in the previous section (it follows from Corollary \ref{sublinearcorollary}). We will use the following decomposition result from \cite{Richter}. 
\begin{lemma}\cite[Lemma A.3]{Richter}\label{decomposition} Let $a_1,...,a_k\in\mathcal{H}$ have polynomial growth. Then, there exist a natural number $m$, functions $g_1,...,g_m\in S(a_1,...,a_k)$, real numbers $c_{i,j}$, where $1\leq i\leq k$ and $1\leq j\leq m$, and real polynomials $p_1,...,p_k$ such that: \begin{enumerate} \item $g_1\prec g_2\prec...\prec g_m$, \item $t^{l_i}\prec g_i(t)\prec t^{l_i+1}$ for some $l_i\in \mathbb{Z}^{+}$ or $g_i\equiv 0$ (i.e. they are strongly non-polynomial) and \item for all $i\in \{1,2,...,k\}$ we have \begin{equation*} a_i(t)=\sum_{j=1}^{m} c_{i,j}g_j(t)+p_i(t)+o_t(1). \end{equation*} \end{enumerate} \end{lemma} Note that the functions $g_j$ do not necessarily belong to the set of linear combinations of the $a_1,...,a_k$. The proof of this lemma can be found in the appendix of \cite{Richter}. As an example, if we have the pair $\{t+t^{3/2}, t^2+t^{5/2}\}$, then the functions in the above decomposition are $\{g_1,g_2,p_1,p_2\}=\{t^{3/2},t^{5/2},t,t^2\}$. Returning to our original problem, we split the given family of functions into two sets \begin{equation*} J_1=\{a_i{:}\; a_i(t)\ll t^{\delta} \text{ for all } \delta>0\} \text{ and } J_2=\{a_i{:}\; \exists\ \delta>0 \text{ with } \ a_i(t)\gg t^{\delta} \}. \end{equation*} We do the same for the set $S$ of differences:\begin{equation*} S_1=\{f\in S{:}\; f(t)\ll t^{\delta} \text{ for all } \delta>0\} \text{ and } S_2=\{f\in S{:}\; \exists\ \delta>0 \text{ with } \ f(t)\gg t^{\delta} \}. \end{equation*} We will see that the slow-growing functions in sets $J_1$ and $S_1$ will be approximately equal to a constant, when we consider averages on small intervals. For the remaining functions, we will use the Taylor expansion to approximate them. We split the proof into several steps. Steps 1 through 4 of this proof correspond to step 1 in example a) of section \ref{sectionfactors}, while steps 5 and 6 of the proof correspond to step 2 of the same example. 
The remaining two steps correspond to step 3 of example a). In Step 8, we will also use the results of the special case of the previous section. \subsection{Step 1: Introducing a double averaging} Let $L(t)\in\mathcal{H}$ be a sub-linear function to be specified later. We can consider a priori functions that satisfy $L(t)\prec t^{1-\varepsilon}$ for some $\varepsilon>0$ (i.e. we exclude functions like $t/\log t$ ). Invoking Lemma \ref{mainlemma}, we see that it is sufficient to prove that\begin{equation}\label{xef} \limsup\limits_{R\to\infty} \underset{1\leq r\leq R}{\mathbb{E}} \sup_{|c_{r,n}|\leq 1} \bignorm{\underset{r\leq n\leq r+L(r)}{\mathbb{E}} c_{r,n}\ T^{\floor{a_1(n)}} f_1\cdot ...\cdot T^{\floor{a_k(n)}}f_{k,R} }_{L^2(\mu)}^{2^t}= 0 \end{equation}for any sequences of 1-bounded functions $f_{2,R},...,f_{k,R}$ and some positive integer parameter $t$, which will depend only on the original functions $a_1,...,a_k$. Therefore, when applying Lemma \ref{errors}, we can always assume that the implicit constant (which depends on the exponent $2^t$) is an $O_{a_1,...,a_k}(1)$ constant. We observe that \eqref{xef} follows if we show that\begin{equation}\label{R} \sup_{||f_2||_{\infty}\leq 1,...,||f_k||_{\infty} \leq 1} \underset{1\leq r\leq R}{\mathbb{E}} \sup_{|c_{r,n}|\leq 1} \bignorm{\underset{r\leq n\leq r+L(r)}{\mathbb{E}} c_{r,n}\ T^{\floor{a_1(n)}} f_1\cdot ...\cdot T^{\floor{a_k(n)}}f_{k} }_{L^2(\mu)}^{2^t} \end{equation}goes to 0, as $R\to+\infty$. \subsection{Step 2: Eliminating the small functions of $J_1$} While in example a) of Section 4 we used the Taylor expansion right at the beginning, it is more convenient to reverse our steps a bit in the proof. Assume that the function $a_i$ belongs to the set $J_1$ (namely, it is a sub-fractional function). Then, for any $n\in [r,r+L(r)]$, we have\begin{equation*} |a_i(n)-a_i(r)|=|n-r||a'_i(\xi)| \end{equation*} for some $\xi \in [r,n]$. 
Since $|a'_i(t)|\searrow 0$, we get \begin{equation*} |a_i(n)-a_i(r)|\leq |L(r)||a'_i(r)|, \end{equation*}which is $o_r(1)$. Note that we already assumed that we will eventually choose $L\in\mathcal{H}$ such that $L(t)\ll t^{1-\varepsilon}$, which makes the previous statements valid (see the discussion at the end of the Appendix). Thus, if $r$ is sufficiently large and $n\in [r,r+L(r)]$, we can write $\floor{a_i(n)}=\floor{a_i(r)}+\varepsilon_{r,n}$, where $\varepsilon_{r,n}\in \{0,1\}$. Using the argument in Lemma \ref{errors}, we absorb the error terms $\varepsilon_{r,n}$ in the supremum outside of the averages in \eqref{R}. The iterate corresponding to the function $f_{i}$ has now become constant and we can ignore it. In conclusion, we have reduced our problem to the case that the set $J_1$ is empty. \subsection{Step 3: Concatenating the functions of the set $S_1$} Assume that the function $a_1-a_i$ belongs to $S_1$. Then, mimicking the arguments of the previous step, we can write $a_i=a_1+(a_i-a_1)$ where the function $a_i-a_1$ is asymptotically a constant in the interval $[r,r+L(r)]$. Then, we can combine the product of all such terms \begin{equation*} T^{\floor{a_1(n)}}f_1 \prod_{a_1-a_i\in S_1} T^{\floor{a_i(n)}}f_{i} \end{equation*}into one iterate $T^{\floor{a_1(n)}} f_{r}$ (we use again the argument in Lemma \ref{errors} to remove the error terms), where \begin{equation}\label{fapeiro} f_r= f_1\cdot T^{\floor{\theta_1(r)}}h_{1}\cdot... \cdot T^{\floor{\theta_{\ell}(r)}}h_{\ell}, \end{equation}where $h_1,...,h_{\ell}$ are functions in $L^{\infty}(\mu)$ and the functions $\theta_1,...,\theta_{\ell} \in \mathcal{H}$ are sub-linear functions that satisfy \begin{equation*} \log t\prec \theta_{i}(t)\prec t^{\delta} \end{equation*}for all $\delta>0$. In addition, the assumption that the pairwise differences of the functions $a_1,...,a_k$ dominate $\log t$ implies that \begin{equation*} \log t\prec \theta_{i}(t)-\theta_j(t) \ \end{equation*}for $i\neq j$. 
Now the original problem reduces to the following: If all the functions $a_1,...,a_k$ are such that the sets $J_1$ and $S_1$ are empty, then show that the averages \begin{multline} \sup_{||f_2||_{\infty},...,||f_k||_{\infty} \leq 1}\sup_{ ||h_1||_{\infty},...,||h_{\ell}||_{\infty}\leq 1}\\ \underset{1\leq r \leq R}{\mathbb{E}}\ \sup_{|c_{r,n}|\leq 1} \bignorm{\underset{{r\leq n\leq r+L(r)}}{\mathbb{E}}\ c_{r,n}\ T^{\floor{a_1(n)}} f_{r}\cdot ...\cdot T^{\floor{a_k(n)}}f_{k} }_{L^2(\mu)}^{2^t} \end{multline} go to 0 as $R\to+\infty$, where the function $f_r$ is the function in \eqref{fapeiro}. We can repeat the same argument of this step to reduce to the case where $a_i(t)-a_j(t)\gg t^{\delta}$ for some $\delta>0$. Indeed, if the difference $a_i-a_j$ is sub-fractional, we can combine the iterates corresponding to these two functions into a single iterate of the form $T^{\floor{a_i(n)}}g_r$ for some function $g_r$. In order to replace $g_r$ by a function that does not depend on $r$, we move the supremum of the $f_2,...,f_k$ inside the outer average. In conclusion, it suffices to show that \begin{multline}\label{no logs} \sup_{ ||h_1||_{\infty},...,||h_{\ell}||_{\infty}\leq 1}\ \underset{1\leq r \leq R}{\mathbb{E}} \ \sup_{||f_2||_{\infty},...,||f_k||_{\infty}\leq 1}\ \sup_{|c_{r,n}|\leq 1} \bignorm{\underset{{r\leq n\leq r+L(r)}}{\mathbb{E}}c_{r,n}\ T^{\floor{a_1(n)}} f_{r}\cdot ...\cdot T^{\floor{a_k(n)}}f_{k} }_{L^2(\mu)}^{2^t} \end{multline}goes to 0 as $R\to+\infty$, where $f_r$ is the function in \eqref{fapeiro} and all differences $a_i-a_j$ dominate some fractional power\footnote{Since our functions $a_1,...,a_k$ dominate a fractional power, we can now use the fact that the classes $S(a_i,k)$ can be well defined in order to approximate all of them by polynomials.}. 
Recall that the functions $\theta_i$ satisfy \begin{equation*} \log t\prec \theta_{i}(t)\prec t^{\delta} \ \text{ for every }\ \delta>0 \end{equation*}and \begin{equation*} \log t\prec \theta_{i}(t)-\theta_j(t). \end{equation*} \subsection{Step 4: Approximating by polynomials} In this step, we will use the Taylor expansion to replace the functions $a_i$ by polynomials in the intervals $[r,r+L(r)]$. First of all, we can use Lemma \ref{decomposition} in order to write \begin{equation}\label{expansionform} a_i(t)=\sum_{j=1}^{m}c_{i,j}g_j(t)+q_i(t)+o_t(1), \end{equation}where $g_1\prec g_2\prec...\prec g_m$ are strongly non-polynomial functions and $q_i(t)$ are real polynomials. We immediately conclude that the function $g_m$ cannot be sub-fractional. Indeed, if that was the case, then all the functions $a_i$ would be a sum of a polynomial plus a sub-fractional function, which is at odds with our initial assumption. The $o_t(1)$ terms can be eliminated by using an argument similar to the proof of Lemma \ref{errors}. In addition, we may assume that $c_{1,m}\neq 0$ (and thus $g_m$ exists in the expansion of $a_1$). This can be proven by an argument similar to the one in the beginning of Section \ref{sectionfactors} (the same reasoning we used to reduce our problem to the case that $a_1$ has maximal growth rate). Of course, by assuming this new property, we abandon the assumption that $a_1$ has maximal growth rate. We define \begin{equation*} \mathcal{F}=\{g_1,...,g_m\} \end{equation*} and let $\mathcal{A}=\{g_1,...,g_l\}\subseteq\mathcal{F}$ be the set of functions that satisfy $g_i(t)\ll t^{\delta}$ for all $\delta>0$ (i.e.\ the sub-fractional functions). We have that $g_m\not\in \mathcal{A}$. By the reductions in steps 2 and 3, we have that $a_i(t)\gg t^{\delta_i}$ for some $\delta_i>0$ and a similar relation holds for the differences $a_i-a_j$. 
Therefore, we have the following property: \begin{equation}\label{Papeiro}\tag{{\bf P}} \text{ If}\ i_1\neq i_2, \text{ we have either}\ c_{i_1,j}\neq c_{i_2,j}\ \text{ for some}\ j>l, \text{ or}\ q_{i_1}(t)-q_{i_2}(t)\ \text{is non-constant.} \end{equation} Now every function $g\in\mathcal{A}$ satisfies \begin{equation*} \max_{n\in [r,r+L(r)]}|g(n)-g(r)|=o_r(1) \end{equation*}by the arguments in the preceding steps. We can use the argument in Lemma \ref{errors} to remove the error term $o_r(1)$ and then substitute each function $g\in \mathcal{A}$ in the interval $[r,r+L(r)]$ by a constant (namely, the value of the function $g$ at $r$). These constants can be absorbed by the supremum of the $f_2,...,f_k$ and the use of Lemma \ref{errors}. Therefore, we may assume that all functions $g_1,...,g_m$ dominate some fractional power $t^{\delta}$ (equivalently $\mathcal{A}=\emptyset$) and that property \eqref{Papeiro} above holds with $l=0$. Since the functions $g_1,...,g_m$ dominate some fractional power, the classes \begin{equation*} S(g_i,n)=\{f\in\mathcal{H}, (g_i^{(n)}(t))^{-1/n}\preceq f(t)\prec (g_i^{(n+1)}(t))^{-1/(n+1)} \} \end{equation*}are well defined for $n$ large enough. We remind the reader that these classes and their properties are all studied in the Appendix and we will use them freely from this point onward. Let $d$ be natural number and for every function $g\in \mathcal{F}$, we consider the natural $k_g$, such that the function $|g_m^{(d)}(t)|^{-\frac{1}{d}}$ belongs to the class $S(g,k_g)$. This class always exists, if we pick our number $d$ to be sufficiently large. We immediately deduce that $k_g\leq d$ for every $g\in \mathcal{F}$, while $k_{g_m}=d$. Let $q$ be a non-integer, such that $t^q$ dominates all functions $g_1,...,g_m$ and the polynomials $p_1,...,p_k$. In particular, this implies that $g_i^{(q)}(t)\prec 1$, for all $1\leq i\leq m$ by Proposition \ref{prop:basic}. 
We make the additional assumption that our integers $k_g$ are very large compared to $q$, which can be attained if we take our initial number $d$ to be sufficiently large. The inequality $k_g\geq 10q$ will suffice for our purposes. \begin{definition} We say that two functions $f\ll g$ of $\mathcal{H}$ have the property $\mathcal{Q}$, if they have the same growth rate, or if the ratio \begin{equation*} \frac{g(t)}{f(t)} \end{equation*}dominates some fractional power $ t^{\delta},\ \delta>0$. \end{definition} We consider two possible cases:\\ a) Assume that for every $g\in \mathcal{F}\setminus\{g_m\}$, the functions $|g_m^{(d)}(t)|^{-\frac{1}{d}}$ and $|g^{(k_g)}(t)|^{-\frac{1}{k_g}}$ have the property\footnote{An example of functions that fall in this case is the pair $(t^{3/2},t\log t)$, if we consider their second derivatives. We can easily check that the ratio of the second derivatives of these two functions {raised to the $-\frac{1}{2}$-th power} grows like the function $t^{1/4}$.} $\mathcal{Q}$. Then, our selection will be the classes $S(g,k_g)$ as they stand. Furthermore, we choose a (sub-linear) function $L(t)\in\mathcal{H}$ that belongs in the intersection of these classes (which is non-empty by definition). In this case, we call the function $g_m$ our "special" function. Note that \begin{equation*} |g^{(k_g)}(t)|^{-\frac{1}{k_g}}\preceq |g_m^{(d)}(t)|^{-\frac{1}{d}} \end{equation*}for $g\neq g_m$ in this case.\\ b) Assume that the above case does not hold\footnote{An example of functions that fall in this second case is the pair $(t\log t,t\log\log t)$, if we again consider their second derivatives. A simple computation yields that the growth rate of the ratio of the involved functions grows like the function $\sqrt{\log t}$ and, thus, they fail property $\mathcal{Q}$.}. 
Then, among all the functions $|g^{(k_g)}(t)|^{-\frac{1}{k_g}}$ for which the property $\mathcal{Q}$ fails (in relation to $|g_m^{(d)}(t)|^{-\frac{1}{d}}$), we choose a function $g$ for which $|g^{(k_g)}(t)|^{-\frac{1}{k_g}}$ has minimal growth rate. Then, we choose a function $L\in\mathcal{H}$ with the following properties: i) If a function $\tilde{g}$ is such that $|\tilde{g}^{(k_{\tilde{g}})}(t)|^{-\frac{1}{k_{\tilde{g}}}}$ fails to satisfy property $\mathcal{Q}$ in relation to $|g_m^{(d)}(t)|^{-\frac{1}{d}}$ and has different growth rate than $g$, then we have \begin{equation*} |{(\tilde{g})}^{(k_{\tilde{g}}-1)}(t)|^{-\frac{1}{k_{\tilde{g}}-1}} \prec L(t)\prec |(\tilde{g})^{(k_{\tilde{g}})}(t)|^{-\frac{1}{k_{\tilde{g}}}}. \end{equation*} Namely, we have $L(t)\in S(\tilde{g},k_{\tilde{g}}-1)$. ii) If the function $\tilde{g}$ has the same growth rate as $g$, then we have $k_g=k_{\tilde{g}}$ and the classes $S(g,k_g)$ and $S(\tilde{g},k_{\tilde{g}})$ coincide. In this case, we leave the integer $k_{\tilde{g}}$ as is. iii) The third case is when the function $\tilde{g}$ satisfies property $\mathcal{Q}$ in relation to $|g_m^{(d)}(t)|^{-\frac{1}{d}}$. Then, we leave the class $S(\tilde{g},k_{\tilde{g}})$ as is and take $L(t)\in S(\tilde{g},k_{\tilde{g}})$. The existence of such a function $L(t)$ follows by our minimality assumption on $|g^{(k_g)}(t)|^{-\frac{1}{k_g}}$. In this case, $g$ is our ``special'' function. We denote by $k'_g$ the new integers that appear after the above procedure. \begin{claim} For the choice we have made above, the function $|z^{(k'_z)}(t)|^{-\frac{1}{k'_z}}$ satisfies property $(\mathcal{Q})$ in relation to our special function, for any $z\in \mathcal{F}$. \end{claim} \begin{proof} If we are in case a) above, the functions $ |g_m^{(d)}(t)|^{-\frac{1}{d}}$ and $|g^{(k_g)}(t)|^{-\frac{1}{k_g}}$ have the same growth rate or their ratio dominates a fractional power (for any $g\in \mathcal{F}$) and we are done. 
In the second case, we have a special function $g$ ($k_g=k'_g$). We consider functions $z\neq g$ such that $|g^{(k_g)}(t)|^{-\frac{1}{k_g}}$ and $|z^{(k'_z)}(t)|^{-\frac{1}{k'_z}}$ have different growth rates (because otherwise the claim is trivial). Then there are two possibilities: $\bullet$ If the original function $|g_m^{(d)}(t)|^{-\frac{1}{d}}$ and $|z^{(k_z)}(t)|^{-\frac{1}{k_z}}$ had a ratio dominating a fractional power, then the claim follows (in this case, we must have $k'_z=k_z$). $\bullet$ If the original function $|z^{(k_z)}(t)|^{-\frac{1}{k_z}}$ failed property $\mathcal{Q}$ in relation to $|g_m^{(d)}(t)|^{-\frac{1}{d}}$, then we have \begin{equation*} |g^{(k_g)}(t)|^{-\frac{1}{k_g}} \prec |z^{(k_z)}(t)|^{-\frac{1}{k_z}} \ \ \text{ (due to minimality) } \end{equation*}and thus $L(t)\in S(z,k_z-1)$. We easily see that the functions $|g^{(k_g)}(t)|^{-\frac{1}{k_g}}$ and $|z^{(k_z-1)}(t)|^{-\frac{1}{k_z-1}}$ differ by a fractional power\footnote{Roughly speaking, we have a gain of a power $t^{\delta}$ when passing from $S(z,k_z-1)$ to $S(z,k_z)$ due to \eqref{20000}. Therefore, if the functions $|z^{(k_z)}(t)|^{-\frac{1}{k_z}}$ and $|g^{(k_g)}(t)|^{-\frac{1}{k_g}}$ were "close", then $|z^{(k_z-1)}(t)|^{-\frac{1}{k_z-1}}$ and $|g^{(k_g)}(t)|^{-\frac{1}{k_g}}$ differ by a fractional power.}. \end{proof} For convenience, we will use the same notation $S(g,k_g)$ for the new classes that have been chosen after the above operation (that is we replace $k'_g$ by $k_g$). \begin{remark*} The above proof also implies that the growth rate of $ |g^{(k_g)}(t)|^{-\frac{1}{k_g}}$ is maximized when $g$ is the special function. \end{remark*} We denote by ${\tilde{g}}$ the special function given by our above arguments. 
For any function $g\in \mathcal{F}$, we use the Taylor expansion around the point $r$ to obtain \begin{equation}\label{expansion} g(r+h)=g(r) +\cdots+\frac{g^{(k_g)}(r)h^{k_g}}{k_g!} + \frac{g^{(k_g+1)}(\xi_h)h^{k_g+1}}{(k_g+1)!} \ \text{ for some } \xi_h\in [r,r+h], \end{equation}for all $0\leq h\leq L(r)$. We observe that the last term is $o_r(1)$ while the second to last term in the above expansion diverges when $h=L(r)$ (see the discussion after the proof of Proposition \ref{growth}). Therefore, we have \begin{equation*} g(r+h) =p_{r,g}(h) +o_r(1) \end{equation*}where $p_{r,g}$ is a polynomial. \subsection{Step 5: The change of variables} In this step, we do a change of variables trick. Our purpose is to rewrite the above polynomials in such a way that the leading coefficients are good sequences in order to be able to apply Proposition \ref{PET}. All the work we did in the previous step (namely, making sure that our functions satisfied Property $\mathcal{Q}$) will ensure that the leading coefficients of our polynomials will be good sequences that either converge to a (non-zero) real number, or their growth rate is larger than some fractional power. A similar trick is also used in \cite{FraHardy1}. Assume that ${\tilde{g}}$ is our special function with the polynomial expansion \begin{equation*} {\tilde{g}}(r+h)={\tilde{g}}(r) +\cdots +\frac{{\tilde{g}}^{(k_{{\tilde{g}}})}(r)h^{k_{{\tilde{g}}}}}{k_{{\tilde{g}}}!} + o_r(1). 
\end{equation*} Every $0\leq h \leq L(r)$ can be written as \begin{equation*} h=w \bigfloor{ \Big| \frac{k_{{\tilde{g}}}!}{{\tilde{g}}^{(k_{{\tilde{g}}})}(r)} \Big|^{\frac{1}{k_{{\tilde{g}}}}}} +v \end{equation*} for some integers $w,v$, where \begin{equation*} 0\leq w\leq \frac{L(r)}{\bigfloor{ \Big| \frac{k_{{\tilde{g}}}!}{{\tilde{g}}^{(k_{{\tilde{g}}})}(r)} \Big|^{\frac{1}{k_{{\tilde{g}}}}}}} = D_r \end{equation*} and \begin{equation*} 0\leq v\leq \bigfloor{ \Big| \frac{k_{{\tilde{g}}}!}{{\tilde{g}}^{(k_{{\tilde{g}}})}(r)} \Big|^{\frac{1}{k_{{\tilde{g}}}}}}-1. \end{equation*}Note that $D_r \succ 1$, because $L(t)\in S({\tilde{g}},k_{{\tilde{g}}})$. We denote by $u(r)$ the function inside the integer part above, namely, we define \begin{equation*} u(r):=\Big| \frac{k_{{\tilde{g}}}!}{{\tilde{g}}^{(k_{{\tilde{g}}})}(r)} \Big|^{\frac{1}{k_{{\tilde{g}}}}}, \end{equation*}which is a (sub-linear) function in $\mathcal{H}$. In addition, since we have chosen the numbers $k_g$ to be sufficiently large, we can ensure that the function $u$ dominates some fractional power (this follows by statement ii) of Lemma \ref{basic}). We observe that (recall that $f_r$ is given by \eqref{fapeiro})\begin{multline}\label{compl} \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1} \bignorm{\underset{0\leq h\leq L(r)}{\mathbb{E}}\ c_{h,r}\ T^{\floor{a_1(r+h)}} f_{r}\cdot ...\cdot T^{\floor{a_k(r+h)}}f_k }_{L^2(\mu)}^{2^t}\leq \\ \sup_{\norm{f_2}_{\infty},...,\norm{f_k}_{\infty}\leq 1} \ \underset{1\leq v\leq \floor{u(r)}-1}{\mathbb{E}}\ \sup_{|c_{h,r,v}|\leq 1} \bignorm{ \underset{h\equiv v (mod \ \floor{u(r)})}{\mathbb{E}}\ c_{h,r,v}\ T^{\floor{a_1(r+h)}} f_{r}\cdot ...\cdot T^{\floor{a_k(r+h)}}f_k }_{L^2(\mu)}^{2^t}, \end{multline}where the above bound follows by applying the H\"{o}lder and triangle inequalities. We will bound the innermost average in the norm by a quantity that does not depend on $v$. Fix a $v$ as above. 
For every $h\equiv v (mod\ \floor{u(r)})$, we can write each of the polynomials $p_{g,r}(h)$ in the previous step as a new polynomial $\tilde{p}_{r,v,g}(w)$ in the new variable $w$. We are only interested in the leading coefficients of the new polynomials. Using \eqref{expansion}, we see that it is equal to \begin{equation}\label{c_g} c_{g}(r)= \frac{g^{(k_g)}(r)}{k_g!}\cdot \floor{u(r)}^{k_g}= \frac{g^{(k_g)}(r)}{k_g!}\cdot \bigfloor{ \Big| \frac{k_{{\tilde{g}}}!}{{\tilde{g}}^{(k_{{\tilde{g}}})}(r)} \Big|^{\frac{1}{k_{{\tilde{g}}}}}}^{{k_g}}. \end{equation} Now assume that $g\in \mathcal{F}$. The function $c_g(r)$ is not a function in the Hardy field $\mathcal{H}$, but we will prove that it is a good sequence (see Definition \ref{good sequence}). Therefore, we seek to approximate it by a function in $\mathcal{H}$. To achieve this, we can define the function $d_g(t)\in \mathcal {H}$ by removing the floor function: \begin{equation}\label{d_g} d_g(t) = \frac{g^{(k_g)}(t)}{k_g!}\cdot \Big| \frac{k_{{\tilde{g}}}!}{{\tilde{g}}^{(k_{{\tilde{g}}})}(t)} \Big|^{\frac{k_g}{k_{{\tilde{g}}}}}. \end{equation}It is obvious that $c_g(r)/d_g(r)\to 1$. However, we have something stronger: \begin{claim} For all $g\in \mathcal{F}$, we have \begin{equation*} |c_g(r) -d_g(r)|=o_r(1). \end{equation*} \end{claim} \begin{proof} We will use the inequality \begin{equation*} |a^{c}-b^{c}|\leq c|a-b||a|^{c-1}, \end{equation*}which holds when $|b|\leq |a|$ and $c\in \mathbb{N}$. An application of this inequality reduces the problem to showing that \begin{equation}\label{zxcvbnm} |{\tilde{g}}^{(k_{{\tilde{g}}})}(t)|^{-\frac{1}{k_{{\tilde{g}}}}}\prec |g^{(k_g)}(t)|^{-\frac{1}{k_g-1}}. 
\end{equation} Since $L(t)\in S({\tilde{g}},k_{{\tilde{g}}})$, it is sufficient to show that \begin{equation*} L(t)\prec |g^{(k_g)}(t)|^{-\frac{1}{k_g-1}} \end{equation*}and now using the fact that $L(t)\in S(g,{k_g})$, our conclusion follows if we prove that \begin{equation*} |g^{(k_g+1)}(t)|^{-\frac{1}{k_g+1}} \prec |g^{(k_g)}(t)|^{-\frac{1}{k_g-1}}. \end{equation*} Substituting $g^{(k_g+1)}(t)\sim g^{(k_g)}(t)/t$ in the above equation (we use Proposition \ref{prop:basic} and the fact that the numbers $k_g$ are assumed to be large enough), this reduces to \begin{align} g^{(k_g)}(t)&\prec t^{\frac{1-k_g}{2}}. \end{align} However, recall that we have chosen a non-integer $q$, such that $g(t)\ll t^q$ for all $g\in \mathcal{F}$ and we have also chosen $k_g \geq 10q-1$. Applying Proposition \ref{prop:basic}, we have $g^{(k_g)}(t)\prec t^{q-k_g}$ and now the claim easily follows. \end{proof} \begin{claim} We have that the function $d_g(t)$ in \eqref{d_g} is a sub-linear function that either satisfies $ t^{\varepsilon}\prec d_g(t)$ for some $\varepsilon>0$ or converges to a non-zero constant\footnote{ Thus, the leading coefficients $c_g(r)$ in \eqref{c_g} are good sequences.}. \end{claim} \begin{proof} Property ($\mathcal{Q}$) implies that $d_g(t)$ converges to a non-zero constant, or dominates a fractional power $t^{\delta}$. Therefore, we only need to prove sub-linearity. We can prove something stronger, namely that $d_g$ is dominated by the function $({\tilde{g}}^{(k_{{\tilde{g}}})}(t))^{-\frac{1}{k_{{\tilde{g}}}}}$. However, a simple computation shows that this is equivalent to \eqref{zxcvbnm}, which has already been established. \end{proof} \begin{claim} If $g,h$ are distinct functions in the set $\{g_1,...,g_m\}$ such that $d_g(t) \sim d_h(t)$, then $k_g\neq k_h$. \end{claim} \begin{proof} Assume that we have both $k_g=k_h$ and $d_g\sim d_h$. 
This implies that \begin{equation*} g^{(k_g)}(t)\sim h^{(k_h)}(t) \end{equation*}and L'Hospital's rule implies that $g\sim h$. Since $g,h$ have distinct growth rates, this last relation cannot hold and we arrive at a contradiction. \end{proof} We have seen that the functions $g_1,...,g_m$ admit a polynomial expansion and, after the change of variables above, their leading coefficients become sub-linear good sequences. Now, we look at how the leading coefficients of the polynomials $q_1,...,q_k$ in \eqref{expansionform} transform after the above change of variables. Note that $q_i(r+h)$ is also a polynomial $q_{i,r}(h)$ in the variable $h$. Writing again \begin{equation*} h=w\floor{u(r)}+v \end{equation*}as above, we see that $q_i(r+h)=q_{i,r,v}(w)$ where $q_{i,r,v}$ is a real polynomial. It is straightforward to check that the leading coefficients of the $q_{i,r,v}$ have the form $c\floor{u(r)}^{\theta}$, where $c\in \mathbb{R}^{*}$ and $\theta\in \mathbb{N}^{+}$. These are good sequences, since they are asymptotically equal to \begin{equation*} c \Big| \frac{k_{{\tilde{g}}}!}{{\tilde{g}}^{(k_{{\tilde{g}}})}(r)} \Big|^{\frac{\theta}{k_{{\tilde{g}}}}}, \end{equation*}which is a function in $\mathcal{H}$ (and its limit is obviously non-zero). Now, we recall \eqref{expansionform}. When restricted to the interval $[r,r+L(r)]$, every one of our original functions $a_i$, where $1\leq i\leq k$ can be written as a sum of polynomials, whose leading coefficients are good sequences, plus an $o_r(1)$ term. We can eliminate the error terms $o_r(1)$ by using the argument in Lemma \ref{errors} once again. 
In particular, any one of these good sequences (denote $a_r$) satisfies one of the following:\\ a) there exists a sub-linear function $\phi\in \mathcal{H}$, such that $a_r=\phi(r)+o_r(1)\prec u(r)$ and $\phi(t)\gg t^{\delta}$ for some $\delta>0$, \\ b) they have the form $c\floor{u(r)}^{\theta}$, where $c\in\mathbb{R}$ and $\theta$ is a positive integer or\\ c) they converge to a non-zero real number. We denote the polynomial corresponding to $a_i$ as $P_{i,r,v}$ and we observe that its degree is independent of $r$. In view of Property \eqref{Papeiro}, we deduce that the leading coefficient of $P_{i,r,v}-P_{j,r,v}$ is either the leading coefficient of the polynomial $q_{i,r,v}(t)-q_{j,r,v}(t)$ (which in this case must be a non-constant polynomial), or it is equal to the leading coefficient of \begin{equation}\label{RJ} R_{ij,r,v}(w)= \sum_{n=1}^{m} \big(c_{i,n}-c_{j,n}\big)\tilde{p}_{r,g_n,v}(w). \end{equation} In the first case, it has the form b) above and is a good sequence. In the second case, it is a linear combination of sequences of the form $a)$ or $c)$. That is, there are functions $g_{i_1},...,g_{i_{\lambda}}$, where $i_1,...,i_{\lambda}\in\{1,2,...,m\}$ such that the leading coefficients of the polynomials $p_{r,g_{i_j},v}$ are all sequences of the form $a)$ or $c)$ and the leading coefficient of the polynomial $R_{ij,r,v}$ in \eqref{RJ} is equal to the leading coefficient of \begin{equation}\label{arx} \sum_{\alpha=1}^{\lambda}(c_{i,i_{\alpha}}-c_{j,i_{\alpha}} )\tilde{p}_{r,g_{i_{\alpha}},v }. \end{equation} We will use Claim 5: if any two of the polynomials $p_{r,g_{i_{\alpha}},v }$ have the same degree, then their leading coefficients are sequences with distinct growth rates. Therefore, the leading coefficient of $R_{ij,r,v}$ is a linear combination of good sequences with pairwise distinct growth rates and it is straightforward to see that it is itself a good sequence. 
Our original problem reduces to the following (recall \eqref{compl}): for every measure-preserving system $(X,\mu,T)$ and function $f_1\in L^{\infty}(\mu)$ with $f_1 \perp Z_{\tilde{s}}(X)$ for some $\tilde{s}\in \mathbb{N}$, there exists a positive integer $t=t(a_1,...,a_k)$ such that: \begin{multline}\label{semifinalreduction} \lim\limits_{R\to+\infty} \ \sup_{ ||h_1||_{\infty}\leq 1,...,||h_{\ell}||_{\infty}\leq 1}\ \underset{1\leq r\leq R}{\mathbb{E}}\ \ \underset{0\leq v\leq \floor{u(r)}-1}{\mathbb{E}}\\ \sup_{||f_2||_{\infty}\leq 1,...,||f_k||_{\infty} \leq 1}\ \sup_{|c_{w,r,v}|\leq 1}\ \bignorm{ \ \underset{0\leq w\leq D_r}{\mathbb{E}} \ c_{w,r,v}\ T^{\floor{ P_{1,r,v}(w) }} f_{r}\cdot ...\cdot T^{\floor{P_{k,r,v}(w) } }f_k }_{L^2(\mu)}^{2^t}=0, \end{multline}where \begin{equation}\label{f1} f_r= f_1\cdot T^{\floor{\theta_1(r)}}h_{1}\cdot... \cdot T^{\floor{\theta_{\ell}(r)}}h_{\ell} \end{equation}for functions $\theta_1,...,\theta_{\ell}\in\mathcal{H} $ that satisfy \begin{align*} & \log t \prec \theta_i(t)\prec t^{\delta}\\ \log t \prec & \ \theta_i(t)-\theta_j(t)\prec t^{\delta} \ \text{ for } \ i\neq j \end{align*}for all $\delta>0$. Observe that \begin{multline*} \underset{0\leq v\leq \floor{u(r)}-1}{\mathbb{E}}\ \sup_{||f_2||_{\infty}\leq 1,...,||f_k||_{\infty} \leq 1}\ \sup_{|c_{w,r,v}|\leq 1}\ \bignorm{ \underset{0\leq w\leq D_r}{\mathbb{E}} \ c_{w,r,v}\ T^{\floor{ P_{1,r,v}(w) }} f_{r}\cdot ...\cdot T^{\floor{P_{k,r,v}(w) } }f_k }_{L^2(\mu)}^{2^t}\leq \\ \max_{0\leq v\leq \floor{u(r)}-1} \sup_{||f_2||_{\infty}\leq 1,...,||f_k||_{\infty} \leq 1}\ \sup_{|c_{w,r,v}|\leq 1}\ \bignorm{ \underset{0\leq w\leq D_r}{\mathbb{E}} \ c_{w,r,v}\ T^{\floor{ P_{1,r,v}(w) }} f_{r}\cdot ...\cdot T^{\floor{P_{k,r,v}(w) } }f_k }_{L^2(\mu)}^{2^t}. \end{multline*}For each $r\in \mathbb{N}$, let $v_r$ be the value of $v$ for which the above max is attained. 
Then, the polynomial family \begin{equation*} \mathcal{P}_r =\{P_{1,r,v_r},...,P_{k,r,v_r}\} \end{equation*}is a nice polynomial family. Indeed, the degrees of its elements are fixed integers and the leading coefficients of the polynomials and of their differences are good sequences irrespective of the value of $v_r$, as we discussed previously. Therefore, under the above assumptions, we reduce our problem to \begin{multline}\label{finalreduction} \lim\limits_{R\to+\infty}\ \sup_{ ||h_1||_{\infty}\leq 1,...,||h_{\ell}||_{\infty}\leq 1}\ \underset{1\leq r\leq R}{\mathbb{E}}\\ \sup_{||f_2||_{\infty},...,||f_k||_{\infty} \leq 1}\ \sup_{|c_{w,r}|\leq 1}\ \bignorm{ \underset{0\leq w\leq D_r}{\mathbb{E}} \ c_{w,r}\ T^{\floor{ P_{1,r,v_r}(w) }} f_{r}\cdot ...\cdot T^{\floor{P_{k,r,v_r}(w) } }f_k }_{L^2(\mu)}^{2^t}=0. \end{multline} We also choose functions $h_{1,R},...,h_{\ell,R}\in L^{\infty}(\mu)$ so that the corresponding average is $1/R$ close to the supremum of the $h_1,...,h_{\ell}$. Namely, we want to prove \eqref{finalreduction} where $f_r$ is now the function \begin{equation*} f_1\cdot T^{\floor{\theta_1(r)}}h_{1,R}\cdot... \cdot T^{\floor{\theta_{\ell}(r)}}h_{\ell,R}. \end{equation*} \subsection{Step 6: Applying the polynomial bounds} Now, we apply Proposition \ref{PET} for the inner average in the above relation. We have established that its hypotheses are satisfied. The degree and the type of the polynomial family all depend on the initial functions $a_1,...,a_k$. Therefore, all asymptotic bounds are assumed to depend only on $a_1,...,a_k$ and we omit the indices. 
Let us denote the leading vector of the family $\mathcal{P}_r$ by $(u_{1,r},...,u_{k,r})$ and recall again here that each $u_{i,r}$ satisfies one of the following:\\ a) there exists a sub-linear function $\phi_i(r)\prec u(r)$ that dominates some fractional power, such that $u_{i,r}=\phi_i(r)+o_r(1)$,\\ b) they have the form $c\floor{u(r)}^{\theta}$, where $c\in\mathbb{R}$ and $\theta$ is a positive integer or \\ c) they converge to a non-zero real number. Fix a positive integer $M$. There exist integers $s,t$, a finite set $Y$ of integers and polynomials $p_{\underline{\varepsilon},i}$ (all depending only on the original functions $a_1,...,a_k$), where $\underline{\varepsilon}\in [[s]]$ and $1\leq i\leq k$ such that \begin{multline}\label{afterpet} \sup_{||f_2||_{\infty}\leq 1,...,||f_k||_{\infty} \leq 1}\ \sup_{|c_{w,r}|\leq 1}\ \bignorm{ \underset{0\leq w\leq D_r}{\mathbb{E}} \ c_{w,r}\ T^{\floor{ P_{1,r,v_r}(w) }} f_{r}\cdot ...\cdot T^{\floor{P_{k,r,v_r}(w) } }f_k }_{L^2(\mu)}^{2^t}\ll\\ \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon},r}({\bf m})}+h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{r}) \ d\mu \Big|+ o_r(1), \end{multline} where \begin{equation}\label{A_e} A_{\underline{\varepsilon},r}({\bf m})=\sum_{1\leq j\leq k} \ p_{\underline{\varepsilon},j}({\bf m})u_{j,r}. \end{equation}The polynomials $A_{\underline{\varepsilon}}$ are essentially distinct for any value of the $u_{j,r}$ and satisfy \begin{equation*} A_{\underline{\varepsilon},r}({\bf m})+A_{\underline{\varepsilon}^c,r}({\bf m})=A_{\underline{1},r}({\bf m}). \end{equation*}In addition, for an $\underline{\varepsilon}\in [[s]]$, we have that the non-zero polynomials among the $p_{\underline{\varepsilon},j}$ are linearly independent. 
Applying the bounds of \eqref{afterpet} to \eqref{finalreduction}, we deduce that our original average is bounded by the quantity \begin{multline}\label{1940} \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}}\ \underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{A_{\underline{\varepsilon},r}({\bf m})}+h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_{r}) \ d\mu \Big|+ o_R(1)= \\ \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}}\ \underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{A_{\underline{\varepsilon},r}({\bf m})}+ \floor{\theta_{i}(r)} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i,R}) \ d\mu \Big|+ o_R(1), \end{multline}where we set $\theta_0(r)\equiv 0$ and $h_{0,R}\equiv f_1$ for convenience in notation. We may assume without loss of generality that $0\equiv \theta_0(r)\ll \theta_1(r)\ll...\ll\theta_{\ell}(r)$. Then, we compose with $T^{-\floor{\theta_{\ell}(r)}}$ inside the above integral and combine the integer parts to obtain that the aforementioned integral is equal to \begin{equation*} \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{A_{\underline{\varepsilon},r}({\bf m})}+ \floor{\theta_{i}(r)-\theta_{\ell}(r)}+h_{i,r} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i,R}) \ d\mu , \end{equation*}where $h_{i,r}\in \{0,\pm 1\}$. 
Putting this in \eqref{1940}, we want to bound \begin{multline*} \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}}\ \underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{A_{\underline{\varepsilon},r}({\bf m})}+ \floor{\theta_{i}(r)-\theta_{\ell}(r)} +h_{i,r} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i,R}) \ d\mu \Big|+ o_R(1). \end{multline*}Using the argument present in Lemma \ref{errors}, we deduce that the last quantity is smaller than a constant multiple of \begin{multline*}\frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}}\ \ \sup_{\norm{h_1}_{\infty},...,\norm{h_{\ell}}_{\infty} \leq 1 } \\\underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{A_{\underline{\varepsilon},r}({\bf m})} + \floor{\theta_{i}(r)-\theta_{\ell}(r)} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i}) \ d\mu \Big|+ o_R(1). \end{multline*}We choose again sequences of functions in place of the $h_1,...,h_{\ell}$, so that the corresponding quantity is $1/R$ close to the supremum and we denote them again $h_{1,R},...,h_{\ell,R}$ for convenience. Note that this final quantity essentially has the same form as the one in \eqref{1940}, but the function $\theta_0$ corresponding to $f_1$ now has maximal growth rate among the $\theta_i$. 
Therefore, our original problem reduces to finding a bound for \begin{multline}\label{1973} \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}}\ \underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{A_{\underline{\varepsilon},r}({\bf m})}+ \floor{\theta_{i}(r)} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i,R}) \ d\mu \Big|+ o_R(1) \end{multline}under the assumption that $\theta_0(t)\gg \theta_{i}(t)\succ\log t$ for every $1\leq i\leq \ell-1$, $\theta_{\ell}\equiv 0$ and $\theta_i(t)-\theta_j(t)\succ \log t$ for all $ i\neq j$. We write \begin{equation*} B_{{\bf m},{\bf h}}(r):= \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{A_{\underline{\varepsilon},r}({\bf m})}+ \floor{\theta_{i}(r)} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i,R}) \ d\mu \Big|. \end{equation*}Taking the limit as $R\to+\infty$, our goal is to show that the quantity \begin{equation*} \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \big( \limsup\limits_{R\to+\infty} \underset{1\leq r\leq R}{\mathbb{E}} B_{{\bf m},{\bf h}}(r) \big) \end{equation*}goes to 0 as $M$ goes to infinity. \subsection{Step 7: Another change of variables trick} Before we proceed with the final details of the proof, we will use a final trick to reduce our problem to a statement where the results of Section \ref{sublinearsection} can be applied. We will use a lemma very similar to \cite[Lemma 5.1]{FraHardy1}, which can also be proven similarly by a standard partial summation argument. \begin{lemma}\label{floor} Let $(V_R(n))_{n,R\in\mathbb{N}}$ be a 1-bounded, two-parameter sequence of vectors in a normed space and let $a\in\mathcal{H}$ satisfy the growth condition $t^{\delta}\prec a(t)\prec t$. 
Then, we have \begin{equation*} \limsup\limits_{R\to +\infty} \bignorm{\underset{1\leq n\leq R}{\mathbb{E}} V_R(\floor{a(n)}{}) }\ll_a \limsup\limits_{R\to +\infty} \bignorm{ \underset{1\leq n\leq R}{\mathbb{E}} V_R(n) }. \end{equation*} \end{lemma} Our main objective is the following: since the sequences $u_{j,r}$ of the leading vector can have the form $c\floor{u(r)}^{k}$, which are tough to handle, we want to use the above lemma to replace these terms with the terms $cr^k$, which are just polynomials. In order to facilitate this, we need to write the entire integral $B_{{\bf m},{\bf h}}(r)$ as a function of $\floor{u(r)}$. Note that $u(r)$ satisfies the growth condition in the statement of Lemma \ref{floor}. We consider three cases:\\ i) If the sequence $u_{j,r}$ has the form $c\floor{u(r)}^q$, for $c\in\mathbb{R}$ and $q\in \mathbb{N}^{*}$, then it is already written as a function of $\floor{u(r)}$.\\ ii) If the sequence $u_{j,r}$ converges to a non-zero real number $a_j$, then, we have $u_{j,r}-a_j=o_r(1)$ and the constant function $a_j$ is already written as a function of $\floor{u(r)}$.\\ iii) Finally, assume the sequence $u_{j,r}$ satisfies the remaining possible condition, namely that there exists a function $\phi_j\in\mathcal{H}$ satisfying the growth condition \begin{equation*} t^{\delta}\prec \phi_j(t)\prec u(t) \end{equation*}for some $\delta>0$. Let us assume that $\phi_j(t)$ is eventually positive (in the other case, we work with the number $-u_{j,r}$). We write $\phi_j(t)=\Phi_j(u(t))$, where $\Phi_j= \phi_j\circ u^{-1}$, which is well defined and thus a function in $\mathcal{H}$ \footnote{Note that $u(t)$ is a positive function by its definition and therefore, goes to $+\infty$. Consequently, $u^{-1}$ also goes to $+\infty$. }. We also have that $\Phi_j(t)\prec t $ (this follows easily from the fact that $\phi_j(t)\prec u(t)$) and we can easily see that $\Phi_j(t)$ also dominates some fractional power. 
In addition, we have \begin{equation*} |\Phi_j(u(t))-\Phi_j(\floor{u(t)})|\leq \sup_{x\in\mathbb{R}, \floor{u(t)}\leq x\leq u(t)}|\Phi'_j(x)|=o_t(1), \end{equation*}since $\Phi'_j(t)\ll \Phi_j(t)/t\prec 1$. In all three cases above, we have the following: there exists a function $w_j\in\mathcal{H}$, such that \begin{equation}\label{wdefinition} |u_{j,r}-w_j(\floor{u(r)})|=o_r(1) \end{equation}and the function $w_j$ is either a polynomial, or a constant function or a sub-linear (but not a sub-fractional) function. We write \begin{equation}\label{A'} \tilde{A}_{\underline{\varepsilon},r }({\bf m})=\sum_{1\leq j\leq k} \ p_{\underline{\varepsilon},j}({\bf m})w_{j}(\floor{u(r)}) \end{equation}and observe that $|A_{\underline{\varepsilon},r}({\bf m})-\tilde{A}_{\underline{\varepsilon},r}({\bf m})|=o_r(1)$, for any fixed value of ${\bf m}$. Therefore, for $r$ large enough, we have \begin{equation}\label{asdf1} \floor{A_{\underline{\varepsilon},r}({\bf m})}=\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}}, \end{equation}where $h'_{r,\underline{\varepsilon},{\bf m}}\in\{0,\pm 1\}$. We do the same for the function $\theta_i$. Indeed, we can use the same arguments as above to deduce that $|\theta_i(t)-\psi_i(\floor{u(t)})|=o_t(1)$, where $\psi_i(t)\in\mathcal{H}$ is the function $\theta_i\circ u^{-1}$. In addition, since $u$ dominates some fractional power, we have that $u^{-1}$ has polynomial growth and, therefore, we easily get $t^{\varepsilon}\succ \psi_i(t)\succ \log t$ for all $\varepsilon>0$, that is $\psi_i$ is a (sub-fractional) function. Finally, for $r$ large enough, we can write \begin{equation}\label{asdf2} \floor{\theta_i(r)}=\floor{\psi_i(\floor{u(r)})}+h''_{i,r}, \end{equation}where $h''_{i,r}\in \{0,\pm 1\}$. 
In view of the above, we have \begin{multline*} \ \ \ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \ \underset{1\leq r\leq R}{\mathbb{E}}\ B_{{\bf m},{\bf h}}(r) = \\ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \ \underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}}+ \floor{\psi_{i}(\floor{u(r)})}+h'' _{i,r} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i,R}) \ d\mu \Big| +o_R(1) \leq \\ \ \ \ \ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \ \Big( \underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}}+ \floor{\psi_{i}(\floor{u(r)})}+h'' _{i,r} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i,R}) \ d\mu \Big|^2 \big)^{1/2}\\ +o_R(1), \end{multline*}where we applied the Cauchy-Schwarz inequality (the $o_R(1)$ term on the second line exists to account for small values of $r$ for which \eqref{asdf1},\eqref{asdf2} may not hold with error terms in the set $\{0,\pm 1\}$). Thus, we want to bound \begin{multline}\label{the average} \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \\ \Big( \underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}}+ \floor{\psi_{i}(\floor{u(r)})}+h'' _{i,r} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i,R}) \ d\mu \Big|^2 \big)^{1/2} +o_R(1), \end{multline}where $h_{0,R}=f_1$. 
\begin{claim} Proposition \ref{factors} holds in the case when all the functions $w_j$ (defined in \eqref{wdefinition}) are constant and $\ell=0$. \end{claim} \begin{proof}[Proof of the claim] This means that the polynomials $\tilde{A}_{\underline{\varepsilon},r}({\bf m})$ are actually independent of $r$ and we write them as $\tilde{A}_{\underline{\varepsilon}}({\bf m})$. In addition, there are no functions $\psi_i$ in the iterates of the above quantity. Finally, the error terms $h''_{i,r}$ do not exist in this case. Our problem reduces to finding a bound for \begin{equation}\label{bnm} \frac{1}{M}+ \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \big( \underset{1\leq r\leq R}{\mathbb{E}}\ \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{\tilde{A}_{\underline{\varepsilon}}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_1) \ d\mu \Big|^2 \big)^{1/2}\\ +o_R(1), \end{equation}where $h'_{r,\underline{\varepsilon},{\bf m}}\in\{0,\pm 1\}$. 
Note that \begin{multline*} \underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{\tilde{A}_{\underline{\varepsilon}}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_1) \ d\mu \Big|^2\leq \\ \sum_{h'_{\underline{\varepsilon}}\in \{0,\pm 1\},\underline{\varepsilon}\in [[s]] } \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}T^{\floor{\tilde{A}_{\underline{\varepsilon}}({\bf m})}+h'_{\underline{\varepsilon}} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_1) \ d\mu \Big|^2, \end{multline*}which implies that the quantity in \eqref{bnm} is smaller than $O(1)$ times \begin{equation*} \frac{1}{M}+ \sum_{{\bf h}\in \tilde{Y}^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{\tilde{A}_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_1) \ d\mu \Big|\\ +o_R(1) \end{equation*}for some new, larger finite set $\tilde{Y}$. The statement follows if we prove that \begin{equation*} \lim\limits_{M\to+\infty}\underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{\tilde{A}_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_1) \ d\mu \Big| =0 \end{equation*}for any $h_{\underline{\varepsilon}}\in \mathbb{Z}$. Note that the polynomials $\tilde{A}_{\underline{\varepsilon}}({\bf m})$ are essentially distinct due to the statement of Proposition \ref{PET}. 
Squaring and applying the Cauchy-Schwarz inequality, we want to prove that \begin{equation*} \lim\limits_{M\to+\infty}\underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{} T^{\floor{\tilde{A}_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}f_1) \ d\mu \Big|^2 =0, \end{equation*}which can be rewritten as \begin{equation*} \lim\limits_{M\to+\infty}\underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \int \prod_{\underline{\varepsilon}\in [[s]]}^{} S^{\floor{\tilde{A}_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}F_1) \ d(\mu\times\mu) =0, \end{equation*}where $S=T\times T$ and $F_1=\overline{f_1}\otimes f_1$. This is an average where the iterates are real polynomials and using \cite[Lemma 4.3]{Frajointhardy}, we can prove that this last relation holds, provided that $\nnorm{ T^{h_{\underline{1} }}F_1 }_{\tilde{s},T\times T}=0$, for some positive integer $\tilde{s}$ that depends only on the polynomials $A_{\underline{\varepsilon}}$ (which depend on the original Hardy field functions $a_1,...,a_k$). However, since $\nnorm{F_1}_{\tilde{s},T\times T}\leq \nnorm{f_1}_{\tilde{s}+1,T}^2$, we get that the statement holds if the function $f_1$ satisfies $\nnorm{f_1}_{\tilde{s}+1,T}=0$. This completes the proof of our claim. \end{proof} From now on, we assume that either at least one of the functions $w_j$ is non-constant, or that $\ell\geq 1$ and we want to bound the quantity in \eqref{the average}. 
Writing $H_{i,R}=\overline{h_{i,R}}\otimes h_{i,R}$ and $S=T\times T$, we observe that \begin{align*} &\underset{1\leq r\leq R}{\mathbb{E}} \Big| \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} T^{\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}}+ \floor{\psi_{i}(\floor{u(r)})}+h'' _{i,r} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}h_{i,R}) \ d\mu \Big|^2 = \\ &\underset{1\leq r\leq R}{\mathbb{E}} \int \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} S^{\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}}+ \floor{\psi_{i}(\floor{u(r)})}+h'' _{i,r} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) \ d(\mu\times \mu)\leq \\ \bignorm{ &\underset{1\leq r\leq R}{\mathbb{E}} \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} S^{\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}}+ \floor{\psi_{i}(\floor{u(r)})}+h'' _{i,r} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)} \end{align*}due to the Cauchy-Schwarz inequality. 
Invoking\footnote{Note that all the error terms depending on $r$ in the iterates take values on finite sets.} Lemma \ref{errors}, we have \begin{multline*} \bignorm{ \underset{1\leq r\leq R}{\mathbb{E}} \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} S^{\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+h'_{r,\underline{\varepsilon},{\bf m}}+ \floor{\psi_{i}(\floor{u(r)})}+h'' _{i,r} +h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)}\ll_{s,\ell} \\ \sup_{|c_{r,{\bf m},{\bf h}}|\leq 1}\sup_{{\norm{H_{i}}_{\infty}\leq 1} } \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m},{\bf h}} \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} S^{\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+\floor{\psi_{i}(\floor{u(r)})}+h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}H_{i}) }_{L^2(\mu\times \mu)}, \end{multline*}where $ H_{0}=\overline{f_1}\otimes f_1$ and ${\bf h}=(h_{\underline{\varepsilon}},\underline{\varepsilon}\in[[s]])$. Note that since both $s,\ell$ depend on the original Hardy field functions $a_1,...,a_k$, the implicit constant in the last bound depends only on $a_1,...,a_k$ (which we omit from the subscripts). Putting everything together, we get that \begin{multline*} \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \ \underset{1\leq r\leq R}{\mathbb{E}}\ B_{{\bf m},{\bf h}}(r) \ll \sum_{{\bf h}\in Y^{[[s]]}}^{} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \sup_{|c_{r,{\bf m},{\bf h}}|\leq 1}\ \sup_{\underset{1\leq i\leq \ell}{\norm{H_{i}}_{\infty}\leq 1}}\\ \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m},{\bf h}} \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} S^{\floor{\tilde{A}_{\underline{\varepsilon},r}({\bf m})}+\floor{\psi_{i}(\floor{u(r)})}+h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}H_{i}) }_{L^2(\mu\times \mu)}^{1/2}. 
\end{multline*} Now, we choose functions $H_{1,R},...,H_{\ell,R}$ so that the above average (over $R$) is $1/R$ close to the supremum. Then, we take the limit as $R\to+\infty$ and apply Lemma \ref{floor} to deduce that the limsup of this last quantity is bounded by $O_{u}(1)$ times (which is, of course, $O_{a_1,...,a_k}(1)$)\begin{multline*} \sum_{{\bf h}\in Y^{[[s]]}}^{}\underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \limsup\limits_{R\to+\infty} \sup_{|c_{r,{\bf m},{\bf h}}|\leq 1}\\ \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m},{\bf h}} \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})}+\floor{\psi_{i}(r)}+h_{\underline{\varepsilon}}}(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)}^{1/2}, \end{multline*}where we define (recall \eqref{A'}) \begin{equation*} \widehat{A}_{\underline{\varepsilon},r}({\bf m}):= \sum_{1\leq j\leq k} \ p_{\underline{\varepsilon},j}({\bf m})w_{j}(r). \end{equation*}and $H_{0,R}=\overline{f_1}\otimes f_1$. Finally, we can combine the integer parts in the iterates of the above quantity (using again Lemma \ref{errors} to remove the error terms). 
In conclusion, our original average is bounded by $O(1)$ times\begin{multline}\label{Step7average} \frac{1}{M} + \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \Big( \limsup\limits_{R\to+\infty}\sup_{|c_{r,{\bf m}}|\leq 1} \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m}} \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_{i}(r)}}(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)}^{1/2}\Big)\leq\\ \ \ \ \frac{1}{M}+\Big(\ \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \limsup\limits_{R\to+\infty}\ \sup_{|c_{r,{\bf m}}| \leq 1}\ \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m}} \prod_{\underline{\varepsilon}\in [[s]]}^{}\prod_{0\leq i\leq \ell} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_{i}(r)} }(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)} \Big)^{1/2} \end{multline}by the Cauchy-Schwarz inequality. Note that all implied asymptotic constants above did not depend on either $M$ or $R$. \subsection{Finishing the proof} We describe the final step here. Our main observation is that $\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_i(r)$, when viewed as a function of $r$, is a sum of sub-linear functions that dominate the function $\log r$ and monomials (possibly of degree 0). Our goal is to use the bounds in Proposition \ref{sublinearseminorm} to deduce our result. However, it is not immediately obvious that in our case a linear combination of functions of the above form dominates the logarithmic function $\log r$ (the statement in general is false and a counterexample is given by the pair $(\log^2 t+\log t,\log^2 t)$). We shall establish that this is true for all ${\bf m}\in \mathbb{Z}^t$ outside a negligible set. 
We recall here that for every large enough $r$ (large enough for $w_j(r)$ to be non-zero), the $\widehat{A}_{\underline{\varepsilon},r}({\bf m})$ are pairwise essentially distinct polynomials in the variable ${\bf m}$ and in addition satisfy \begin{equation*} \widehat{A}_{\underline{\varepsilon},r}({\bf m})+\widehat{A}_{\underline{\varepsilon}^c,r}({\bf m})=\widehat{A}_{\underline{1},r}({\bf m}). \end{equation*} We will use the following lemma: \begin{lemma}\label{rootdensity} Let $p\in \mathbb{R}^t({\bf x})$ be a non-zero real polynomial of degree $d$. Then, the set of integer solutions of the equation \begin{equation*} p({\bf m})=0 \end{equation*}in $[-M,M]^t$ is $O_{d}( M^{t-1})$. \end{lemma} \begin{proof} For $t=1$ it is obvious, since the polynomial has at most $d$ roots. Assume we have proven the result for $t-1$. We can write $p({\bf m})$ in the form \begin{equation*} p(m_1,...,m_t)=a_{d'}(m_1,...,m_{t-1}) m_t^{d'}+\cdots+ a_1(m_1,...,m_{t-1} )m_t+a_0(m_1,....,m_{t-1} ) \end{equation*}for some $d'\leq d$. At least one of the polynomials $a_i(m_1,...,m_{t-1})$ with $1\leq i\leq d'$ is not identically zero and thus has at most $O_{d,t}(M^{t-2})$ zeroes in $[-M,M]^{t-1}$. If $(x_1,...,x_{t-1})$ is not one of these zeroes, then $p(x_1,...,x_{t-1}, m_t )$ is non-trivial as a polynomial in the variable $m_t$. Therefore, it is satisfied by no more than $d$ values of $m_t$. Summing over all tuples $(m_1,...,m_{t-1})\in [-M,M]^{t-1}$, we get the result. \end{proof} \begin{corollary}\label{linearcombhardy} Let $a_1\ll...\ll a_k$ be functions in $\mathcal{H}$ and let $p_1({\bf m}),...,p_k({\bf m})\in \mathbb{R}^t({\bf x})$ be non-zero linearly independent polynomials. Then, for all ${\bf m} \in \mathbb{Z}^t$ outside a set of density 0, we have that \begin{equation}\label{add} p_1({\bf m})a_1+\cdots+p_k({\bf m})a_k\sim a_k. \end{equation} \end{corollary} \begin{proof} Let $a_{k_0},...,a_k$ be the functions among the $a_i$ that have the same growth rate as $a_k$. 
Then, for $k_0\leq j\leq k$, we can write $a_j(t)=c_ja_k(t)+b_j(t)$, where $c_j\in \mathbb{R}^{*}$ and $b_j(t)\prec a_k(t)$. Then, the function in \eqref{add} has the same growth rate as the function \begin{equation*} \big(c_{k_0}p_{k_0}({\bf m})+\cdots+c_k p_k({\bf m})\big)a_k(t) \end{equation*}unless of course $c_{k_0}p_{k_0}({\bf m})+\cdots +c_k p_k({\bf m})=0$. However, the linear independence hypothesis implies that this polynomial is non-zero, and thus the set of ${\bf m}\in \mathbb{Z}^t$ for which this last relation holds has density 0 in $\mathbb{Z}^t$ by Lemma \ref{rootdensity}. The conclusion follows. \end{proof} We use this corollary to prove the following: \begin{claim} For all ${\bf m}\in \mathbb{Z}^{t}$ outside a set $\Lambda$ of density 0, we have that the functions (in the variable $r$) \begin{equation*} \widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_i(r)=\sum_{1\leq j\leq k} \ p_{\underline{\varepsilon},j}({\bf m})w_{j}(r)+\psi_i(r) \end{equation*}are a sum of a sub-linear function and a real polynomial. In addition, we have that they either dominate the function $\log r$, or they are a constant function. \end{claim} \begin{proof}[Proof of the claim] We split the $w_j$ into two sets: the set $S_1$ consists of those functions that are monomials, while $S_2$ contains the rest (namely the sub-linear functions). Reordering, if necessary, we may assume that $S_1=\{w_1,...,w_{k_0}\}$ while $S_2=\{w_{k_0+1},...,w_{k}\}$. We write \begin{equation}\label{splitting} \widehat{A}_{\underline{\varepsilon},r}({\bf m})=\sum_{j=1}^{k_0}p_{\underline{\varepsilon},j}({\bf m})w_j(r)+\sum_{j=k_0+1}^{k}p_{\underline{\varepsilon},j}({\bf m})w_j(r). \end{equation}For a fixed ${\bf m}\notin \Lambda$, the first summand is a polynomial in the variable $r$, while the second is a sub-linear function of $r$. 
Since the sub-linear functions $w_j$ with $ k_0+1\leq j\leq k $ dominate some fractional power, we can use Corollary \ref{linearcombhardy} to deduce that $\widehat{A}_{\underline{\varepsilon},r}({\bf m})$ is either a constant function \footnote{ This is the case when $p_{\underline{\varepsilon},j}({\bf m})\equiv 0$ for $j\geq k_0+1$ and the first summand is a constant polynomial in $r$.}, or the sum of a polynomial and a sub-linear function that dominates some fractional power, for all ${\bf m}$ outside a set of density zero. In addition, if $\psi_i(t)\neq\psi_{\ell}(t)$ (recall that $\psi_{\ell}(t)\equiv 0$), we can use the same argument to show that \begin{equation*} \widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_i(r) \end{equation*} is a sum of a sub-linear function that dominates $\log r$ and a polynomial (we use the fact that $\psi_i$ and $w_j$ (for any $j$) have distinct growth rates, since $\psi_i$ is a sub-fractional function). \end{proof} Let $\Lambda\subset \mathbb{Z}^{t}$ be the zero density set given by the above claim. Now, we isolate the iterate $S^{\floor{\widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r) }}(\mathcal{C}^{|\underline{1}|}H_{0})$ in \eqref{Step7average} and we also assume that ${\bf m} \notin \Lambda$. The above proof implies that the Hardy field function involved in this iterate is a sum of a sub-linear function (that dominates the logarithm) and a polynomial. In order to apply the results of Section \ref{sublinearsection}, we have to show that the differences of this function with the rest of the functions in the iterates satisfy the same condition. That is, for every $(\underline{\varepsilon},i)\neq (\underline{1},0)$, we have to show that the function \begin{equation*} \big( \widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r) \big)- \big( \widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_i(r) \big) \end{equation*}is a sub-linear function plus a polynomial, or is bounded. 
Rewrite the above as \begin{equation*} \widehat{A}_{\underline{\varepsilon}^c,r}({\bf m})+(\psi_0(r)-\psi_i(r)). \end{equation*} If $i\neq 0$, then we use the fact that $\psi_0-\psi_i\succ \log t$ and the argument of the previous proof to establish that \begin{equation*} \big( \widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r) \big)- \big( \widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_i(r) \big)\succ \log r \end{equation*}for all ${\bf m}$ outside a zero density set (which we attach to the set $\Lambda$) and that this function is the sum of a polynomial and a sub-linear function. If $i=0$, then the above difference is equal to $\widehat{A}_{\underline{\varepsilon}^c,r}({\bf m})$ which is either the sum of a polynomial and a sub-linear function (that dominates $\log r$), or a constant function of $r$. We use this characterization to split $[[s]]$ into two subsets: $A_2$ contains those $\underline{\varepsilon}\in[[s]]$, for which $\widehat{A}_{\underline{\varepsilon}^c,r}({\bf m})$ satisfies the first condition, while the set $A_1$ contains the rest. Note that if $\underline{\varepsilon}\in A_1$, then the difference \begin{equation*} \big( \widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r) \big)- \big( \widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_0(r) \big) \end{equation*}is a (non-constant) polynomial in the variable ${\bf m}$ and we denote it by $c_{\underline{\varepsilon}}({\bf m})$. Thus, we can write \begin{equation*} \big( \widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_0(r) \big)= \big( \widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r) \big) -c_{\underline{\varepsilon}}({\bf m}). \end{equation*}Note that the polynomials $c_{\underline{\varepsilon}}({\bf m})$ are essentially distinct, since the $\widehat{A}_{\underline{\varepsilon},r}$ are essentially distinct. 
In view of the above, we rewrite the quantity in \eqref{Step7average} as \begin{multline}\label{kos} \frac{1}{M}+\Big(\ \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \limsup\limits_{R\to+\infty}\ \sup_{|c_{r,{\bf m}}| \leq 1}\ \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m}} \prod_{\underline{\varepsilon}\in A_1}S^{\floor{\widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r) -c_{\underline{\varepsilon}}({\bf m})} }(\mathcal{C}^{|\underline{\varepsilon}|} H_0)\\ \prod_{\underline{\varepsilon}\in A_2} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_0(r)}}(\mathcal{C}^{|\underline{\varepsilon}|} H_0)\prod_{1\leq i\leq \ell} \prod_{\underline{\varepsilon}\in[[s]]} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_{i}(r)} }(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)} \Big)^{1/2}. \end{multline} Note that \begin{equation*} \floor{\widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r) -c_{\underline{\varepsilon}}({\bf m})}=\floor{\widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r)}+\floor{-c_{\underline{\varepsilon}}({\bf m})}+h_{\underline{\varepsilon},r,{\bf m}}, \end{equation*} where $h_{\underline{\varepsilon},r,{\bf m}}\in \{0,\pm 1\}$. 
Thus, we rewrite \eqref{kos} as \begin{multline} \frac{1}{M}+\Big(\ \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \limsup\limits_{R\to+\infty}\ \sup_{|c_{r,{\bf m}}| \leq 1}\ \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m}}\ S^{\floor{\widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r)} } (\prod_{\underline{\varepsilon}\in A_1} \mathcal{C}^{|\underline{\varepsilon}|} S^{\floor{-c_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon},r,{\bf m}}}H_0)\\ \prod_{\underline{\varepsilon}\in A_2} S^{\floor{\widehat{A}_{\underline{\underline{\varepsilon}},r}({\bf m})+\psi_0(r)}}(\mathcal{C}^{|\underline{\varepsilon}|} H_0)\prod_{1\leq i\leq \ell} \prod_{\underline{\varepsilon}\in[[s]]} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_{i}(r)} }(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)} \Big)^{1/2}. \end{multline}Since $h_{\underline{\varepsilon},r,{\bf m}}$ take values in $\{0,\pm 1\}$, we can use the argument in Lemma \ref{errors} to deduce that \begin{multline*} \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m}}\ S^{\floor{\widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r)} } (\prod_{\underline{\varepsilon}\in A_1} \mathcal{C}^{|\underline{\varepsilon}|} S^{\floor{-c_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon},r,{\bf m}}}H_0)\\ \prod_{\underline{\varepsilon}\in A_2} S^{\floor{\widehat{A}_{\underline{\underline{\varepsilon}},r}({\bf m})+\psi_0(r)}}(\mathcal{C}^{|\underline{\varepsilon}|} H_0)\prod_{1\leq i\leq \ell} \prod_{\underline{\varepsilon}\in[[s]]} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_{i}(r)} }(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)} \leq \\ \sum_{\underset{\underline{\varepsilon}\in A_1}{h_{\underline{\varepsilon},{\bf m}}\in\{0,\pm 1\}}}\ \sup_{|c'_{r,{\bf m}}|\leq 1}\ \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c'_{r,{\bf m}}\ S^{\floor{\widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r)} } 
(\prod_{\underline{\varepsilon}\in A_1} \mathcal{C}^{|\underline{\varepsilon}|} S^{\floor{-c_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon},{\bf m}}}H_0)\\ \prod_{\underline{\varepsilon}\in A_2} S^{\floor{\widehat{A}_{\underline{\underline{\varepsilon}},r}({\bf m})+\psi_0(r)}}(\mathcal{C}^{|\underline{\varepsilon}|} H_0)\prod_{1\leq i\leq \ell} \prod_{\underline{\varepsilon}\in[[s]]} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_{i}(r)} }(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)}. \end{multline*} Thus, our problem reduces to showing that \begin{multline}\label{final} \frac{1}{M}+\Big(\ \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}} \limsup\limits_{R\to+\infty}\ \sup_{|c_{r,{\bf m}}| \leq 1}\ \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m}}\ S^{\floor{\widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r)} } (\prod_{\underline{\varepsilon}\in A_1} \mathcal{C}^{|\underline{\varepsilon}|} S^{\floor{-c_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon},{\bf m}}}H_0)\\ \prod_{\underline{\varepsilon}\in A_2} S^{\floor{\widehat{A}_{\underline{\underline{\varepsilon}},r}({\bf m})+\psi_0(r)}}(\mathcal{C}^{|\underline{\varepsilon}|} H_0)\prod_{1\leq i\leq \ell} \prod_{\underline{\varepsilon}\in[[s]]} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_{i}(r)} }(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)} \Big)^{1/2} \end{multline}goes to $0$ as $M\to+\infty$ (that is, our error terms in the iterates do not depend on $r$ now). In order to be able to apply Proposition \ref{sublinearseminorm}, we need to check that the degree, type and size (as defined in the beginning of Section \ref{sublinearsection}) of the given collection of functions in the iterates is constant, as ${\bf m}$ ranges over $\mathbb{Z}^t$ (so that we can use bounds that are uniform in the variable ${\bf m}$). 
Recall \eqref{splitting}: the ``polynomial component'' of $\widehat{A}_{\underline{\varepsilon},r}({\bf m}) +\psi_i(r)$ is \begin{equation*} \sum_{j=1}^{k_0}p_{\underline{\varepsilon},j}({\bf m})w_j(r), \end{equation*}where the functions $w_j(r)$ are polynomials. The conclusion follows easily: indeed, for any two real polynomials $p_1({\bf m})$ and $p_2({\bf m})$ we must have that they are either equal for all ${\bf m}$, or the set of integer solutions of $p_{1}({\bf m})=p_{2}({\bf m})$ has density zero. Comparing coefficients, it is straightforward to see that outside a set $\Lambda'$ of density zero, the degree, type and size of the collection of functions in the iterates in \eqref{final} is independent of ${\bf m}$ for any ${\bf m} \notin \Lambda'$ (and they all depend only on the initial Hardy field functions $a_1,...,a_k$). In addition, the elements of the leading vector of this collection are polynomials in ${\bf m}$ (we are not concerned with their actual form here). Therefore, we write the leading vector as $(u_1({\bf m}),...,u_{s_0}({\bf m}))$, where $s_0\leq s$ is the size of the given collection of functions, which does not depend on ${\bf m}$ outside our ``negligible'' set. Furthermore, for ${\bf m}$ outside a set of density zero (which we attach to the set $\Lambda'$), we have that all the numbers $u_1({\bf m}),...,u_{s_0}({\bf m})$ are non-zero, and thus we can now apply Proposition \ref{sublinearseminorm} for all ${\bf m}$ outside a negligible subset of $\mathbb{Z}^t$. 
Write ${\bf h}_{{\bf m}}:=(h_{\underline{\varepsilon},{\bf m}},\underline{\varepsilon}\in A_1)$ and $$F_{{\bf m},{\bf h}_{{\bf m}}}:=\prod_{\underline{\varepsilon}\in A_1} \mathcal{C}^{|\underline{\varepsilon}|} S^{\floor{-c_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon},{\bf m}}}H_0.$$ Now, for any ${\bf m}\notin \Lambda\cup \Lambda'$ we apply Proposition \ref{sublinearseminorm} (note we can have at most $2^s(\ell+1)$ different Hardy field functions in the iterates) to deduce that there exist positive integers $t',s'$, a finite set $\widetilde{Y}$ and polynomials $p'_{\underline{\varepsilon},j}$, where $\underline{\varepsilon}\in [[s']]$ and $1\leq j\leq s_0$ (which depend only on the original functions $a_1,...,a_k$), such that \begin{multline*} \limsup\limits_{R\to+\infty}\ \sup_{|c_{r,{\bf m}}| \leq 1}\ \bignorm { \underset{1\leq r\leq R}{\mathbb{E}} c_{r,{\bf m}} \ S^{\floor{\widehat{A}_{\underline{1},r}({\bf m})+\psi_0(r)} } (\prod_{\underline{\varepsilon}\in A_1} \mathcal{C}^{|\underline{\varepsilon}|} S^{\floor{-c_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon},{\bf m}}}H_0)\\ \prod_{\underline{\varepsilon}\in A_2} S^{\floor{\widehat{A}_{\underline{\underline{\varepsilon}},r}({\bf m})+\psi_0(r)}}(\mathcal{C}^{|\underline{\varepsilon}|} H_0)\prod_{1\leq i\leq \ell} \prod_{\underline{\varepsilon}\in[[s]]} S^{\floor{\widehat{A}_{\underline{\varepsilon},r}({\bf m})+\psi_{i}(r)} }(\mathcal{C}^{|\underline{\varepsilon}|}H_{i,R}) }_{L^2(\mu\times \mu)}^{2^{t'}} \ll_{a_1,...,a_k} \\ \frac{1}{M}+ \sum_{{\bf h}\in \widetilde{Y}^{[[s']]}}^{}\underset{{\bf m}'\in [-M,M]^{t'}}{\mathbb{E}} \nnorm{\prod_{\underline{\varepsilon}'\in [[s']]} S^{\floor{A_{\underline{\varepsilon}'}({\bf m}',{\bf m})} +h_{\underline{\varepsilon}'}}F_{{\bf m},{\bf h}_{{\bf m}}}}_{2^{s+1}(\ell+1),S}. 
\end{multline*}Here, we have defined \begin{equation*} A_{\underline{\varepsilon}'}({\bf m}',{\bf m})=\sum_{j=1}^{s_0}p'_{\underline{\varepsilon},j}({\bf m}')u_j({\bf m}). \end{equation*} Therefore, since the set $\Lambda\cup \Lambda'$ has density zero, we use the H\"{o}lder inequality to get that the quantity in \eqref{final} is $\ll_{a_1,...,a_k}$ \begin{equation*} \underset{{\bf m}\in [-M,M]^t}{\mathbb{E}}\big(\sum_{{\bf h}\in \tilde{Y}^{[[s']]}}^{}\underset{{\bf m}'\in [-M,M]^{t'}}{\mathbb{E}} \nnorm{\prod_{\underline{\varepsilon}'\in [[s']]} S^{\floor{A_{\underline{\varepsilon}'}({\bf m}',{\bf m})} +h_{\underline{\varepsilon}'}}F_{{\bf m},{\bf h}_{{\bf m}}}}_{2^{s+1}(\ell+1),S}^{1/2^{t'}} \big)^{1/2} +o_M(1). \end{equation*} Now, we take the limit as $M\to+\infty$ and use the power mean inequality to bound the $\limsup$ of the above quantity by $O_{a_1,...,a_k}(1)$ times a power of \begin{equation*} \limsup\limits_{M\to+\infty} \sum_{{\bf h}\in \tilde{Y}^{[[s']]}}^{}\underset{{\bf m}'\in [-M,M]^{t'}}{\mathbb{E}}\ \underset{{\bf m}\in [-M,M]^{t}}{\mathbb{E}} \nnorm{\prod_{\underline{\varepsilon}'\in [[s']]} S^{\floor{A_{\underline{\varepsilon}'}({\bf m}',{\bf m})} +h_{\underline{\varepsilon}'}}F_{{\bf m},{\bf h}_{{\bf m}}}}_{2^{s+1}(\ell+1),S}. \end{equation*}Our result will follow if we show that for any integers $h_{\underline{\varepsilon}'}$ we have \begin{equation*} \limsup\limits_{M\to+\infty} \underset{{\bf m}'\in [-M,M]^{t'}}{\mathbb{E}}\ \underset{{\bf m}\in [-M,M]^{t}}{\mathbb{E}} \nnorm{\prod_{\underline{\varepsilon}'\in [[s']]} S^{\floor{A_{\underline{\varepsilon}'}({\bf m}',{\bf m})} +h_{\underline{\varepsilon}'}}F_{{\bf m},{\bf h}_{{\bf m}}}}_{2^{s+1}(\ell+1),S}=0. 
\end{equation*}We substitute $F_{{\bf m},{\bf h}_{{\bf m}}}$ to rewrite this limit as \begin{multline}\label{ult} \limsup\limits_{M\to+\infty} \underset{{\bf m}'\in [-M,M]^{t'}}{\mathbb{E}}\ \underset{{\bf m}\in [-M,M]^{t}}{\mathbb{E}} \nnorm{\prod_{\underline{\varepsilon}'\in [[s']]} S^{\floor{A_{\underline{\varepsilon}'}({\bf m}',{\bf m})} +h_{\underline{\varepsilon}'}} \big( \prod_{\underline{\varepsilon}\in A_1} \mathcal{C}^{|\underline{\varepsilon}|} S^{\floor{-c_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon},{\bf m}}}H_0\big)}_{2^{s+1}(\ell+1),S}=\\ \limsup\limits_{M\to+\infty} \underset{{\bf m}'\in [-M,M]^{t'}}{\mathbb{E}}\ \underset{{\bf m}\in [-M,M]^{t}}{\mathbb{E}} \nnorm{\prod_{\underline{\varepsilon}'\in [[s']]}\prod_{\underline{\varepsilon}\in A_1} S^{\floor{A_{\underline{\varepsilon}'}({\bf m}',{\bf m})} +h_{\underline{\varepsilon}'}+\floor{-c_{\underline{\varepsilon}}({\bf m})} +h_{\underline{\varepsilon},{\bf m}}} \big( \mathcal{C}^{|\underline{\varepsilon}|} H_0\big)}_{2^{s+1}(\ell+1),S}. \end{multline} For a fixed ${\bf m}$ outside all the negligible sets defined above, the polynomials $A_{\underline{\varepsilon}'}({\bf m}',{\bf m})$ are pairwise essentially distinct, as polynomials in ${\bf m}'$. Therefore, they are also essentially distinct as polynomials in $({\bf m}',{\bf m})$. In addition, we have also established that the polynomials $c_{\underline{\varepsilon}}({\bf m})$ are non-constant and essentially distinct. Therefore, it is easy to check that the polynomials $A_{\underline{\varepsilon}'}({\bf m}',{\bf m})-c_{\underline{\varepsilon}}({\bf m})$ are pairwise essentially distinct. We combine the integer parts in the iterates in \eqref{ult} (correcting with some error terms with values in $\{0,\pm 1\}$). Expanding the seminorm in \eqref{ult}, we arrive at an iterated limit of polynomial averages. We also use Lemma \ref{errors} to remove the error terms in the iterates. 
Using\footnote{This lemma was proven for a specific F{\o}lner sequence (namely $[N]^k$), but the same argument extends to the general case. See also \cite{Leibmanseveral} for a more detailed proof in the case of integer polynomials.} \cite[Lemma 4.3]{Frajointhardy}, we deduce that the limit in \eqref{ult} is zero under the assumption that $\nnorm{H_0}_{q,T\times T}=0$ for some positive integer $q$. Since \begin{equation*} \nnorm{H_0}_{q,T\times T}=\nnorm{\overline{f_1}\otimes f_1}_{q,T\times T}\leq \nnorm{f_1}_{q+1, T}^2, \end{equation*}we deduce that the desired limit is zero if we assume that $\nnorm{f_1}_{q+1,T}=0$. The result follows. \begin{appendix} \section{Some properties of Hardy sequences}\label{Hardy} \subsection{Growth rates of Hardy functions} We assume that we are working with a Hardy field $\mathcal{H}$ that satisfies the properties mentioned in Section \ref{background}. Such a field contains the Hardy field $\mathcal{LE}$ of logarithmico-exponential functions and, for any two functions $f,g$ that belong to $\mathcal{H}$, we have that the limit \begin{equation*} \lim\limits_{t\to\infty} \frac{f(t)}{g(t)} \end{equation*}exists. We also have the assumptions of closure under composition and compositional inversion that we made in Section \ref{background}. We will use these properties freely. \begin{proposition}\label{prop:basic} Let $f\in\mathcal{H}$ have polynomial growth. Then, for any natural number $k$, we have \begin{equation*} f^{(k)}(t) \ll \frac{f(t)}{t^k}. \end{equation*} In addition, if $ t^{\delta}\prec f(t)$ or $f(t)\prec t^{-\delta}$ for some $\delta>0$, we have \begin{equation*} f'(t) \sim \frac{f(t)}{t}. \end{equation*} \end{proposition} \begin{proof} We will show that the limit \begin{equation*} \lim\limits_{t\to\infty} \frac{tf'(t)}{f(t)} \end{equation*}is finite. Using L'Hospital's rule, the above limit is equal to the limit \begin{equation}\label{log} \lim\limits_{t\to\infty} \frac{\log |f(t)|}{\log t}. 
\end{equation}Since $f$ has polynomial growth, the above limit is bounded. In particular, this implies that \begin{equation*} f'(t)\ll \frac{f(t)}{t}. \end{equation*} The first part now follows by repeated application of this relation. For the second part, we can easily see that the given condition implies that the limit in \eqref{log} is positive in the first case and negative in the second case. Therefore, the limit is non-zero and the claim follows. \end{proof} The above proposition implies that, for any $f\in\mathcal{H}$ of polynomial growth, all derivatives of sufficiently large order of $f$ will converge monotonically to $0$. In addition, we get that for every $k$ sufficiently large, we must have \begin{equation*} f^{(k+1)}(t)\sim \frac{f^{(k)}(t)}{t}. \end{equation*}Indeed, assume that $f(t)\prec t^s$, for some non-integer $s$. Then, we must have $f^{(k)}(t)\prec t^{s-k}$. Thus, if $k$ is large enough, then $f^{(k)}\prec t^{-\delta}$ for some $\delta>0$, which yields our claim. \begin{proposition}\label{growth} Let $f\in\mathcal{H}$ be strongly non-polynomial with $f(t)\succ \log t$. Then, for $k$ sufficiently large, we have\footnote{All the functions defined here belong to $\mathcal{H}$ due to the assumptions we have made on our Hardy field, namely, that it is closed under composition of certain functions.} \begin{equation*} 1\prec |f^{(k)}(t)|^{-1/k}\prec |f^{(k+1)}(t)|^{-1/(k+1)}\prec t. \end{equation*} \end{proposition} \begin{remark*}The above proposition can be proven under the slightly more general condition that $|f(t)-p(t)|\succ \log t$ for all real polynomials $p$ (cf. \cite[Lemma 3.5]{Fraeq}), but we will not need this for the proofs of our main results. We give the proof here for completeness. \end{remark*} \begin{proof} The function $f$ has non-vanishing derivatives of all orders, since it is not a polynomial. Let $d$ be an integer, such that $t^{d}\prec f(t)\prec t^{d+1}$. Then, Proposition \ref{prop:basic} implies that $|f^{(d+1)}(t)|\to 0$. 
Therefore, for any $k\geq d+1$, we have $ f^{(k)}(t)\prec 1$. This, of course, gives the leftmost part of the required inequality. In particular, $(d+1)$ is minimal among the integers $k$, for which $f^{(k)}(t)$ converges to 0. To prove the rightmost inequality of the proposition, it is sufficient to prove that \begin{equation*} f^{(d+1)}(t)\succ t^{-d-1}. \end{equation*}For $k\geq d+1$, the result then follows by successive applications of L'Hospital's rule. In the case $d=0$, the above relation follows easily from L'Hospital's rule. Therefore, we may assume that $d\geq 1$. Now, since $f$ is strongly non-polynomial, we have that the function $f^{(d)}(t)$ goes to infinity. We will show that \begin{equation}\label{agrowth} a'(t)\gg \frac{a(t)}{t\log^2 t} \end{equation}where $a$ is any one of the functions $f,f',...,f^{(d)}$ (cf. \cite[Lemma 2.1]{Fraeq}). The result then follows by noting that \begin{equation*} f^{(d+1)}(t)\gg \frac{f(t)}{t^{d+1}(\log t)^{2d+2}}\gg \frac{1}{t(\log t)^{2d+2}}\succ \frac{1}{t^{d+1}}. \end{equation*}Equation \eqref{agrowth} follows by showing that the limit \begin{equation*} \lim\limits_{t\to \infty} \frac{a'(t)t(\log t)^2}{a(t)} \end{equation*}is infinite. If that is not the case, then we must have \begin{equation*} (\log|a(t)|)'\ll \frac{1}{t(\log t)^2}. \end{equation*}Integrating, we get \begin{equation*} \log |a(t)|\ll \frac{1}{\log t}+c \end{equation*}for some real number $c\in \mathbb{R}$. Thus, the function $\log|a(t)|$ is bounded. However, note that for any choice of the function $a$, we have $|a(t)|\to +\infty$, since the original function $f$ dominates the function $t^d$. This gives a contradiction. It remains to establish the middle part, namely that if $k\geq d+1$, then \begin{equation*} |f^{(k+1)}(t)|^{k}\prec |f^{(k)}(t)|^{k+1}. 
\end{equation*}However, we have $ |f^{(k+1)}(t)|^{k}\ll |f^{(k)}(t)|^k/t^k$ by Proposition \ref{prop:basic} and we easily get the conclusion by combining this relation with the relation $t^{-k}\prec f^{(k)}(t)$ that we established in the previous step. \end{proof} We give here a description of the polynomial approximations that we use in our arguments. Consider a strongly non-polynomial function $f\in\mathcal{H}$ that satisfies $f(t)\succ \log t$. Then, if $k$ is large enough, we can find a function $L(t)\in \mathcal{H}$ such that \begin{equation}\label{L(t)} |f^{(k)}(t)|^{-1/k}\prec L(t)\prec |f^{(k+1)}(t)|^{-1/(k+1)}. \end{equation} Such a function always exists (one can take the geometric mean of the functions $|f^{(k)}(t)|^{-1/k}$ and $|f^{(k+1)}(t)|^{-1/(k+1)}$). We study the function $f$ in small intervals of the form $[N,N+L(N)]$. Observe that if $0\leq h\leq L(N)$, then we have \begin{equation*} f(N+h)=f(N)+\cdots+\frac{h^kf^{(k)}(N)}{k!} +\frac{h^{k+1}f^{(k+1)}(\xi_{h,N})}{(k+1)!} \end{equation*}for some $\xi_{h,N} \in [N,N+h]$. We know that $|f^{(k+1)}(t)|\to 0$ monotonically for $t$ large enough. Then, we observe that (for $N$ sufficiently large) \begin{equation*} \Big|\frac{h^{k+1}f^{(k+1)}(\xi_{h,N})}{(k+1)!}\Big|\leq\Big| \frac{L(N)^{k+1}f^{(k+1)}(N)}{(k+1)!}\Big|\prec 1, \end{equation*}because $L(t)\Big|f^{(k+1)}(t)\Big|^{\frac{1}{k+1}}\prec 1$, by our initial choice of $L(t)$. Using an entirely similar argument, we can prove that \begin{equation*} \Big|\frac{L(N)^{k}f^{(k)}(N+L(N))}{k!}\Big|\to +\infty. \end{equation*}Indeed, since $L$ is a sublinear function, we can easily check that the functions $f^{(k)}(t+L(t))$ and $f^{(k)}(t)$ have the same growth rate and thus we only need to prove that \begin{equation}\label{epr} \Big|\frac{L(N)^{k}f^{(k)}(N)}{k!}\Big|\to +\infty \end{equation}and this follows similarly as above. 
In conclusion, functions that satisfy \eqref{L(t)} have the following characteristic property: the sequence $f(n)$, when restricted to intervals of the form $[N,N+L(N)]$, is asymptotically equal to a polynomial sequence (that depends on $N$) of degree exactly $k$. This motivates us to study the properties of functions $L(t)$ that satisfy \eqref{L(t)}. \subsection{The sub-classes $S(f,k)$}\label{aproximationlemmas} In the proofs of the main theorems, we need to do the above approximation for several Hardy field functions in tandem. In order to achieve this, we will use the results of this subsection. Let $f\in\mathcal{H}$ be a strongly non-polynomial Hardy function such that $f(t)\gg t^{\delta}$, for some $\delta>0$. For example, we exclude functions that grow like $(\log t)^c$, where $c>1$. For such a function $f$ and $k\in \mathbb{N}$ sufficiently large (it is only required that $f^{(k)}(t)\to 0$), we define the subclass $S(f,k)$ of $\mathcal{H}$ as \begin{equation*} S(f,k)=\{g\in \mathcal{H}\ {:}\; |f^{(k)}(t)|^{-\frac{1}{k}}\preceq g(t)\prec |f^{(k+1)}(t)|^{-\frac{1}{k+1}} \}, \end{equation*} where the notation $g(t)\preceq f(t)$ means that the limit $\lim\limits_{t\to\infty} |f(t)/g(t)|$ is non-zero. Note that every $g\in S(f,k)$ is a sub-linear function, that is $g(t)\prec t$. Some very basic properties of the classes $S(f,k)$ are established in the following lemma. \begin{lemma}\label{basic}Let $f\in\mathcal{H}$ be a strongly non-polynomial function with $f(t)\gg t^{\delta}$, for some $\delta>0$.\\ i) The class $S(f,k)$ is non-empty, for $k$ sufficiently large. \\ ii) For any $0< c< 1$ sufficiently close to 1, there exists $k_0\in \mathbb{N}$, such that the function $t\to t^c$ of $\mathcal{H}$ belongs to $S(f,k_0)$. \\ iii) The class $S(f,k)$ does not contain all functions of the form $t\to t^c$, for $c$ sufficiently close to 1. \end{lemma} \begin{proof} i) This follows immediately from Proposition \ref{growth}. 
We can actually show something stronger, namely, that if $f(t)\gg t^{\delta}$ for some $0<\delta<1$, then \begin{equation}\label{20000} \frac{|f^{(k+1)}(t)|^{-\frac{1}{k+1}}}{|f^{(k)}(t)|^{-\frac{1}{k}}}\gg t^{\frac{\delta}{k(k+1)}}, \end{equation}which means that the functions at the "endpoints" of $S(f,k)$ differ by a fractional power. This last inequality follows by combining the relations \begin{equation*} f^{(k)}(t)\gg tf^{(k+1)}(t)\ \ \ \ \text{ and }\ \ \ \ f^{(k)}(t)\gg t^{\delta -k}. \end{equation*} \\ ii) It is sufficient to show that for large $k\in \mathbb{N}$, we have $t^c\ll |f^{(k)}(t)|^{-\frac{1}{k}} $. Fix a non-integer $q$, such that $f(t)\ll t^q$. Then, for any $k\in\mathbb{N}$, we have $f^{(k)}(t)\ll t^{q-k}$. It suffices to show that for large enough $k$ we have \begin{equation*} t^{q-k}\ll t^{-ck}\implies t^{k-q}\gg t^{ck}. \end{equation*}This is obvious, since $c<1$.\\ iii) Similar to ii). \end{proof}\normalfont In essence, the claim implies that the classes $S(f,k)$ form a "partition" of the subclass \begin{equation*} A= \{ g(t)\gg t^c{:}\; \ \exists \ \delta>0,\ \text{ with } g(t)\ll t^{1-\delta } \} \end{equation*} for some $c>0$. That means that any sub-linear function that grows approximately as a (sufficiently large) fractional power must be contained in the union of the $S(f,k)$. This union however does not contain functions that are "logarithmically close" to linear functions, such as $t(\log t)^{-1}$. Although inaccurate, it is instructive to imagine the classes $S(f,k)$ as (disjoint) intervals on the real line. For example, if $S(f,k)=\{g(t){:}\; \sqrt{t}\preceq g(t)\prec t^{2/3}\}$, then we can think that $S(f,k)$ is represented by the interval $[\frac{1}{2},\frac{2}{3})$. The following proposition relates the behavior of the subclasses $S(f,k)$ and $S(g,\ell)$ for different functions $f,g\in\mathcal{H}$. 
\begin{proposition}\label{two classes} For any two functions $f,g\in\mathcal{H}$ as in Lemma \ref{basic} that also satisfy $g(t)\ll f(t)$, we have the following: i) The relation $S(f,k)=S(g,k)$ holds for some $k\in \mathbb{N}$ if and only if $f\sim g$. ii) If $S(f,k)\cap S(g,\ell)\neq \emptyset$, then $k\geq \ell$. In addition, if the function $|f^{(k)}(t)|^{-\frac{1}{k}}$ is contained in $S(g,\ell)$ and $f\not\sim g$, then $k\geq \ell +1$. iii) There exist infinitely many pairs of integers $(k,\ell)$, such that $S(f,k)\cap S(g,\ell)\neq \emptyset$. \end{proposition} \begin{proof} i) It is a straightforward application of L'Hospital's rule.\\ ii) Due to our assumption, we must necessarily have $|g^{(\ell)}(t)|^{-\frac{1}{\ell}}\preceq |f^{(k+1)}(t)|^{-\frac{1}{k+1}}$. If $k<\ell$, then $|g^{(\ell)}(t)|^{-\frac{1}{\ell}}\preceq |f^{(k+1)}(t)|^{-\frac{1}{k+1}}\preceq |f^{(\ell)}(t)|^{-\frac{1}{\ell}}$, which implies that $f^{(\ell)}(t)\sim g^{(\ell)}(t)$. Then, we can easily deduce that $f(t)\sim g(t)$ using the fact that both of these functions are strongly non-polynomial. This implies that $k=\ell$, which is a contradiction. For the second part, we assume that $k=\ell$ and we shall arrive at a contradiction. We have two cases: \begin{itemize} \item If the entire class $S(f,k)$ is a subclass of $S(g,\ell)$, then we easily arrive at a contradiction, since $|f^{(k+1)}(t)|^{-\frac{1}{k+1}} \prec |g^{(k+1)}(t)|^{-\frac{1}{k+1}}$. \item There exists a function $F$, such that $F\in S(f,k)$ and $F\notin S(g,\ell)$. Then, there exists $\ell'>\ell$ with $F\in S(g,\ell')$. Since $S(f,k)\cap S(g,\ell')\neq \emptyset$, we have $\ell<\ell'\leq k$, which is a contradiction. \end{itemize} iii) For any $c$ close to 1, we can find $k$, such that the function $t^c$ belongs to $S(f,k)$ (this follows from the second statement of Lemma \ref{basic}) and similarly for the Hardy function $g$. Then, the intersection $S(f,k)\cap S(g,\ell)$ is non-empty. 
Taking $c\to 1^{-}$ and using the third statement of Lemma \ref{basic}, we can find infinitely many such pairs. \end{proof} \begin{remark*} It is straightforward to generalize the third statement of the above proposition to the case of $k$ distinct functions $f_1,...,f_k$ in $\mathcal{H}$. We will use this observation in our arguments to find a function $L$ in the intersection of these classes. Note that our previous discussion implies that for such a function $L$, all the involved functions $f_1,...,f_k$ will have a polynomial expansion on intervals of the form $[N,N+L(N)]$ and this will play a crucial role in our approximations. \end{remark*} \subsection{The subclasses $S_{sml}(f,k)$} We can similarly define analogs of the classes $S(f,k)$ for functions with small growth rate, that is sub-fractional functions. Let $f\in\mathcal{H}$ be a sub-fractional function such that $\log t\prec f(t)$. If $k\geq 1$, we can define the class \begin{equation*} S_{sml}(f,k)=\{g\in \mathcal{H}{:}\; \ |f^{(k)}(t)|^{-\frac{1}{k}}\preceq g(t)\prec |f^{(k+1)}(t)|^{-\frac{1}{k+1}} \}. \end{equation*} The properties of Proposition \ref{two classes} proven for the classes $S(f,k)$ are carried verbatim to this new setting. The major difference is that now every function $g\in S_{sml}(f,k)$ dominates all functions of the form $t^{1-\delta}$ for $\delta>0$ (an example is the function $t/\log t$). In particular, $S_{sml}(f,k)$ has trivial intersection with the classes $S(h,\ell)$ defined above for any integers $k,\ell$ and appropriate functions $f,h$. As an example, let us consider a fractional power $t^{\delta}$ with $0<\delta<1$ and two functions $f,g\in\mathcal{H}$ such that $f(t)\gg t^{\varepsilon}$ for some $\varepsilon>0$, while $\log t\prec g(t)$ and $g$ is sub-fractional. A typical case is the pair $(t^{3/2},\log^2 t)$. We know that if $\delta$ is close enough to 1, then the function $t^{\delta}$ will belong to $S(f,k)$ for some $k\in\mathbb{N}$. 
Using approximations similar to the ones in the previous subsection, we can see that the sequence $f(n)$ becomes a polynomial sequence of degree $k$ on intervals of the form $[N,N+N^{\delta}]$. On the other hand, the sequence $g(n)$, restricted to the same interval, is $o_N(1)$ close to the value $g(N)$, which means that it is "essentially" constant on this interval. This difference in behavior leads to some added complexity in our proofs, since some of our functions may be approximated by polynomials, while other functions become constant. On the other hand, a function $f\in\mathcal{H}$ with $f(t)\ll \log t$, when restricted to intervals of the form $[N,N+L(N)]$, is $o_N(1)$-close to the value $f(N)$ for any sub-linear function $L(t)$. Functions of this form always collapse to a constant when restricted to intervals of the above form. \end{appendix}
1,108,101,565,843
arxiv
\section{Introduction} X-ray dim isolated neutron stars (XDINs) form an isolated neutron star population among other young neutron star systems, namely anomalous X-ray pulsars (AXPs), soft gamma ray repeaters (SGRs), rotating radio transients (RRATs), high-B radio pulsars (HBRPs), and central compact objects (CCOs). At present, there are seven known XDINs characterized by their thermal X-ray emission with blackbody temperatures ranging from 40 to 110 eV and low X-ray luminosities in the range of $10^{31}-10^{32}$~erg~s$^{-1}$ \citep{Haberl2007, Turolla2009, Kaplan2011}. Rotational periods of XDINs are in the $3$~--~$17$~s range \citep{Haberletall2004, HaberlF2004, Tiengo2007, Kaplan2009a, Kaplan2009b, Hambaryan2017} similar to those of AXPs and SGRs. Their characteristic ages are $\tau_c=P/{2\dot{P}}=(1-4)\times 10^6$~yr, where $P$ and $\dot{P}$ are the rotational period and the period derivative of the neutron star. The kinematic ages are estimated to be between a few $10^5$~yr and $10^6$~yr by \citet{Sepeagle2011} which are consistent with the estimated cooling ages of the sources \citep{Page2009}. Soft gamma bursts, shown by AXPs and SGRs, continuous pulsed radio emission, or short radio bursts seen from RRATs have not been observed from XDINs \citep{Mereghetti2011a}. With the assumption that XDINs evolve with purely magnetic dipole torques, the dipole field strengths are inferred to be $B_0=6.4\times10^{19}(P\dot{P})^{1/2} \sim $ a few $10^{13}$~G at the poles of the sources. These strong dipole fields place XDINs above the pulsar death line in the $B_0-P$ plane \citep{Haberl2007}, while no pulsed radio emission has been detected from these sources yet. It was proposed that the non-detection of pulsed radio emission from XDINs could be due to narrow beaming of their radio emission \citep{Haberl2005}. Recently observed radio pulsar PSR J0726--2612 (hereafter J0726) was proposed to be a good candidate to be an XDIN with an observable radio beam \citep{Rigoselli2019}. 
For this source, $P=3.44$~s, close to the minimum of XDIN periods, and $\dot{P}= 2.93\times 10^{-13} $~s~s$^{-1}$, which give a characteristic age of $\sim 2\times 10^5$~yr. The distance estimated from the dispersion measure \citep{Burgay2006} using the electron distribution model of \citet{Yao2017} gives $d \sim 3$~kpc. Nevertheless, the dispersion measure is likely to be affected by the Gould Belt \citep{Popov2005} crossing the line of sight to J0726. If the source is located in the Gould Belt as suggested by \citet{Sepeagle2011}, then $d \lesssim 1$~kpc. For the model calculations, we take $d = 1$~kpc which gives an X-ray luminosity $L_{\mathrm{x}} \simeq 4 \times 10^{32}$~erg~s$^{-1}$, comparable to the rotational power $\dot{E}=I\Omega_\ast \dot{\Omega}_\ast=2.8\times 10^{32}$~erg~s$^{-1}$ of the source \citep{Rigoselli2019,Vigano2013}, where $I$ is the moment of inertia, $\Omega_\ast$ is the angular velocity of the neutron star and $\dot{\Omega}_\ast$ is its time derivative. Since the rotational properties and $L_{\mathrm{x}}$ of J0726 are in the ranges of those observed from HBRPs ($P = 0.15$~--~$6.7$~s, $\dot{P} = 2.33 \times 10^{-14}$~--~$4.02 \times 10^{-12}$~s~s$^{-1}$, $L_{\mathrm{x}} \simeq 10^{31}$~--~$4 \times 10^{34}$~erg~s$^{-1}$), the source is also classified as a HBRP \citep{Sepeagle2011,Olausen2013,Watanabe2019}. After a supernova explosion, a fallback disc can be formed around the neutron star \citep{Colgate1971,Michel1988, Chevalier1989, Perna2014}. To explain the properties of AXPs, \citet{Chatterjee2000} proposed that these sources are evolving with fallback discs. It was proposed that emergence of different isolated neutron star populations could be explained if the properties of fallback discs are included in the initial conditions together with initial period and magnetic dipole moment \citep{Alpar2001}. 
Fallback discs were invoked to explain different rotational characteristics of isolated neutron stars that are not explained by evolutions with purely dipole torques \citep{Marsden2001, Menou2001, Eksi2003, Yan2012, Lei2013}. Emission properties of the fallback discs were also studied extensively \citep{Perna2000, Ertan2007}. It was shown by \citet{Ertan2007} that the broad-band spectrum of 4U 0142+61 from the optical to mid-IR bands \citep{Hulleman2000, Hulleman2004, Morii2005, Wang2006} can be accounted for by the emission from the entire disc surface. The fallback disc model proposed by \citet{Alpar2001} was developed later including the effects of the X-ray irradiation, cooling luminosity, and inactivation of the disc in the long-term evolution \citep{ErtanE2009, Ertan2014}. When there is a fallback disc around the star, the spin-down torque arising from the interaction of the inner disc with the dipole field of the star usually dominates the magnetic dipole torque. In the fallback disc model, $B_0$ values are estimated to be one to three orders smaller than the values inferred from the dipole torque formula. The long-term evolution of XDINs and HBRPs with fallback discs was studied earlier by \citet{Ertan2014} and \citet{Benli2017, Benli2018}. The model can reproduce $P$, $\dot{P}$ and $L_{\mathrm{x}}$ of individual XDIN and HBRP sources with $B_0$ in the ranges of ($0.3$~--~$1.3$) $\times 10 ^{12}$~G for XDINs and ($0.3$~--~$6) \times 10^{12}$~G for HBRPs. These relatively weak fields together with the long periods place XDINs well below the pulsar death line \citep[see][figure 4]{Ertan2014} in the $B_0-P$ diagram \citep[][]{Chen1993}, while HBRPs with relatively strong fields and/or short periods are located above the death line \citep{Benli2017, Benli2018}. In other words, in the fallback disc model, the lack of radio pulses from XDINs is due to their weak dipole fields, rather than the beaming geometry. 
In this work, our aim is to investigate the long-term evolution of J0726, and compare its properties and evolution with those of XDINs and HBRPs in the fallback disc model. The same model was applied earlier to AXP/SGRs, CCOs, and RRATs as well \citep{Ertan2007,ErtanE2009,Ertan2014,Caliskan2013,Benli2017,Gencali2018}. In Section 2, we briefly describe this model. We discuss the results of model calculations in Section 3, and summarize our conclusions in Section 4. \section{The Model} \label{secconc} Since the details of the model calculations and its applications to different neutron star populations are given in earlier works \citep[see e.g.][]{ErtanE2009, Ertan2014, Benli2016}, we briefly describe the model calculations here. We solve the disc diffusion equation starting with a surface density profile of a steady disc using the kinematic viscosity $\nu = \alpha c_{\mathrm{s}} h$, where $\alpha$ is the kinematic viscosity parameter, $c_{\mathrm{s}}$ is the local sound speed, and $h$ is the pressure scale height of the disc \citep{Shakura1973}. In the accretion with spin-down (ASD) phase, we calculate the disc torque, $N_\mathrm{D}$, acting on the star by integrating the magnetic torques from the conventional Alfv\'{e}n radius, $r_{\mathrm{A}} \simeq (GM)^{-1/7}\mu^{4/7}\dot{M}_{\mathrm{in}}^{-2/7} $ \citep{Lamb1973, Davidson1973}, to the co-rotation radius, $r_{\mathrm{co}} = (GM/\Omega_\ast^2)^{1/3}$ where $G$ is the gravitational constant, $M$ is the mass of the neutron star and $\mu$ is its magnetic dipole moment, and $\dot{M}_{\mathrm{in}}$ is the mass inflow-rate at the inner disc. The magnitude of this torque could be written in terms of $\dot{M}_{\mathrm{in}}$ and $r_{\mathrm{A}}$ as $N_\mathrm{D} = \frac{1}{2} \dot{M}_{\mathrm{in}} (GM r_{\mathrm{A}})^{1/2} \Big[ 1 - (r_{\mathrm{A}}/r_{\mathrm{co}})^3 \Big]$ \citep[see][for details]{Ertan2008}. 
The contributions of the magnetic dipole torque, $N_\mathrm{dip}$, and the spin-up torque associated with accretion on to the star, $N_\mathrm{acc}$, are negligible in the long-term accretion regime of XDINs \citep{Ertan2014}. That is, the total torque acting on the star $N_\mathrm{TOT} = N_\mathrm{D} + N_\mathrm{dip} + N_\mathrm{acc}$ is dominated by $N_\mathrm{D}$ in the ASD phase of XDINs. In this regime, $r_{\mathrm{co}} < r_{\mathrm{A}} < r_{\mathrm{LC}}$, where $r_{\mathrm{LC}}=c/\Omega_\ast$ is the light cylinder radius, and $c$ is the speed of light. During the ASD phase, $r_{\mathrm{A}}$ increases with gradually decreasing $\dot{M}_{\mathrm{in}}$, and eventually becomes equal to $r_{\mathrm{LC}}$. For $\dot{M}_{\mathrm{in}}$ below this critical value, we replace $r_{\mathrm{A}}$ with $r_{\mathrm{LC}}$ in the $N_\mathrm{D}$ equation. In the model, $r_{\mathrm{A}} = r_{\mathrm{LC}}$ is also the condition for the propeller-accretion transition. Since $\dot{M}_{\mathrm{in}}$ enters a sharp decay phase at the end of the ASD phase, exact value of $\dot{M}_{\mathrm{in}}$ for the accretion-propeller transition does not affect our results significantly. In the strong-propeller (SP) phase, we assume that all the matter inflowing to the inner disc is expelled from the system. The pulsed radio emission is allowed only in the SP phase when there is no accretion on to the source. In the ASD phase, the mass accretion on to the star produces an X-ray luminosity, $L_\mathrm{acc} = G M \dot{M}_\ast/R_\ast $, where $R_\ast$ is the radius of the neutron star. In this phase, we take the mass accretion rate $\dot{M}_\ast = \dot{M}_{\mathrm{in}}$. The total X-ray luminosity, $L_{\mathrm{x}} = L_\mathrm{acc} + L_{\mathrm{cool}}$, where $L_{\mathrm{cool}}$ is the intrinsic cooling luminosity of the star \citep{Page2009}. 
In the $L_{\mathrm{cool}}$ calculation, we also include the small contribution of the external torques to the internal heating of the neutron star \citep{Alpar2007}. In the SP phase, $\dot{M}_\ast = 0$, $N_\mathrm{acc} = 0$, and $L_{\mathrm{x}} = L_{\mathrm{cool}}$, since accretion is not allowed in this regime. The disc is heated by X-ray irradiation in addition to the viscous dissipation. The effective temperature of the disc can be written as $T_{\mathrm{eff}} \simeq \Big[(D + F_{\mathrm{irr}}) /\sigma \Big]^{1/4}$ where $D$ is the rate of viscous dissipation per unit area of the disc, $\sigma$ is the Stefan-Boltzmann constant, $F_{\mathrm{irr}} = 1.2 C L_{\mathrm{x}}/ (\pi r^2)$ is the irradiation flux, where $r$ is the radial distance from the star, $C$ is the irradiation parameter, which depends on the albedo and geometry of the disc surfaces \citep{Fukue1992}. The disc becomes viscously inactive below a critical temperature, $T_{\mathrm{p}}$. The dynamical outer radius, $r_{\mathrm{out}}$, of the viscously active disc is equal to the radius currently at which $T_{\mathrm{eff}} = T_{\mathrm{p}}$. Across the outer disc, $F_{\mathrm{irr}}$ dominates $D$, that is, the X-ray irradiation significantly affects the long-term evolution of the source by extending the life-time of the active disc. The main disc parameters ($\alpha$, $C$, $T_{\mathrm{p}}$) for the fallback discs of different neutron star populations are expected to be similar. The same model employed here can reproduce the individual source properties of AXP/SGRs, CCOs, HBRPs and XDINs with $T_{\mathrm{p}} \sim$~($50$~--~$150$)~K, and $C =$ ($1$~--~$7$) $\times 10^{-4}$ \citep{Ertan2006, Ertan2007, Caliskan2013, Ertan2014, Benli2016, Benli2017, Benli2018, BenliCCO2018}. These $T_{\mathrm{p}}$ values in the model are in good agreement with the results of the theoretical work indicating that the disc is likely to be active at very low temperatures \citep{Inutsuka2005}. 
The range of $C$ estimated in our model is similar to that estimated from the optical and X-ray observations of the low-mass X-ray binaries \citep[see e.g.][]{Dubus1999}. We try to obtain the properties of J0726 also with these main disc parameters. This provides a systematic comparison between the initial conditions of different populations, namely the magnetic dipole field strength $B_0$, the initial disc mass $\Md$, and the initial period $P_0$. The $\alpha$ parameter does not significantly affect the long-term evolution. The conditions in a slowly evolving fallback disc are similar to steady-state conditions. The outer regions of the active disc govern the rate of mass-flow to the inner disc. That is, the $\alpha$ parameter in our model should be considered as the property of the outer disc. $T_{\mathrm{p}}$ and $C$ are degenerate parameters. With smaller $T_{\mathrm{p}}$ values the active disc has a longer lifetime. A stronger irradiation (greater $C$) also extends the lifetime of the active disc. A detailed discussion about the effects of these parameters on the evolution of the neutron star can be found in \citet{ErtanE2009}. \section{Results and Discussion} The model curves seen in Fig. \ref{fig:plot} illustrate two different evolutionary histories for J0726: (1) The model source following curve 1 starts its evolution in the ASD phase with $L_{\mathrm{x}} \simeq L_\mathrm{acc}$, and remains in this phase until it makes a transition to the SP phase at $t \sim 3 \times 10^4$~yr. The solid and dashed branches of the curves correspond to the ASD and SP phases respectively. It is seen in the middle and bottom panels that the rapid increase of $P$ stops with sharply decreasing $\dot{P}$ after the ASD/SP transition. (2) Curve 2 represents the evolution of a neutron star that remains always in the SP phase with $L_{\mathrm{x}} \simeq L_{\mathrm{cool}}$. 
For a given $B_0$, the sources with $\Md$ smaller than a critical value cannot enter the ASD phase, and evolve in the SP phase, likely as radio pulsars. The rotational evolution for this type of evolution is sensitive to $\Md$, while for type (1) solution, $P$ and $\dot{P}$ evolution do not significantly depend on $\Md$ \citep[see e.g.][]{Benli2016}. \begin{figure} \centering \includegraphics[width=\columnwidth]{plot_Lx_P_Pdot.pdf} \caption{ Illustrative model curves for the long-term evolution of PSR J0726--2612. The curves are obtained with $B_0$ and $\Md$ (in units of $10^{-6}~M_{\odot}$) values given in the top panel. The main disc parameters employed in both models are $C=1 \times 10^{-4}$, $T_{\mathrm{p}}=100$~K, and $\alpha = 0.045$. Horizontal lines show the observed $P=3.44$~s, $\dot{P}=2.93 \times 10^{-13}$~s~s$^{-1}$, and the estimated $L_{\mathrm{x}}$ range for $d=1$~kpc \citep{Rigoselli2019}. For curve 1, solid and dashed branches correspond to the ASD and SP phases respectively. For the evolution represented by curve 2, the source always remains in the SP phase, and this curve is a more likely representation of the evolution of PSR J0726--2612 (see the text for details). Eventually, $\dot{P}$ curves converge to the levels corresponding to the magnetic dipole torques (shown by two horizontal dotted lines at the bottom of the $\dot{P}$ panel). } \label{fig:plot} \end{figure} \begin{figure} \centering \includegraphics[width=\columnwidth]{plot_Mdot_ra.pdf} \caption{The evolution of the accretion rate, $r_{\mathrm{co}}$, $r_{\mathrm{A}}$ and $r_{\mathrm{LC}}$ in the ASD phase of type (1) evolution (see Fig. \ref{fig:plot}). The accretion is switched off at $t \simeq 3 \times 10^4$~yr, and the system enters the SP phase (see the text).} \label{fig:Mdot_ra} \end{figure} Illustrative model curves seen in Fig. \ref{fig:plot} are obtained with the main disc parameters: $\alpha=0.045$, $T_{\mathrm{p}} = 100$~K, $C =1 \times 10^{-4}$. 
The initial conditions for curve 1 are $P_0=0.3$~s, $\Md = 1.1 \times 10^{-5}~M_{\odot}$, $B_0 = 9 \times 10^{11}$~G. The maximum $B_0$ allowed for the type (1) solution (curve 1) is $\sim 1.2 \times 10^{12}$~G, while the type (2) solution can reproduce the source properties with $B_0 \gtrsim 1.5 \times 10^{12}$~G. For $P = 3.44$~s, the minimum $B_0$ required for the pulsed radio emission is $\sim 1.4 \times 10^{12}$~G. In Fig. \ref{fig:Mdot_ra}, we have also plotted the evolution of $\dot{M}_\ast$, $r_{\mathrm{A}}$, $r_{\mathrm{co}}$ and $r_{\mathrm{LC}}$ in the ASD phase of type (1) solution. Due to the simplifications in our model, we cannot exclude type (1) evolution. Nevertheless, even if the source is inside the death valley, it is too close to the death line, which implies that this solution is not very likely to represent the actual evolution of J0726. Most of the radio pulsars seem to die inside the death valley at points not very close to the pulsar death line. Otherwise, if the sources switch off the radio pulses when crossing the death line, their number density would increase close to the death line, which is not observed. Some of the sources die close to the upper boundary, while some others close to the lower boundary (death line), altogether forming a roughly homogeneous distribution inside the death valley. For our type (1) solution, after termination of the ASD phase, the source finds itself very close to the lower boundary. In this case, the star can show radio pulses only if its actual death point is indeed very close to the lower boundary. Type (2) solution seems to be more reasonable representation of the evolution of J0726. This evolution is similar to those of some of the HBRPs in the same model \citep{Benli2017, Benli2018}. For both solutions, the source is currently evolving in the SP phase at an age $\sim 5 \times 10^4$~yr. 
At present, the star is slowing down dominantly by the disc torque that will eventually decrease below the magnetic dipole torque at $t \sim$~a few ~$\times~10^5$~yr. For instance, for $B_0 = 2 \times 10^{12}$~G, (curve 2) the sharp decrease in $\dot{P}$ will continue down to $\dot{P} \simeq 2.7 \times 10^{-16}$~s~s$^{-1}$. Our results indicate that J0726 will evolve to these ages with a $\dot{P}$ that is about three orders of magnitude smaller than its present value (Fig. ~\ref{fig:plot}). This means that the source is likely to be classified as a normal radio pulsar with $B_0 \sim $ a few $\times 10^{12}$~G deduced from $P$ and $\dot{P}$ at the ages of XDINs. In Fig. \ref{fig:B0-P}, we have plotted the evolution of J0726 in the $P~-~\dot{P}$ and $P~-~B_0$ diagrams together with XDINs and HBRPs with the properties estimated in our model. \begin{figure} \centering \includegraphics[width=\columnwidth]{B0_pdot_p.pdf} \caption{ Long-term evolution in the $P~-~\dot{P}$ and $B_0-P$ diagrams for the same model curves given in Fig. \ref{fig:plot}. XDINs and HBRPs are indicated by triangles and squares respectively. In the $B_0-P$ plane, empty symbols show $B_0$ values inferred from the dipole torque formula using $P$ and $\dot{P}$ values \citep[ATNF Pulsar Catalogue version 1.63,][]{Manchester2005}\protect\footnotemark. The filled symbols indicate the average $B_0$ values estimated in our model \citep{Ertan2014,Benli2017,Benli2018}. The solid lines are the upper and lower borders of the pulsar death valley \citep{Chen1993}. The filled diamonds show the current location of J0726 estimated for type (1) and type (2) solutions. } \label{fig:B0-P} \end{figure} \footnotetext{\url{https://www.atnf.csiro.au/research/pulsar/psrcat/}} \begin{figure} \centering \includegraphics[width=\columnwidth]{plot_Lx_P_Pdot_Tp.pdf} \caption{Illustrative model curves for the long-term evolution of RX J0720.4–-3125 with the updated period and period derivative. 
For both models, $\alpha = 0.045$, $C = 1 \times 10^{-4}$, $P_0 = 0.3$~s, $\Md = 4.74 \times 10^{-6} M_{\odot}$. The curves obtained with $B_0$ and $T_{\mathrm{p}}$ values given in the top panel. The dotted curve indicates the theoretical cooling curve \citep{Page2009}. Horizontal dashed lines show $P = 16.78$~s, $\dot{P} = 1.86 \times 10^{-13}$~s~s$^{-1}$, $L_{\mathrm{x}} = 1.6 \times 10^{32}$ erg~s$^{-1}$ as used in \citet{Ertan2014} assuming $d=270$~pc. There is a large uncertainty in $d = 280^{+210}_{-85}$~pc \citep{Eisenbeiss2011, Tetzlaff2011, Hambaryan2017}.} \label{fig:0720} \end{figure} In our present and earlier works, we employed theoretical cooling curve estimated by \citet{Page2009} for conventional dipole fields. This cooling curve could differ from the actual cooling curve depending on some unknown details of the neutron star properties like equation of state and mass of the star \citep[see e.g.][]{Potekhin2018, Potekhin2020}. For the sources that are currently in the ASD phase, the details of the cooling curve do not affect our results, but could modify our model parameters for sources in the SP phase, like XDINs. The ages of XDINs estimated in our model are on the average a few times smaller than the estimated kinematic ages. If the actual ages are indeed close to the kinematic ages, the source properties can be obtained with $B_0$ values smaller than we reported here and in \citet{Ertan2014} by a factor smaller than two. The field strengths estimated in our model should be considered taking these uncertainties into account. These small changes in $B_0$ do not change the qualitative features of the model curves for XDINs. Recently, the period of RX J0720.4--3125 was updated from $8.39$~s to $16.78$~s by \citet{Hambaryan2017}. The period derivative of the source was also updated from $\sim 7 \times 10^{-14}$~s~s$^{-1}$ to $1.86 \times 10^{-13}$~s~s$^{-1}$ by \citet{HambaryanNeuh2017}. 
For this source, we have performed new simulations and modified the model parameters obtained by \citet{Ertan2014}. Our results and model parameters are given in Fig. \ref{fig:0720}. With the updated period and period derivative, using the same main disc parameters as employed in \citet{Ertan2014}, the model can reproduce the source properties with slightly higher $B_0$ ($(1.3-1.8) \times 10^{12}$~G) values in comparison with the $B_0$ obtained in \citet{Ertan2014}. Similar results could be produced for a large range of disc masses. In our model, the inner disc interacts with the large-scale magnetic dipole field of the neutron star. Close to the surface of the star, there could be quadrupole magnetar fields which could affect the surface temperature distribution and the absorption features \citep{Guver2011}. Presence of these small-scale strong fields in XDINs and other isolated neutron star populations is compatible with the fallback disc model, nevertheless these detailed surface properties are not addressed in our long-term evolution model. Some other spectral features that could be produced by the disc-field interaction should also be studied independently. In particular, \citet{Ertan2017} showed that the heating of the inner disc boundary by the magnetic stresses can account for the optical/UV excesses of XDINs, while the entire disc spectra are consistent with the observed upper limits in the IR bands. We note that there is an uncertainty in the disk spectrum because of unknown inclination angle of the disk. To estimate the entire disk spectrum, at least a single detection in one of the IR bands is needed. At present, there is no IR/optical detection or upper limits estimated for J0726. The X-ray luminosities of XDINs exceed their spin-down powers. 
In our long-term evolution model, this is a natural outcome of rapid increase in periods by efficient disc torques and sharp decrease in $\dot{P}$ in the late SP phase, which leaves the observed spin-down powers below the cooling luminosities of these sources. The current periods of XDINs together with the weak fields estimated in our model place these sources below the pulsar death line \citep[][plotted also in Fig. \ref{fig:B0-P}]{Ertan2014}. This indicates that known XDINs cannot emit radio pulses in our model. There are a few exceptional active radio pulsars that are close to, but below the death line, namely PSR J0250+5854 with $P = 23.5$~s \citep{Tan2018}, PSR J2251--3711 with $P = 12.1$~s \citep{Morello2020}, PSR J2144--3933 with $P = 8.5$~s \citep{Young1999}. Nevertheless, in our model, we find the locations of XDINs well below the death line where there are no radio pulsars. Is it possible that a source with $B_0 \sim$ a few $\times 10^{12}$~G starts in the ASD phase with a greater $\Md$? It is possible, and we estimate that these sources become AXP/SGRs, and evolve to relatively long periods which leave them below the pulsar death line at the end of the ASD phase. In our model, these sources can never become radio pulsar in their lifetimes provided that the accretion is not hindered occasionally due to instabilities at the inner disc. \section{Conclusions} We have shown that $P$, $\dot{P}$ and $L_{\mathrm{x}}$ of J0726 can be achieved by a neutron star evolving with a fallback disc. We have found that there are two possible evolutionary histories that could produce the properties of J0726. For both solutions, the source is in the strong-propeller (SP) phase at present. In the first alternative (curve 1 in Fig. ~\ref{fig:plot}), the star initially evolves in the accretion with spin-down (ASD) phase, and makes a transition into the SP phase at an earlier time of its evolution. For the second type of solution (curve 2 in Fig. 
~\ref{fig:plot}), the source always evolves in the SP phase. Since the X-ray luminosity is powered by the cooling luminosity in the SP phase, the model sources reach the properties of J0726 at an age close to the estimated cooling age ($\sim 5 \times 10^4$~yr) of J0726. The radio pulsars following the type (1) evolution are not likely to be common, since these sources find themselves very close to the pulsar death line after the accretion is switched off. The curve 2 seems to show a more likely evolution for J0726 which is also similar to the evolution of some of the HBRPs, rather than XDINs. The model curves indicate that the source will acquire the rotational properties of normal radio pulsars at the ages of XDINs (Fig. ~\ref{fig:plot}). In our long-term evolution model, the basic difference between the HBRPs and XDINs are the field strengths $B_0$. XDINs with relatively small $B_0$ ($10^{11}~-~10^{12}$~G) tend to start their evolution in the ASD phase, since it is easier for the inner disc to extend down to the co-rotation radius for weaker dipole fields. On the other hand, HBRPs with stronger fields ($B_0 \gtrsim 2 \times 10^{12}$~G) either always evolve in the SP phase, as we estimate for J0726, or make a transition from the initial SP phase to the ASD phase at a later time of evolution. In the latter case, the sources are expected to evolve to the properties of AXP/SGRs \citep{Benli2017}. A detailed comparison of the long-term evolutions and the statistical properties of these neutron star populations in the fallback disc model will be studied in an independent work. \section*{Acknowledgements} We thank the referee for useful comments that have improved this manuscript. We acknowledge research support from Sabanc{\i} University, and from T\"{U}B\.{I}TAK (The Scientific and Technological Research Council of Turkey) through grant 117F144. \bibliographystyle{mnras}
1,108,101,565,844
arxiv
\section{Introduction} \label{sec:introduction} Tensor network (TN) methods have been studied across physics, mathematics, and computer science for their expressive power, interpretability, and computational efficiency~\cite{Kolda2009,Eisert2013,Vervliet2014,Cichocki2015,Sidiropoulos2016,Orus2019,Biamonte2019}. These properties make them well-suited for machine learning, recently leading to several promising results~\cite{Novikov2015, Stoudenmire2016, Cohen2016, Glasser2019}. Under some mild constraints, a TN may be interpreted as a quantum circuit~\cite{Grant2018,Huggins2019,Haghshenas2022}. In some cases, these circuits may be simulated with a computational effort that scales polynomially with the number of qubits, which makes them amenable to numerical exploration on classical computers---in stark contrast with the exponential scaling expected in simulation of arbitrary quantum circuits. One of the underlying challenges in developing quantum TN methods for machine learning is to establish a competitive performance baseline. A classical algorithm, random kitchen sinks (RKS)~\cite{Rahimi07,Rahimi08a,Rahimi08b}, offers some inspiration since it has been shown to be competitive with multilayer neural networks~\cite{May2017}. The principal difference between the two approaches is that RKS, a kernel method, replaces the costly learning of low-level features in a multilayer network with random non-local features. This randomization step may be considered a special kind of feature engineering in a machine learning pipeline. A quantum algorithm based on RKS, known as {\em quantum kitchen sinks} (QKS)~\cite{Wilson2018}, has been shown to produce error rates that are comparable to quantum TN methods~\cite{Huggins2019} at similar qubit utilizations and minimal classical overhead. An important question is whether coherent quantum processing can provide any improvement in classification performance. 
The original QKS results indicated some mild improvement with coherent processing over a small number of qubits and some degradation at a large number of qubits---and no systematic guidance for how coherent processing could be leveraged. Here we attempt to shed some light into this question by combining QKS and TN circuits into a new variational ansatz, and show this combined ansatz compares favorably to both of its ancestors. In both the classical and quantum contexts, the contest between the multilayer networks and relatively shallow (Q/R)KS models is strongly impacted by the design of the network layers and features. It is well-known that any function can be learned by either shallow or multilayer architecture; the essential questions are (1) how efficient is the corresponding architecture and (2) how much work is required in optimizing the architecture to the task~\cite{Bengio2007}. We assess each of these points in turn by separately studying the two modules of the combined QKS and TN protocol. \begin{figure}[h!] \centering \includegraphics[width=\textwidth]{fig/fig1.eps} \caption{(a) Classical data parameterize rotations in quantum kitchen sinks (QKS). Application of quantum rotation gates such as $R_P(\theta)=\cos(\theta/2)I - i\,\sin(\theta/2)P$ (where $P$ is a Pauli operator) results in quantum states whose amplitudes are mixtures of non-linear functions of the classical angles. Variational circuits where such quantum gates are followed with a tree tensor network (TTN) structure coherently process the output of QKS before a small part of the state is measured. (b) Test errors over 10 realizations (median and 68\% credibility intervals) as a function of the number of episodes. Joint optimization of the rotations and TTN lead to binary classifiers with error rates below $1\%$ (on ``3'' vs. 
``5'' handwritten digits from the MNIST dataset).} \label{fig:fig1} \end{figure} A concrete example of a TN is a {\em tree tensor network} (TTN), depicted in (\hyperref[fig:fig1]{Fig. 1a}). In these networks, two collections of $n$ qubits each (where $\chi=2^n$ is referred to as the {\em bond dimension}) interact unitarily, but only one of the resulting collections of $n$ qubits continues on for additional computation. We refer to the quantum circuit that results from combining QKS with TTN as QKS+TTN, in contrast to the previous QKS approach which leveraged linear classifiers (such as support vector machines\footnote{The work of~\citet{Wilson2018} used logistic regression, while we use linear support vector machines here for convenience -- we do not expect a material performance difference between the two.}), which we refer to as QKS+SVM. In this work, we only considered $\chi=2$ and $\chi=4$ and focused on the TTN to build a large coherent computation. An attractive feature of the proposed TTN and other related ansatze (such as multi-scale entanglement renormalization ansatz~\cite{Evenbly2009}) is that they avoid the problem of {\em barren optimization landscapes}, due to the shallow circuit depth~\cite{McClean2018,Wang2020,Cerezo2021,Arrasmith2021}, and their sparse connectivity prevents errors from accumulating pathologically~\cite{Kim2017,Kim2019,Anikeeva2021}, making the implementation of these circuits on near-term hardware appealing. Classification using QKS+TTN requires simulating the evolution of the quantum state through fixed circuits (parameterized by the classical input data), which are called kitchen sinks or \textit{episodes}~\cite{Wilson2018}, and the TTN, which concludes with the application of a measurement on the remaining $n$ qubits at the root of the tree~\cite{Grant2018,Huggins2019}. For a {\em single-shot} execution of the circuit, the resulting outcome corresponds to the classical output of the classifier. 
For a {\em multi-shot} execution of the circuit, we consider the application of a linear classifier to the observed outcome frequencies. For binary classification, we may consider measuring a single qubit out of the remaining $n$, but more general classification tasks may use all possible $\chi$ outcomes of the final measurement. \textbf{Contributions}. We propose a new circuit ansatz where a TTN coherently processes the non-local feature maps of QKS. We empirically evaluate the performance of the new ansatz on image classification. In terms of classification performance, we find that simply combining QKS and TTN yields no qualitative improvements. However, training QKS+TTN with \textit{feature optimization} (QKS+TTN+FO) significantly boosts performance and improves qubit utilization over the QKS+SVM baseline (\hyperref[fig:fig1]{Fig. 1b}), leading to state-of-the-art quantum circuits for image classification, while requiring only shallow quantum circuits and a small number of qubits -- both well within reach of near-term quantum devices. A crucial feature of the new ansatz is how classical data are mapped to quantum states. Previous work considered \textit{local feature maps}~\cite{Grant2018,Huggins2019}, where each classical dimension $x_i$ was encoded in one qubit amplitude $\cos(x_i)\ket{0}+\sin(x_i)\ket{1}$ (or similar non-linear encoding), which required a number of qubits equal to the number of classical features in the problem (synthetic features may be naturally included as desired). Our work uses the \textit{non-local feature map} provided by QKS so that the quantum state corresponding to each episode contains information about the entire data. This decouples the correlation structure in the data from the quantum circuit structure so that we may apply the same TN structure to datasets of different dimensionality and correlation structure. 
\section{Methods and Benchmarks} \label{sec:methods} We begin by describing the datasets and the methods used for QKS+SVM, QKS+TTN, and QKS+TTN+FO. We present two approaches to reducing the complexity of the overall model: (\textit{i}) translational symmetry in the TTN and (\textit{ii}) sparsity in the features. \textit{Benchmarks}. The MNIST dataset~\cite{LeCun1998} is a well-known benchmark in machine learning. Each digit is a $(28 \times 28)$-pixel, 8-bit grayscale image of a handwritten digit. We treat each image as a vector with dimension $28^2=784$ and split the dataset into 60,000 training and 10,000 testing images. While it is a benchmark for multi-class classification, we choose to focus on the binary classification two digits that are difficult to distinguish: the handwritten digits ``3'' and ``5'', which we refer to as \textit{(3,5)-MNIST}. All licensing information for existing assets can be found in the supplementary material. \textbf{Establishing a baseline with quantum kitchen sinks}. Consider an unknown target function $f: {\tt Data} \rightarrow {\tt Labels}$. The general approach to ``learning'' the target function is to approximate it with some structured parameterized function $g_{\boldsymbol\theta}$ taken from some \emph{hypothesis space}. In deep learning, such $g_{\boldsymbol\theta}$ is structured as a parameterized composition of a large number of simple non-linear functions, and the many parameters of this composition all have to be learned for optimal performance. As an alternative, Rahimi and Recht~\cite{Rahimi07,Rahimi08a,Rahimi08b} have shown that it was possible to produce a sufficiently rich set of hypothesis functions $g(\cdot,\boldsymbol\theta)$ as weighted linear sums of simple non-linear functions with \emph{random} parameters. 
The weights in the summary linear combination of such non-linear terms still need to be learned, but this is a linear learning step that can be done at a significantly lower cost than learning a full customary neural network. Results presented in~\cite{Rahimi07,Rahimi08a,Rahimi08b} suggest that there are multiple reasonable choices of non-linearities that can be used as structural blocks for RKS: cosine, sign, and indicator functions in particular. \begin{figure}[h!] \begin{center} \includegraphics[scale=0.75]{fig/fig2.eps} \end{center} \caption{A quantum kitchen sink (QKS), highlighted in \textcolor{lightblue}{blue}, is made up of independent episodes, each consisting of a unitary operation applied to $\ket{0}$ state . Each unitary is parameterized by a random, fixed vector (\textcolor{grey}{grey}) and the classical input. The resulting tensor product state is then measured in a fixed basis, and the outcomes are processed further by classical post-processing (\textcolor{darkred}{red}) to generate a classical output. This procedure can be engineered to approximate any function of the classical input.\label{fig:fig2}} \end{figure} The quantum kitchen sinks (QKS) algorithm proposed by~\citet{Wilson2018} leverages the connection between trigonometric functions and qubit rotations to implement the required non-linearities. Concretely, the original proposal used \emph{linear} random mixtures of features as angles for single-qubit rotations, optionally followed by entangling operations. A quantum circuit composed of such operations impacts the amplitudes of a quantum state with cosines and sines of weighted mixtures of classical data. More specifically, let $\mathbf{x} \in \mathbb{R}^p$ be a $p$-dimensional feature vector. Let $\boldsymbol\Omega = (\omega_1, \ldots, \omega_q)^T$ be a randomized matrix, where for each $k$, $\omega_k$ is a $p$-dimensional vector with $r\leq p$ elements having random values and remaining elements being exactly zero. 
We can also specify a random $q$-dimensional bias vector $\boldsymbol\beta$. Then we get our set of random quantum circuit parameters $\boldsymbol\theta = \boldsymbol\Omega \, \mathbf{x}+\boldsymbol\beta$. For a sufficiently large count $E$ interpreted as the \emph{number of episodes} we repeat this randomized synthesis $E$ times to form a set of \emph{encoding parameters} $\{\boldsymbol\Omega_e,\boldsymbol\beta_e\}_{e=1}^E$. This set of parameters is drawn only once and becomes a permanent part of a QKS solution. The $\boldsymbol\Omega_e$ and $\boldsymbol\beta_e$ elements can be drawn from various statistical distributions---e.g., normal, uniform, etc.---and parameters of these distributions become hyperparameters of the method. Choice of variances, for one, has a strong impact on the outcomes---as shown by a cross-validation grid-search in the supplementary material. Once we have encoded the data for an episode $e \in [E]$ as vectors of circuit parameters $\boldsymbol\theta_e(\mathbf{x}) = \boldsymbol\Omega_e \, \mathbf{x} + \boldsymbol\beta_e, \, \mathbf{x} \in {\tt Learning data}$ we need to choose an ansatz for quantum circuit(s) driven by these parameters. As shown by~\citet{Wilson2018} the circuit structure can be quite simple provided that the corresponding unitary transformation depends strongly on the $\boldsymbol\theta$s. For example, the one-qubit ansatz presented in \hyperref[fig:fig2]{Fig. 2} has been proven to work well in multiple datasets. As suggested by the figure, the quantum step starts with some basic quantum state, e.g., $\ket{0}$, and the circuit is followed by a full measurement that extracts a classic bit string and collapses the state. This concludes the quantum feature pre-processing step. The bits extracted by measurement of multiple episodes form an aggregated feature vector that is then fed into a classical linear classification algorithm. 
The critical difference between QKS and classical RKS is that in the case of QKS, the aggregated feature vector is by itself stochastic even though the encoding parameters $\{\boldsymbol\Omega_e,\boldsymbol\beta_e\}_{e=1}^E$ are fixed. This is due to the stochastic nature of quantum measurement. Therefore it might be crucial to allow multiple runs (referred to as \emph{shots}) of the same circuit within each episode and average measurement results across these runs. Such a multi-shot approach provides a more accurate representation of non-linearities induced by the quantum encoding. In either case, the totality of quantum steps generates an aggregated feature vector that is post-processed further. \citet{Wilson2018} used Logistic Regression (LR) for the classical post-processing, while here, we explore several different approaches (both classical and quantum) to achieve better performance. \textbf{Coherently processing quantum kitchen sinks with tensor networks}. Although the non-linearities provided by QKS are sufficient to approximate arbitrary functions of the classical inputs (and may therefore be used for a wide range of machine learning tasks), it is not apparent how to optimally engineer multi-qubit episodes, much less how performance may depend on the number of qubits in an episode. The results of \citet{Wilson2018} indicated some potential improvement with the number of qubits per episode, but also considered only fixed instances of arbitrarily chosen multi-qubit circuits. In this section, we explore an approach to address this question by considering coherent processing of the output of the QKS with highly structured shallow quantum circuits: tree tensor networks (TTN). We chose to focus on TTNs due to computational convenience, but expect additional gains may be possible with other TN structures. The original QKS proposal used tensor product measurements, followed by (largely) unstructured classical post-processing of measurement outcomes. 
Here, we wanted to consider measurements that have an efficiently contractible TN structure (\hyperref[fig:fig3]{Fig. 3}). This change resulted in a variational quantum circuit with multiple layers of coherent processing. The architecture design consisted of two main modules: the non-local features and TTN. A TTN will map $E/\lg \chi$ qubits down to $n$ qubit in $O(\lg E)$ depth by successively discarding (or rather, ignoring) half of the qubits. Note that the TTN we employ here is a slight modification of the TTN considered in condensed matter studies~\cite{Shi2006,Tagliacozzo2009,Murg2015,Nakatani2013}. In particular, our TTN is not made up of isometries (unitaries followed by projections on some of the outputs) because that would correspond to post-selection on measurement outcomes at each tensor and an exponentially small probability of post-selection success for the overall circuit~\footnote{This may be acceptable in networks that aim to train {\em generative models}, which would be employed by running the networks backward and treating the measurements in the isometries as state preparation. However, such an approach cannot be directly employed with QKS.}. Instead, in our TTN we discard some of the output qubits for each tensor in the tree, so in some sense, our TTN is ``dissipative'', with the advantage that it does not require post-selection. This changes the connectivity of the network compared to the isometric case. However, it only translates to an effective increase in the bond dimension, which is why the contraction cost has asymptotic scaling proportional to $\chi^7$ instead of the usual $\chi^3$ for isometric TTN (see details on computational complexity in the supplementary material). \begin{figure}[!ht] \begin{center} \includegraphics[scale=0.75]{fig/fig3.eps} \end{center} \caption{Coherent post-processing of the quantum features with a tree tensor network (TTN), highlighted in \textcolor{lightblue}{blue}. 
The TTN is a variational quantum circuit consisting of unitary interactions between qubits. In the $\chi=2$ case, a unitary---represented by a uniquely parameterized tensor---operates on two qubits. Only one of the resulting qubits continues onto the next layer, while the other is measured and discarded (as depicted by the black squares). Furthermore, in a departure from the pure randomization approach of QKS, here we also consider optimizing the features (\textcolor{gold}{yellow}) as well as the TTN, using randomization only to initialize the QKS.} \label{fig:fig3} \end{figure} The $O(\chi^7)$ scaling can be understood by noting that the TTN can be contracted efficiently starting at the leaves. Episodes interact via $\chi^2\times\chi^2$ unitaries (two input indices and two output indices, which are then doubled because we consider density matrices), and a partial trace must be computed to discard half of the episodes at the output of the unitary. Overall this computation requires summing over four input indices, two output indices, and a final index for the degrees of freedom that are traced out, for a total of $7$ indices of dimension $\chi$. After the leaves are contracted with the first layer of unitaries, the TTN structure is recovered, so the next layer can be contracted with the same complexity. This does require a number of contractions that is linear in $E$, but parallelism allows for $O(\lg E)$ time contraction. A quantum computer may implement the contraction by decomposing the $\chi^2\times\chi^2$ unitary into qubit interactions. This can be done in depth $O(\chi^4)$ according to the Solovay-Kitaev theorem~\cite{NielsenChuang00}, pointing to a potential cubic speed-up over the classical implementation. The more meaningful (and subtle) comparison to other classical classification algorithms in terms of the scaling necessary to achieve a fixed target classification accuracy is not addressed here and remains an open problem. 
\textbf{Architecture Design \& Training}. In the proposed architectural design, each input datum $\mathbf{x}$ is translated into a product state of the form $ \hat{\rho}_{0}\left(\vec{x}_{n}\right) \otimes \hat{\rho}_{1}\left(\vec{x}_{n}\right) \otimes \hat{\rho}_{2}\left(\vec{x}_{n}\right) \otimes \cdots \otimes \hat{\rho}_{E}\left(\vec{x}_{n}\right) $ where $E$ is the number of episodes. If we average over all training data in a class $\ell$, we obtain the separable state $ \hat{\rho}^{\ell}=\frac{1}{N_{\ell}} \sum_{n=0}^{N_{\ell}} \hat{\rho}_{0}\left(\vec{x}_{n}^{\ell}\right) \otimes \hat{\rho}_{1}\left(\vec{x}_{n}^{\ell}\right) \otimes \hat{\rho}_{2}\left(\vec{x}_{n}^{\ell}\right) \otimes \cdots \otimes \hat{\rho}_{E}\left(\vec{x}_{n}^{\ell}\right) $ where $N_{\ell}$ is the number of training examples with class label $\ell$. In the following, we assume binary classification with $\ell \in \{0,1\}$ for simplicity. However, all the equations can be straightforwardly generalized for a multi-class setting. \textit{Tensor network optimization}. We may consider training the TTN by first fixing the QKS parameters. The unitary tensors are optimized by gradient descent methods applied to the training objective described below and in the supplementary material. Similar to~\citet{Huggins2019}, the unitaries are expressed as matrix exponentials of anti-hermitian matrices, while anti-hermiticity (and thus unitarity) is preserved by an appropriate choice of parameterization. We optimize the TTN by minimizing $\Pr(\text{error}) = 1 - \sum_{\ell\in L} \tr\hat{\rho}^\ell \hat{M}_\ell/|L|$, the probability of single-shot classification error in the training set, where $L$ is the set of labels and the $\hat{M}_\ell$ are the elements of a positive operator-valued measure (POVM). 
The $\hat{M}_\ell$ are implicitly defined by the TN and fixed projectors at the root of the tree TN, i.e., $\hat{M}_\ell = \mathcal{E}^\dagger(\ketbra{\ell}{\ell})$ for some completely-positive trace-preserving (CPTP) map $\mathcal{E}$ corresponding to the state evolution in the dissipative TN. For simplicity, we use the same objective for single-shot and multi-shot classification\footnote{The performance in the multi-shot case is not determined directly by the objective, but rather by numerically training and testing a linear classifier on the observed outcomes frequencies for multiple experimental shots on each training/testing example (i.e., 2 dimensional real-valued vectors)}, although this approach requires modification for multi-class multi-shot classification. Due to the normalization condition on the POVM, binary classification yields $\Pr(\text{error}) = \frac{1}{2}-\frac{1}{2}\tr\left[(\hat{\rho}_0-\hat{\rho}_1)\mathcal{E}^\dagger(\ketbra{0}{0})\right]$, so that we may take the maximization of $f=\tr\left[(\hat{\rho}_0-\hat{\rho}_1)\mathcal{E}^\dagger(\ketbra{0}{0})\right]$ as our objective instead (with similar expressions for the multi-class case). It is inconvenient to manipulate $\hat{\rho}_\ell$ directly, as these matrices have size that is exponential in $E$, but $c_i(\bar{\ell},\ell)=\tr\left[\hat\rho_i^{\bar{\ell}}\mathcal{E}^\dagger(\ketbra{\ell}{\ell})\right]$ can be computed by contracting the TN for any given training example $\hat{\rho}_i^{\bar{\ell}}$, which can be done on a classical computer by only manipulating tensors of a fixed dimension in $O(\chi)$ for $(\chi\ge|L|)$. On a quantum computer, $c_i(\bar{\ell},\ell)$ may be estimated by running the state preparation and TN as circuits, and estimating the probability of obtaining outcome $\ell$ at the final measurement for state preparation $\hat{\rho}_i^{\bar{\ell}}$. 
Computing the objective function requires contracting the entire TN for each training example, which is excessive in some cases. This can be replaced by considering only random mini-batches of the training set using stochastic variants of gradient descent (see supplementary material). Suppose we choose to optimize each tensor sequentially. In that case, we may partially contract the TN so that at each step of the optimization, the objective reduces to the product of three small matrices---the unitary tensor, its conjugate, and a non-unitary tensor corresponding to the partial contraction of the remainder of the network which remains fixed---in a manner that is reminiscent of {\em quantum combs}~\cite{Chiribella08}. However, we find that globally updating all tensors leads to faster convergence and lower computational overhead. \textit{Feature optimization}. In a departure from the pure randomization approach of kitchen sinks, here we also consider optimizing the features as well as the tensor network, using randomization only to initialize the QKS. This additional optimization step can also be approached with gradient descent. For an episode $e \in [E]$, we computed the gradient of the global objective $f$ of the ansatz, $ \nabla f=\left[\frac{\partial f}{\partial \omega_{1}}, \frac{\partial f}{\partial \omega_{2}}, \cdots, \frac{\partial f}{\partial \omega_{d}}, \frac{\partial f}{\partial \beta}\right], $ with respect to parameters $\{\mathbf{\Omega}_{e}, \boldsymbol\beta_{e}\}$ of the driving angles $\mathbf{\theta}_{e}(\mathbf{x})=\mathbf{\Omega}_{e} \mathbf{x}+\boldsymbol\beta_{e}$. 
For a batch of examples indexed by $n \in [N_{\ell}]$, the gradient elements are $ \frac{\partial f}{\partial \omega_{i}}=\frac{1}{N_{\ell}} \sum_{n}\left(\frac{\partial \rho_{n}}{\partial \theta_{n}} x_{n, i}\right) \cdot V_{n}$ and $\frac{\partial f}{\partial \beta}=\frac{1}{N_{\ell}} \sum_{n} \frac{\partial \rho_{n}}{\partial \theta_{n}} \cdot V_{n}$ where $\rho_n$ represents the density matrix of the feature being optimized and ``$\cdot~V_n$'' represents contraction with the remainder of the tensor network. With the gradient in hand, we sequentially optimize each feature with respect to the global objective. Additional architecture and training details can be found in the supplementary material. \textbf{Reducing model complexity via translational symmetry and sparsity constraints}. We explore two approaches for reducing model complexity by imposing (\textit{i}) translational symmetry on the TTN (see motivation in the supplementary material) and (\textit{ii}) sparsity on the features. Translational symmetry is enforced by requiring all tensors at a fixed distance from the root to be identical, while sparsity is enforced by setting a fixed (random) fraction of $\boldsymbol\Omega_{e}$ to 0. These constraints reduce the size of the hypothesis space and could offer better generalization, that is, up to a point where the model begins to underfit. Parsimonious models with minimal complexity could exist right before this transition. \begin{figure}[h!] \begin{center} \includegraphics[scale=0.75]{fig/fig4.eps} \end{center} \caption{Translational symmetry and sparsity may be imposed on the TN and features, respectively, thereby reducing the number of parameters in the model. 
Note the sparser connectivity between data features and quantum rotations (\textcolor{teal}{teal}), and the layer-wise symmetry of the tensors (\textcolor{orange}{orange}).} \label{fig:fig4} \end{figure} \section{Results} \label{sec:results} In this section, we present the results of several numerical experiments on QKS, QKS+TTN, and QKS+TTN+FO. We begin by establishing the baseline performance of QKS. Next, we combine QKS with a TTN---we found that simply combining QKS and TTN did not improve classification performance over the QKS baseline. However, the addition of feature optimization greatly improved the performance and yielded state-of-the-art quantum circuits for image classification. Finally, for the most complex networks, we show that imposing translational symmetry and sparsity constraints led to dramatic reductions in the number of free parameters while having minimal impact on classification performance. \textbf{Baseline performance of quantum kitchen sinks}. The original QKS design, as described by~\citet{Wilson2018}, was intentionally limited to circuits where the quantum encoding of the data features was fed into a linear post-processing layer. For the (3,5)-MNIST dataset, error rates between 3.3\% and 3.7\% were reported in the experiments that ran on noisy quantum hardware. Results obtained on a noiseless simulator showed error rates at or slightly below 2\%. While these error rates showed a lift over a linear SVM (which was the stated intent of the original QKS research), the performance has been inferior compared to the state of the art for (3,5)-MNIST classification. \begin{figure}[h!] \begin{center} \includegraphics[width=\textwidth]{fig/fig5.eps} \end{center} \caption{(a) Classification error rates for (\textit{3,5})-MNIST over 100 realizations (median and 68\% credibility intervals) as a function of the number of QKS episodes. 
Allowing multiple shots at the one-qubit ansatz (multi-shot) led to better performance compared to single shots at the same circuits (single-shot). (b) Restricting the training dataset to a fraction of its original size ($f=0.1$ and $f=0.01$) limited the achievable classification performance, and (c) the observed dependency on the size of the training dataset was fit to a power law $y(f)=a~f^b$ where $(a,b) = (0.72 \pm 0.18, -0.48 \pm 0.06)$, consistent with the $O(1/\sqrt{N})$ predictions from theoretical results~\cite{Rahimi07,Rahimi08a,Rahimi08b}.} \label{fig:fig5} \end{figure} We established a more competitive baseline than the one given in the original QKS proposal by allowing multiple runs, or \textit{shots}, of the quantum circuits. Compared to the single-shot approach, the multi-shot approach offered better error rate scaling as a function of the number of episodes (\hyperref[fig:fig5]{Fig. 5a}). At the largest number of episodes we tested ($E=10,000$), the single-shot approach yielded a test error of $1.87 \pm 0.28$ (mean $\pm$ std.) on \textit{(3,5)-MNIST}, which was consistent with previous observations~\cite{Wilson2018}, while the multi-shot approach offered a significant reduction to $0.78 \pm 0.12$. Further, using the multi-shot approach, we validated the scaling of the error rate as a function of the training set size ($N$) against theoretical guarantees for random feature algorithms. We simulated smaller datasets by taking fractions of the training dataset ($f=0.1$ and $f=0.01$), which shifted the noise floor higher at a large number of episodes (\hyperref[fig:fig5]{Fig. 5b}). 
The empirical dependence of the noise floor on the size of the training set was fitted to a power law $y(f)=a~f^b$ via a non-linear least-squares procedure yielding $(a,b) = (0.72 \pm 0.18, -0.48 \pm 0.06)$,\footnote{The uncertainties are computed by linearization of the fit model in the neighborhood of the least-squares optimum, and correspond to a single standard deviation along each coordinate.} which is consistent with the $O(1/\sqrt{N})$ predictions from theoretical results~\cite{Rahimi07,Rahimi08a,Rahimi08b}. For the full dataset ($f=1$) and a large number of episodes ($E \rightarrow \infty$), the power law predicts an error of $a = 0.72 \pm 0.18$, which is consistent with the error $0.78 \pm 0.12$ observed at $E=10,000$. Note that the one-qubit QKS ansatz corresponds to the random Fourier features of RKS; therefore, we treat QKS+SVM as the classical model and explore improvements against this baseline via coherent post-processing. \textbf{Coherent processing of non-local features yields compact networks}. From the results presented in the preceding section, it was clear that many episodes were required for QKS to perform well. Up to this point, the quantum states generated by QKS have been measured and classically post-processed with a linear SVM, and the parameters of QKS have been randomly drawn/fixed. Here we have replaced classical post-processing with quantum post-processing---QKS+SVM to QKS+TTN---and optimized the features along with the tensor network (\textit{feature optimization}), using randomization only to initialize QKS. We found that QKS+TTN yielded no qualitative improvements, however, the addition of feature optimization (QKS+TTN+FO) greatly boosted performance. \begin{figure}[h!] \begin{center} \includegraphics[width=\textwidth]{fig/fig6.eps} \end{center} \caption{(a) Performance of combined protocols on (\textit{3,5})-MNIST over 10 realizations (median and 68\% credibility intervals) as a function of the number of episodes or qubits. 
It shows that feature-optimized networks offer a significant performance lift over the QKS baseline while admitting a $20 \times$ reduction in qubit utilization. (b) Imposing translational symmetry constraints on the TN degrades the performance of $\chi=2$ networks but enhances the performance of $\chi=4$ networks.} \label{fig:fig6} \end{figure} We compared the performance of networks {\em without} feature optimization (QKS+TTN) against those {\em with} feature optimization (QKS+TTN+FO). Networks without feature optimization (QKS+TTN) closely tracked the QKS+SVM baseline, while networks with feature optimization (QKS+TTN+FO) outperformed QKS+SVM and admitted a $20 \times$ reduction in the number of episodes/qubits (\hyperref[fig:fig6]{Fig. 6a}). Therefore, it appears that \textit{feature optimization is necessary for the formation of compact networks}. \textbf{Reducing model complexity: translational symmetry and sparsity constraints}. In the previous section, we showed that QKS+TTN+FO achieved better performance than QKS+SVM at a fraction of the number of episodes. We also reduced the model complexity, as measured by the number of free (trainable) parameters, by imposing translational symmetry on the TTN and sparsity on the features. Translational symmetry constraints on the TTN had varying effects, depending on the bond dimension of the network (\hyperref[fig:fig6]{Fig. 6b}). While performance was degraded in the $\chi=2$ networks, the performance of $\chi=4$ networks was equivalent or slightly better (\hyperref[table:model-reduction]{Table 1})---while admitting an exponential reduction in the number of parameters in the TTN. \begin{table}[h!] \caption{Translational symmetry and sparsity constraints were imposed on the most complex model we tested, a QKS+TTN+FO network with 512 qubits and $\chi=4$. Imposing these constraints yielded dramatic reductions in the number of free parameters. 
A combination of translational symmetry and 50\% sparsity constraints yielded the best test errors (mean $\pm$ std.) on (\textit{3,5})-MNIST, in \textbf{bold}.} \centering \begin{tabular}{ccc} \\ \hline \rule{0pt}{2ex}Model Reduction & Free Parameters (Fixed) & Test Error (\%)\\ \hline \rule{0pt}{2ex}No symmetry or sparsity & $466,945$ & $0.44 \pm 0.13$\\ Transl. Symmetry & $403,960$ & $0.35 \pm 0.07$\\ Transl. Symmetry + Sparsity (50\%) & $203,256$ & $\mathbf{0.35 \pm 0.04}$\\ Transl. Symmetry + Sparsity (25\%) & $102,904$ & $0.37 \pm 0.07$\\ Transl. Symmetry + Sparsity (13\%) & $52,728$ & $0.45 \pm 0.08$\\ Transl. Symmetry + Sparsity (6\%) & $27,640$ & $0.65 \pm 0.18$\\ \hline \end{tabular} \label{table:model-reduction} \end{table} Further, in the most complex model we tested, a QKS+TTN+FO network with 512 episodes and $\chi=4$, imposing both translational symmetry on the TTN and \textit{sparsity} in the features revealed a regime of parsimonious models (\hyperref[table:model-reduction]{Table 1}). The design of the best performing model on (\textit{3,5})-MNIST had a combination of constraints: a TTN with translational symmetry and features with 50\% sparsity. Note, however, that all parsimonious models had test errors below the QKS baseline. \textbf{State-of-the-art quantum circuits for image classification}. We performed benchmarks on a broader set of classification tasks. On the hardest MNIST binary classification tasks, the non-local feature maps of QKS led to significantly lower test errors compared to local feature maps, when the pre-processing and architecture/training configurations were matched (Table~\ref{table:state-of-the-art}). The best performing QKS+TTN+FO model (with translational symmetry) from the previous section led to the lowest test errors we observed. These performance gains motivated a larger investigation into the full MNIST dataset and the general multi-class problem. \begin{table}[h!] 
\caption{Non-local feature maps led to state-of-the-art quantum circuits for image classification, as demonstrated on the hardest MNIST binary classification problems. We ran a \textit{control} experiment that matched the pre-processing and configuration (64 qubits, $\chi=2$, a mini-batch size of 222, and 30 epochs) of a similar quantum tensor network approach employing local feature maps~\cite{Huggins2019}. Additionally, we tested the \textit{best} performing QKS+TTN+FO model (with translational symmetry), which involved no pre-processing and the following configuration: 512 qubits, $\chi=4$, a mini-batch size of 32, and 40 epochs.} \centering \begin{tabular}{P{0.4\linewidth}P{0.07\linewidth}P{0.07\linewidth}P{0.07\linewidth}P{0.07\linewidth}P{0.07\linewidth}} \\ \hline \rule{0pt}{2ex}Quantum Encoding & 3 vs. 5 & 4 vs. 9 & 7 vs. 9 & 3 vs. 9 & 2 vs. 7\\ \hline \rule{0pt}{2ex}Local feature maps~\cite{Huggins2019} & $12.4\%$ & $12.0\%$ & $10.7\%$ & $5.9\%$ & $4.3\%$ \\ Non-local feature maps (control) & $5.1\%$ & $4.2\%$ & $4.5\%$ & $2.5\%$ & $2.2\%$ \\ Non-local feature maps (best) & $0.4\%$ & $1.1\%$ & $0.8\%$ & $0.8\%$ & $0.9\%$\\ \hline \end{tabular} \label{table:state-of-the-art} \end{table} For the multi-class classification of MNIST, we established a baseline with QKS+SVM by constructing a multi-class model composed of one-versus-one (OvO) binary classifiers: at 10,000 episodes, the multi-class test error was 2.31\%. In comparison, the QKS+TTN+FO with translational symmetry, trained as OvO binary classifiers, yielded a multi-class test error of 1.16\% at 512 episodes/qubits. Taken together with the complete set of classification error rates (see supplemental material), the QKS+TTN+FO offers state-of-the-art quantum circuits for image classification. 
\section{Discussion} \label{sec:discussion} We have demonstrated that combining the non-local feature maps (of QKS) with coherent processing (using a TN) and feature optimization can yield significant improvements in classification error rates over the original QKS proposal, leading to state-of-the-art quantum circuits for image classification with a qubit count and circuit depth that are within reach of near-term devices. While these gains do not translate to meaningful quantum advantage (the TTN can be efficiently contracted in a classical computer), they illustrate that variational optimization of quantum circuits can perform non-trivial tasks on relatively small devices. The tree structure of the TN considered here has also been argued by~\citet{Kim2017} to have favorable noise-resilience properties, which makes them promising candidates for interesting near-term demonstrations. We did not take into consideration the impact of connectivity constraints of any experimental realization of a quantum computer. It should be noted, however, that connectivity constraints impose at most a linear depth overhead as a function of width~\cite{Bhattacharjee2017}, but more detailed (and likely architecture-dependent) analysis would be needed to pin down concrete overhead numbers. Although the classification error rates reported here are state-of-the-art for quantum circuits~\cite{Grant2018,Huggins2019,Farhi2018,Chen2021,Hur2022} and competitive against quantum-inspired tensor networks~\cite{Stoudenmire2016,Stoudenmire2018,Liu2019}, they are comparable to classical methods~\cite{LeCun1998}. Thus, modules of the ansatz would likely benefit from further exploration of different feature maps~\citep{Simard2003} and other tensor network structures~\cite{Orus2019}. 
The upshot would be a class of efficient unitary networks that could fundamentally resolve the exploding/vanishing gradient problem in deep neural networks and enable the detection of long-term dependencies in dynamical systems~\cite{Arjovsky2016,Jing2017}. \begin{ack} NXK acknowledges support from a Microsoft Quantum Internship, where this collaboration started. AB and MPS thank Martin Roetteler, Dave Wecker, and Stephen Jordan for fruitful discussions and encouragement. \end{ack} \bibliographystyle{unsrtnat}
1,108,101,565,845
arxiv
\section{Conclusion} \label{conclusion} There are still many difficulties for large-vocabulary speech recognition under adverse conditions, but the fusion of acoustic and visual information can bring a significant benefit to these challenging and interesting tasks. In this paper, we propose a new architecture, the decision fusion net (DFN), in which we consider state posteriors of acoustic and visual models as appropriate stream representations for fusion. These are combined by the DFN, which uses stream reliability indicators to estimate the optimal state-posteriors for hybrid speech recognition. It comes in two flavors, a BLSTM-DFN with optimal performance, as well as an LSTM-DFN, which provides the option of real-time decoding. We compare the performance of our proposed model to early integration as well as to conventional dynamic stream weighting models. In experimental results on noisy as well as on reverberant data, our proposed model shows significant improvements, with the BLSTM version giving a relative word-error-rate reduction of 42.18\% over audio-only recognition, and outperforming all baseline models. The hybrid architecture with the proposed DFN clearly outperforms the end-to-end WLAS model, which we attribute to its factorization of stream evaluation, stream integration, and subsequent, language-model-supported, search. It is worth mentioning that, on average, the hybrid DFN model is even superior to a hybrid model with \emph{oracle} stream weighting, which is an interesting result on its own, given that the latter provides a theoretical upper bound for instantaneous stream weighting approaches. The natural next goal of our work is to focus on end-to-end audio-visual speech recognition models. Here, we are specifically interested in investigating reliability-supported fusion within the attention mechanism in CTC and transformer systems and in the possibilities that come with probabilistic intermediate representations for these architectures. 
\section{Experimental Setup} \label{setup} \subsection{Dataset} \label{dataset} The Oxford-BBC Lip Reading Sentences (LRS2) corpus is used for all experiments. It contains more than 144k sentences from British television. Table~\ref{table:dataset} gives an overview of the dataset size and partitioning. The pre-train set is usually used in AVSR tasks for video or audio-visual model pretraining. In this work, we combine the pre-train and training set to train all acoustic, visual, and AV models. \begin{table}[htbp] \centering \caption{Size of subsets within the LRS2 Corpus} \label{table:dataset} \setlength\tabcolsep{2.0pt} \centering \begin{tabular}{|c|c|c|c|} \hline Subset & Utterances & Vocabulary &\begin{tabular}[c]{@{}l@{}}Duration\\ {[}hh:mm{]}\end{tabular} \\ \hline pre-train set & 96,000 & 41,000 &196:25\\ training set & 45,839 & 17,660 &28:33\\ validation set & 1,082 & 1,984 &00:40\\ test set & 1,243 & 1,698 & 00:35 \\ \hline \end{tabular} \end{table} For the AVSR task, the results are often dominated by the acoustic model. To analyze the performance in different noisy environments and to counter the audio-visual model imbalance, we add acoustic noise to the LRS2 database. The ambient subset of the MUSAN corpus~\cite{musan2015} is used as the noise source. It contains noises such as wind, footsteps, paper rustling, rain, as well as indistinct crowd noises. Seven different SNRs are selected randomly, from -9 dB to 9 dB in steps of 3 dB. We also generated data for a far-field AVSR scenario. As the LRS2 database does not contain highly reverberant data, we artificially reverberate the acoustic data by convolutions with measured impulse responses. These impulse responses also come from the MUSAN corpus. Both types of augmentation use Kaldi's Voxceleb example recipe. 
\subsection{Feature extraction} \label{features} The audio model uses 40 log Mel features together with two pitch features ($f_0$, $\Delta f_0$) and the probability of voicing, yielding 43-dimensional feature vectors. The audio features are extracted with 25~ms frame size and 10~ms frameshift. The video features are extracted per frame, i.e., every 40~ms. The video appearance model (VA) uses 43-dimensional IDCT coefficients of the grayscale mouth region of interest (ROI) as features. The video shape model (VS) is based on the 34-dimensional non-rigid shape parameters described in~\cite{amos2016openface}. Since the audio and video features have different frame rates, Bresenham\textquotesingle s algorithm~\cite{sproull1982using} is used to align the video features before training the visual models. This algorithm gives the best first-order approximation for aligning audio and video frames given only a difference in frame rates. \subsection{Implementation details} \label{impdetails} All our experiments are based on the Kaldi toolkit~\cite{povey2011kaldi}. As mentioned in Section~\ref{dataset}, both pre-train and training sets are used together to train the acoustic and visual models. The initial HMM-GMM training follows the standard Kaldi AMI recipe, namely, monophone training followed by triphone training. A linear discriminant analysis (LDA) is applied to a stacked context of features to obtain discriminative short-term features. Finally, speaker adaptive training (SAT) is used to compensate for speaker variability. Each step produces a better forced alignment for later network training. HMM-DNN training uses the nnet2 p-norm network~\cite{zhang2014improving} recipe, which is efficiently parallelizable. Once HMM-DNN training has been performed, the acoustic model DNN and two visual observation models are available. They output estimated log-posteriors $\textrm{log }{p}(\textbf{s} |\textbf{\textrm{o}}_t^{i})$ for each stream, which are then used in our proposed DFN. 
Its input consists of all stream-wise state-posteriors ${p}(\textbf{s} |\textbf{\textrm{o}}_t^{i})$ and the reliability measures. As mentioned in Section~\ref{systemoverview}, the decoder obtains the best word sequence by graph search through a decoding graph using the estimated log-pseudo-posteriors $\textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)$. To ensure that all experiments and modalities search through the same decoding graph, we share the phonetic decision tree between all single modalities. Thus, the number of states for each modality is identical, specifically 3,856. In addition, there are 41 reliability indicators, which leads to an overall input dimension of $(3 \times 3856 + 41) = 11{,}609$. The first three hidden layers have 8,192, 4,096, and 1,024 units, respectively, each using the ReLU activation function, layer normalization (LN), and a dropout rate of 0.15. They are followed by 3 BLSTM layers with 1,024 memory cells for each direction, using tanh as the activation function. Finally, a fully connected (FC) layer projects the data to the output dimension of 3,856. A log-softmax function is applied to obtain the estimated log-posteriors. Early stopping is used to avoid overfitting. We check for early stopping every 7,900 iterations, using the validation set. The training process is stopped if the validation loss does not decrease for 23,700 iterations. Finally, the performance is evaluated on the test set. We performed two experiments with the proposed DFN strategy. The first uses the BLSTM-DFN, exactly as described above, while the second is an LSTM-DFN, replacing the BLSTM layers by purely feed-forward LSTMs. The learning rate is initialized to $5\times10^{-4}$ and reduced by 20\% whenever the validation loss does not decrease in early stopping checking. The batch size is set to 10. DNN training utilizes the PyTorch library~\cite{paszke2019pytorch} with the ADAM optimizer. 
\section{Introduction} \label{introduction} Audio-visual speech recognition (AVSR) is motivated by the natural ability of humans to integrate cross-modal information. When people are listening to speech in a noisy environment, they often unconsciously focus on the speaker\textquotesingle s lips, which is of great benefit to human listening and comprehension~\cite{crosse2016eye}. Even in clean speech, seeing the lips of the speaker influences perception, as demonstrated by the McGurk effect~\cite{mcgurk1976hearing}. It has been shown in many studies~\cite{Wand2017,meutzner2017improving, gurban2008dynamic} that machine AVSR systems can also successfully improve performance on small-vocabulary tasks, when compared to their audio-only speech recognition (ASR) counterparts with otherwise equivalent setups. However, large-vocabulary tasks are still difficult for lipreading, because many phoneme pairs correspond to identical visemes, which makes certain words virtually indistinguishable to a vision-only system, as for example ``do'' and ``to''. This problem also leads to an inherent difficulty of AVSR on large-vocabulary tasks~\cite{thangthai2018building, sterpu2020}, which is exacerbated by the fact that many multi-stream fusion approaches perform badly, when the performance of the streams varies widely. In this work, we address this shortcoming by introducing a new stream fusion strategy that is impervious to such disparate single-stream recognition rates and can still benefit from low-quality streams in improving the results of highly reliable, clean audio data. To evaluate it in a realistic manner, we use a large-vocabulary dataset---the Lip Reading Sentences (LRS2) corpus~\cite{Afouras2018}---for all experiments, which we further augment by adding realistic noise and reverberation. An effective fusion strategy for AVSR is decision fusion, which combines the decisions of multiple classifiers into a common decision. 
Decision fusion comes in different forms, such as dynamic stream-weighting~\cite{stewart2013robust}, or state-based decision fusion (SBDF), e.g.~in \cite{Abdelaziz2015, potamianos2003recent, luettin2001asynchronous, nefian2002dynamic}. An alternative fusion approach is the idea of fusing \emph{representations} rather than decisions, e.g. via multi-modal attentions~\cite{Zhou2019}. Another example in this direction is that of gating, e.g.~in~\cite{Yu2020Overlapped} or in~\cite{arevalo2020gated}, where a newly designed \emph{Gated Multimodal Unit} is used for dynamically fusing feature streams within each cell of a network. In this work, we argue that the ideas of representation fusion and decision fusion can be unified in a different fashion, namely, by using the posterior probabilities $p(\textbf{s} |\textbf{o}_t^{\mathrm{i}})$ of $i = 1 \ldots M$ single-modality hybrid models as our representation of the uni-modal streams. This viewpoint opens up a range of new possibilities, centered around these single-modality representations. On the one hand, we can base the multi-modal model on pre-trained hybrid ASR models. On the other hand, we can learn recurrent and dynamic fusion networks, which can benefit from the reliability information that is inherent in the posterior probabilities, such as instantaneous entropy and dispersion~\cite{gurban2008dynamic}, as well as from temporal context. Overall, in the following, we compare our new approach with the performance of 4 baseline and oracle fusion strategies, which are detailed in Section~\ref{related}. The proposed fusion strategy is introduced in Section~\ref{systemoverview}. Section~\ref{reliabilitymeasures} describes the set of reliability measures that are employed in all of the dynamic fusion approaches. The experiments are presented in Section~\ref{setup}, while Section~\ref{results} introduces and analyzes the results. 
Finally, in Section~\ref{conclusion}, we discuss the lessons learned and give an outlook on future work. \section{Introduction} \label{introduction} Audio-visual speech recognition (AVSR) is motivated by the natural ability of humans to integrate cross-modal information. When people are listening to speech in a noisy environment, they often unconsciously focus on the speaker\textquotesingle s lips, which is of great benefit to human listening and comprehension \cite{crosse2016eye}. It has been shown in many studies \cite{Wand2017,meutzner2017improving, gurban2008dynamic} that machine AVSR systems can also successfully improve performance on small-vocabulary tasks, when compared to their audio-only speech recognition (ASR) counterparts with otherwise equivalent setups. However, large-vocabulary tasks are still difficult for lip reading, because many phoneme pairs correspond to identical visemes, which makes certain words virtually indistinguishable to a vision-only system, as for example ``do'' and ``to''. This problem also leads to an inherent difficulty of AVSR on large-vocabulary tasks \cite{thangthai2018building, sterpu2020}, which is exacerbated by the fact that many multi-stream fusion approaches perform badly, when the performance of the streams varies widely. In this work, we address this shortcoming by introducing a new stream fusion strategy that is impervious to such disparate single-stream recognition rates and can still benefit from low-quality streams in improving the results of highly reliable, clean audio data. To evaluate it in a realistic manner, we use a large-vocabulary dataset---the LRS2 corpus \cite{Afouras2018}---for all experiments, which we further augment by adding realistic noise and reverberation. AVSR fusion strategies can be divided into three groups: early integration, middle integration, and late integration. Early integration fuses audio and video information directly at the input of the system \cite{neti2000}. 
Middle integration, in contrast, fuses the audio and video information at an intermediate stage of the system. Separate encoders extract high-level audio and video information, this information is combined, and then decoded in a single decoder \cite{Yu2020Overlapped}. This method is often used in end-to-end (E2E) systems such as \cite{chung2017lip}. The final group is that of late integration, which is also called \emph{decision fusion}. Here, the decisions of multiple classifiers are fused, for example in the ROVER approach \cite{fiscus1997post}. Stream-weighting is an effective method to fuse different streams of information, which can be employed at all levels: at the feature level, in embedding spaces and at the decision level. It delivers a solution to the problem that the various streams may be reliable and informative in very different ways. Hence, a number of researchers have implemented the strategy of weighting different modalities \cite{gurban2008dynamic}. Many of them utilize static weights \cite{yang2005multimodal} in hybrid recognition setups, where audio and video speech recognition models are trained separately and the DNN state posteriors of all modalities are combined by constant weights according to \begin{equation} \label{stateequalfusion} \textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)=\sum_{i}^{}\lambda^i\cdot \textrm{log }{p}(\textbf{s} |\textbf{o}_t^{i}). \end{equation} Here, $\textrm{log }{p}(\textbf{s} |\textbf{\textrm{o}}_t^{i})$ is the log-posterior in stream $i$ and $\textnormal{log}\,\widetilde{p}(\textbf{s} |\textbf{\textrm{o}}_t)$ is its estimated combined log-posterior. The problem of weight determination, however, cannot be neglected \cite{kankanhalli2006experiential}. It is clear that in good lighting conditions, the visual information may be more useful, while audio information is most beneficial in frames with a sufficiently high SNR. 
Therefore, the weight should be dynamically estimated to obtain optimal fusion results. As a baseline approach, we therefore consider \emph{dynamic} stream weighting, which is often used as the fusion strategy in AVSR research, due to its high effectiveness and its ability to cope with large performance differences of the single constituent streams. In dynamic stream-weighting-based AVSR \cite{Abdelaziz2015}, the DNN state posteriors of all modalities are combined by the estimated weights according to \begin{equation} \label{statefusion} \textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)=\sum_{i}^{}\lambda_t^i\cdot \textrm{log }{p}(\textbf{s} |\textbf{o}_t^{i}), \end{equation} where the stream weights $\lambda_t^i$ are typically predicted by appropriate reliability measures. An alternative to such fusion approaches is the idea of intermediate integration, fusing \emph{representations} rather than decisions, e.g. via multi-modal attentions \cite{Zhou2019}. Another example that proceeds in this direction is \cite{arevalo2020gated}, where a newly designed \emph{Gated Multimodal Unit} (GMU) is used for dynamically fusing feature streams within each cell of a network. {\color{green!55!blue}Alternatively, \cite{zhang2019robust} suggests the combination of deep feed-forward sequential memory networks (DFSMN) and decision fusion approaches ROVER \cite{fiscus1997post}. However, this model can not improve the recognition rates for a high speech quality at Signal-to-Noise Ratios (SNRs) above 5dB, when the model is trained with noisy acoustic signal.} {\color{red}Alternatively, \cite{zhang2019robust} suggests the use of deep feedforward sequential memory networks (DFSMN) to firstly create and secondly fuse audio and video representations. 
However, this model cannot improve the recognition rates for a high speech quality at Signal-to-Noise Ratios (SNRs) above 5~dB.} In this work, we argue that the ideas of representation fusion and decision fusion can be unified in a different fashion, namely, by considering the posterior probabilities $p(\textbf{s} |\textbf{o}_t^{\mathrm{i}})$ of all $i = 1 \ldots M$ single-modality networks as our representation of the uni-modal streams. This viewpoint opens up a range of new possibilities, centered around these single-modality representations. On the one hand, we can utilize the model with pre-trained hybrid ASR models, which can be learned from and adapted to comparatively little data, and which can easily integrate with task-specific WFST language models \cite{povey2011kaldi}. On the other hand, we can learn recurrent and dynamic fusion networks, which can benefit from the reliability information that is inherent in the posterior probabilities, such as instantaneous entropy and dispersion \cite{gurban2008dynamic}, as well as from temporal context. Overall, in the following, we compare this new approach in two variants with the performance of 4 baseline and oracle fusion strategies, which are introduced in Section~\ref{systemoverview}. Section~\ref{reliabilitymeasures} describes the set of reliability measures that are employed in all of the dynamic fusion approaches. The experimental setup is shown in Section~\ref{setup}, while Section~\ref{results} introduces and analyzes the results. Finally, in Section~\ref{conclusion}, we discuss the lessons learned and give an outlook on future work. \section{Related work} \label{related} There are many different fusion strategies in AVSR research. In this section, we give a brief introduction to the fusion strategies that are considered as baseline models in this work. 
In these baselines as well as in our own model, M = 3 single-modality models are combined, one acoustic and two visual, where $\textbf{\textrm{o}}_t^\mathrm{A}$ are our audio features, and $\textbf{\textrm{o}}_t^\mathrm{VS}$ and $\textbf{\textrm{o}}_t^\mathrm{VA}$ are shape-based and appearance-based video features; see Section~\ref{features} for details. \subsection{Early integration} Early integration simply fuses the audio and visual information at the level of the input features via \begin{equation} \label{eq:concat} \textbf{\textrm{o}}_t=[(\textbf{\textrm{o}}_t^\mathrm{A})^T,(\textbf{\textrm{o}}_t^\mathrm{VS})^T,(\textbf{\textrm{o}}_t^\mathrm{VA})^T]^T. \end{equation} Superscript $T$ denotes the transpose. \subsection{Dynamic stream weighting} \label{DSW} Stream weighting is an effective method to fuse different streams. It is a solution to the problem that the various streams may be reliable and informative in very different ways. Hence, a number of works employ the strategy of weighting different modalities~\cite{gurban2008dynamic,heckmann2002noise,nefian2002dynamic}. Many utilize static weights; for example in~\cite{yang2005multimodal}, audio and video speech recognition models are trained separately and the DNN state posteriors of all modalities are combined by constant stream weights $\lambda^i$ according to \begin{equation} \label{stateequalfusion} \textnormal{log }\widetilde{p}(s |\textbf{o}_t)=\sum_{i}^{}\lambda^i\cdot \textrm{log }{p}(s |\textbf{o}_t^{i}). \end{equation} Here, $\textrm{log }{p}(s |\textbf{\textrm{o}}_t^{i})$ is the log-posterior of state $s$ in stream $i$ at time $t$ and $\textnormal{log}\,\widetilde{p}(s |\textbf{\textrm{o}}_t)$ is its estimated combined log-posterior. The problem of weight determination, however, cannot be neglected~\cite{kankanhalli2006experiential}. 
It is clear that in good lighting conditions, the visual information may be more useful, while audio information is most beneficial in frames with a sufficiently high SNR. Therefore, the weight should be dynamically estimated to obtain optimal fusion results. As a baseline approach, we therefore consider \emph{dynamic} stream weighting, which implements this idea. Specifically, we use dynamic stream weighting as described in~\cite{yu2020multimodal} as the baseline. Here, the DNN state posteriors of all modalities are combined by estimated dynamic weights according to \begin{equation} \label{statefusion} \textnormal{log }\widetilde{p}(s |\textbf{o}_t)=\sum_{i}^{}\lambda_t^i\cdot \textrm{log }{p}(s |\textbf{o}_t^{i}). \end{equation} The stream weights $\lambda_t^i$ are estimated by a feedforward network from a set of reliability measures, introduced in detail in Sec.~\ref{reliabilitymeasures}. Reliability information has proven beneficial for multi-modal integration in many studies~\cite{meutzner2017improving, gurban2008dynamic, hermansky2013multistream}, where it is used to inform the integration model about the degree of uncertainty in all information streams over time. In~\cite{yu2020multimodal}, the authors also consider different criteria to train the integration model. In this paper, we use two of them as our baselines, namely the mean squared error (MSE) and the cross-entropy (CE). This learning-based approach to weighted stream integration can effectively and significantly improve the recognition rates in lower SNR conditions. Also, in contrast to many other stream integration strategies, such as~\cite{seymour2005new,stewart2013robust,receveur2016turbo}, it does not suffer from a loss of performance relative to the best single modality when the modalities differ widely in their performance, but it rather gains accuracy even from the inclusion of less informative streams. 
This is a feature of great importance for the case at hand, as we need to design a system that will even allow for the inclusion of the visual modality under clean conditions, where audio is far more informative than video data, without losing---or, ideally, even still gaining---performance. \subsection{Oracle weighting} \label{sec:OW} We also compute optimal, or \emph{oracle} stream weights, as described in~\cite{yu2020multimodal}. These optimal dynamic stream weights are computed in such a way as to minimize the cross-entropy with respect to the ground-truth forced alignment information. Since a known text transcription of the test set is therefore needed in this method, it is only useful to obtain a theoretical upper performance bound for standard stream-weighting approaches. To minimize the cross-entropy, we use convex optimization via CVX~\cite{cvx}. The obtained oracle stream weights $\lambda_t^i$ are then used to calculate the estimated log-posterior through Equation~\eqref{statefusion}. As oracle stream weights yield the minimum cross-entropy between the fused posteriors and the ground-truth one-hot posteriors of the reference transcription computed by forced alignment, the corresponding results can be considered as the best achievable word error rate (WER) of a stream-weighting-based hybrid recognition system. \subsection{End-to-end model} In recent years, end-to-end speech recognition has quickly gained widespread popularity. The end-to-end model predicts character sequences directly from the audio signal. Comparing the end-to-end model and the hybrid speech recognition model, the end-to-end model has a lower complexity and is more easily amenable to multi-lingual ASR. But there are also some advantages to using a hybrid model. For example, the hybrid model can be learned from and adapted to comparatively little data and it can easily integrate with task-specific WFST language models~\cite{povey2011kaldi}. 
Importantly for this work, hybrid models allow for integration at the level of the pseudo-posteriors, which is a place for interpretable stream integration. Hence, in this work, we use the hybrid approach to train the single modality models. To compare the performance of our proposed system to that of end-to-end AVSR, we consider the end-to-end ``Watch, Listen, Attend and Spell'' model (WLAS)~\cite{Chung17} as a baseline. In this model, the audio and video encoders are LSTM networks. The decoder is an LSTM transducer, which fuses the encoded audio and video sequences through a dual attention mechanism. \section{Reliability measures} \label{reliabilitymeasures} To support the estimation of the dynamic stream weights, we extract a range of model-based and signal-based reliability measures (see Tab.~\ref{table:RMS}), generally computed as in~\cite{yu2020multimodal}. All of these reliability indicators are used in the dynamic stream weighting baseline as well as in our proposed model. \begin{table}[htbp] \centering \caption{Overview of reliability measures} \label{table:RMS} \footnotesize \setlength\tabcolsep{2.0pt} \renewcommand{1.3}{1.2} \begin{tabular}{|c|c|l|}\hline \multirow{3}{*}{Model-based }& \multicolumn{2}{c|}{\multirow{2}{*}{Signal-based }}\\& \multicolumn{2}{c|}{}\\ \cline{2-3} & Audio-based & \multicolumn{1}{c|}{Video-based } \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[c]{@{}l@{}}~~\llap{\textbullet}~~ Entropy\\ ~~\llap{\textbullet}~~ Dispersion \\ ~~\llap{\textbullet}~~ Posterior difference\\ ~~\llap{\textbullet}~~ Temporal divergence\\ ~~\llap{\textbullet}~~ Entropy and \\ \ \ \ dispersion ratio\end{tabular}} & \multicolumn{1}{l|}{\begin{tabular}[c]{@{}l@{}}~~\llap{\textbullet}~~ MFCC\\ ~~\llap{\textbullet}~~ $\Delta$MFCC\\ ~~\llap{\textbullet}~~ SNR\\ ~~\llap{\textbullet}~~ $f_0$ \\ ~~\llap{\textbullet}~~ $\Delta f_0$ \\ ~~\llap{\textbullet}~~ voicing probability \end{tabular}} & \begin{tabular}[c]{@{}l@{}}~~\llap{\textbullet}~~ Confidence\\ 
~~\llap{\textbullet}~~ IDCT\\ ~~\llap{\textbullet}~~ Image \\ distortion\end{tabular} \\ \hline \end{tabular} \end{table} The model-based measures are entropy, dispersion, posterior difference, temporal divergence, entropy- and dispersion-ratio. All model-based measures are computed from the log-posterior outputs of their respective single-modality models, $\textnormal{log }{p}(\textbf{s} |\textbf{o}_t)$. Signal-based reliability measures for the audio data comprise the first 5 MFCC coefficients with their temporal derivatives $\Delta$MFCC, again as in~\cite{yu2020multimodal}. The SNR is strongly related to the intelligibility of an audio signal. However, due to the realistic, highly non-stationary environmental noises (discussed in Sec.~\ref{setup}) used in data augmentation and testing, conventional SNR estimation algorithms are not showing a robust performance. Instead, therefore, the deep learning approach DeepXi~\cite{nicolson2019deep} is used to estimate the frame-wise SNR. The pitch $f_0$ and its temporal derivative, $\Delta f_0$, are also used as reliability indicators. It has been shown that high pitch of a speech signal negatively affects the MFCC coefficients~\cite{dharanipragada2006robust}, due to insufficient smoothing of the pitch harmonics in the speech spectrum by the filterbank. The probability of voicing~\cite{ghahremani2014pitch} is used as an additional cue. It is computed from the Normalized Cross-Correlation Function (NCCF) values for each frame. OpenFace~\cite{amos2016openface} is used for face detection and facial landmark extraction. This allows us to use the confidence of the face detector in each frame as an indicator of the visual feature quality. The other signal-based video reliability measures, the Inverse Discrete Cosine Transform (IDCT), and the image distortion estimates, are the same as in~\cite{yu2020multimodal}. 
\section{Results} \label{results} In this section, we compare the performance of all baseline models and fusion strategies. Figure~\ref{fig:baseline} gives an overview of the results of the audio-only model and compares the results of all baselines and our proposed BLSTM-DFN. \vspace{-1mm} \begin{figure}[htbp] \centering \includegraphics[scale = 0.5]{Sections/tab4.pdf} \caption{WER (\%) on the test set of the LRS2 corpus.} \label{fig:baseline} \end{figure} \vspace{-1mm} Comparing the audio-only model and the BLSTM-DFN integration, our fusion strategy is able to reduce the Word Error Rate (WER) for every SNR, even for clean acoustic data. For lower SNRs, the DFN can improve the absolute WER by over 10\%. Our proposed BLSTM-DFN is also capable of achieving better results in many cases than the---realistically unachievable---oracle weighting (OW), that is based on ground-truth transcription information of the test set and can be considered as the upper limit for the dynamic stream-weighting approach of Equation \eqref{statefusion}. The end-to-end WLAS model is not able to improve the recognition rates compared to the audio-only model, which may in part be due to the fact that it does not employ an explicit language model. Table~\ref{table:results} lists all the results of our experiments under additive noise. As expected, the audio-only model (AO) has a much better performance than the video-appearance (VA) and video-shape (VS) models. The average WERs of the visual models are over 80\%, which illustrates that lipreading is still hard in large-vocabulary tasks. We have also employed the pre-trained spatio-temporal visual front-end from~\cite{stafylakis2017combining} to extract high-level visual features, without seeing improvements. We assume that this is due to insufficient training data as well as to insufficient generalization across speakers and recording conditions. 
\begin{table}[htbp] \centering \setlength\tabcolsep{1.75pt} \caption{ Word error rate (\%) on the LRS2 test set under additive noise. OW describes the realistically unachievable performance bound for dynamic, instantaneous stream weighting.} \begin{tabular}{@{}cccccccccc@{}} \toprule SNR & -9 & -6 & -3 & 0 & 3 & 6 & 9 & clean & avg. \\ \midrule AO &48.96 &41.44 &33.07 &30.81 &22.85 &18.89 &16.49 &10.12 &27.83\\ VA &85.83 &87.00 &85.26 &88.10 &87.03 &88.44 &88.25 &88.10 &87.25\\ VS &88.11 &90.27 &87.29 &88.88 &85.88 &85.33 &88.58 &87.10 &87.68\\ \midrule EI &40.14 &32.47 &23.96 &26.59 &20.67 &16.68 &14.76 &10.02 &23.16\\ \midrule MSE &46.48 &37.79 &27.45 &27.47 &19.52 &16.58 &15.09 &9.42 &24.98\\ CE &45.79 &37.14 &26.32 &28.03 &19.40 &16.68 &14.76 &9.42 &24.65\\ OW &30.33 &26.47 &\textbf{15.41} &21.25 &\textbf{13.66} &11.66 &\textbf{10.45} &\textbf{7.54} &17.10\\ \midrule WLAS &64.74& 59.87& 49.03& 50.60& 41.17& 39.89& 43.72& 29.32 &47.29\\ \midrule LSTM-DFN &33.30 &27.22 &21.26 &21.25 &19.17&13.97 &15.84 &10.32 &20.29\\ BLSTM-DFN &\textbf{27.55} &\textbf{23.11} &17.89 &\textbf{16.35} &14.93 &\textbf{10.25} &10.78 &7.84 &\textbf{16.09}\\ \bottomrule \\ \end{tabular} \label{table:results} \end{table} \vspace{-2mm} Early integration (EI) can also improve the results, but the improvement is not as significant as that of the proposed DFN approach. Comparing the BLSTM-DFN and the LSTM-DFN, the former shows a notable advantage in accuracy, albeit at the price of non-real-time performance. Both the LSTM-DFN and the BLSTM-DFN use recurrent layers with 1024 memory cells. As the number of parameters in a BLSTM layer is almost double that of the LSTM layer, we also trained a BLSTM-DFN using 512 memory cells per layer. The average WER of this model is 16.14\%, still better than that of the LSTM-DFN with a similar number of parameters. 
If we increase the number of cells for the LSTM-DFN to 2048, with the same learning rate, the model suffers from convergence issues. The dynamic stream weighting results, using the MSE or CE loss, are better than shown in~\cite{yu2020multimodal} for three reasons. Firstly, improved reliability measures are used in this work. Secondly, \cite{yu2020multimodal} trains acoustic and visual models only on the training set, whereas here, they are trained on both the pre-train and training data. This gives a significant performance boost to the single-modality systems and also to early integration, but is not of as much added benefit to the dynamic stream-weight estimation, though the weight estimator from~\cite{yu2020multimodal} was trained on the validation set, whereas here, it is also trained on the pre-train and training sets. We assume that its relatively small performance gain is due to the limited flexibility of the composition function in dynamic stream weighting. Comparing the average WER over all acoustic conditions, the proposed BLSTM-DFN is greatly beneficial, outperforming the not realistically achievable OW system, and surpassing all feasible stream integration approaches by a clear margin. Thus, our proposed method outperforms even optimal dynamic stream weighting and therefore provides a fundamentally superior architecture compared to instantaneous stream weighting. \begin{threeparttable}[] \centering \caption{Far-field AVSR WER (\%) on LRS2.} 
\label{table:reverbresults} \footnotesize \setlength\tabcolsep{4.5pt} \begin{tabular*}{\linewidth}{ccccccccc} \toprule & AO & EI & MSE & CE & OW &WLAS & \begin{tabular}[c]{@{}c@{}}\small LSTM-\\\small DFN\end{tabular} & \begin{tabular}[c]{@{}c@{}}\small BLSTM-\\\small DFN\end{tabular}\\ \midrule &23.61 &19.15 & 19.54 & 19.44 &\textbf{12.70} & 44.24 &15.67 &15.28 \\ \bottomrule \\ \end{tabular*} \end{threeparttable} We also checked the case of far-field AVSR by using data augmentation to produce artificially reverberated speech, see Table~\ref{table:reverbresults} for the results. The BLSTM-DFN still outperforms the other fusion strategies, but it is not as close to the OW. We suspect the reason is an insufficient amount of reverberant acoustic training signals. Overall, it can be concluded that the introduced DFN is generally superior to instantaneous dynamic stream weighting. The latter can be considered as fusion at the \emph{frame level}. Frame-by-frame, it sums log-posteriors of each stream in a weighted fashion. Hence, its estimated combined log-posterior is a linear transformation of the single-modality log-posteriors. In contrast, the DFN can be considered as a cross-temporal fusion strategy at the \emph{state level}, as the combined log-posterior is estimated through a non-linear transformation with memory. This allows for a more accurate estimation, in which the BLSTM-DFN gives an added advantage to the LSTM-DFN, since it has access to both past and future contextual information. In this work, the BLSTM-DFN shows a relative WER reduction of 42.18\% compared to the audio-only system, while the LSTM-DFN yields a relative WER improvement of 27.09\%, showing the benefit of being able to lipread even for noisy LVCSR. \section{System overview} \label{systemoverview} In the following, we propose an architecture that centers around a decision fusion net (DFN), which learns to combine all modalities dynamically. 
As shown in Fig.~\ref{fig:DNN}, it bases on the state posteriors of each modality, derived from one hybrid recognition model per stream, which we consider as our representation of instantaneous feature inputs. In addition, we provide the DFN with multiple reliability indicators as auxiliary inputs, which help in estimating the multi-modal log-posteriors $\textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)$ for the decoder. As mentioned above, we consider $M = 3$ single-modality models, one acoustic and two visual. The fused posterior $\textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)$ is computed via \begin{multline} \label{eq:concatprob} \textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)=\textrm{DFN}([{p}(\textbf{s} |\textbf{o}_t^{\mathrm{A}})^T,{p}(\textbf{s} |\textbf{o}_t^{\mathrm{VA}})^T, \\ {p}(\textbf{s} |\textbf{o}_t^{\mathrm{VS}})^T,\textbf{R}_t^T]^T). \end{multline} Here, ${p}(\textbf{s} |\textbf{o}_t^{\mathrm{A}})$, ${p}(\textbf{s} |\textbf{o}_t^{\mathrm{VA}})$ and ${p}(\textbf{s} |\textbf{o}_t^{\mathrm{VS}})$ are the state posteriors of the audio model, and of the appearance-based, and a shape-based video model, respectively. $\textbf{R}_t$ is a vector composed of the reliability measures at time $t$, which we describe in Sec.~\ref{reliabilitymeasures}. As an alternative to the posteriors of each stream, we have also considered a fusion of the log posteriors $\textnormal{log}\;{p}(\textbf{s} |\textbf{o}_t^{\mathrm{i}})$, but settled on the linear posteriors due to a better model convergence. 
\vspace{-1mm} \begin{figure}[htbp] \centering \includegraphics[scale=0.9]{Sections/DNN.pdf} \caption{Audio-visual fusion via the DFN, applied to one stream of audio and two streams of video features} \label{fig:DNN} \end{figure} \vspace{-1mm} DFN training is then performed with the cross-entropy loss \begin{equation} \label{ce} \mathcal{L}_\textnormal{CE}= -\frac{1}{T}\sum_{t=1}^{T} \sum_{s=1}^{S}p^*(s | \textbf{\textrm{o}}_t)\cdot \textnormal{log }\widetilde{p}(s |\textbf{\textrm{o}}_t). \end{equation} Here, $p^*(s | \textbf{\textrm{o}}_t)$ is the target state probability of state $s$, obtained by the forced alignment for the clean acoustic training data. The estimated vector of log-posteriors $\textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)$ is obtained from Eq.~\eqref{eq:concatprob}. Finally, the decoder uses these estimated log-posteriors to find the optimum word sequence by a graph search through the decoding graph~\cite{mohri2008speech} \subsection{Model-based reliability measures} The \textbf{\textit{entropy}} is indicative of the uncertainty of the model about the state $s$, given the current observation $\textbf{o}_t$. \begin{equation} \label{entropy} \textnormal{H}^i_t=-\sum_{s=1}^{S\,^i}p(s| \textbf{o}_t^i)\cdot \textnormal{log }p(s | \textbf{o}_t^i), \end{equation} where $S\,^i$ is the number of states in stream model $i$. The \emph{\textbf{dispersion}} shows the decoder\textquotesingle s discriminative power. It is computed by: \begin{equation} \label{dispersion} \textnormal{D}^i_t=\frac{2}{K(K-1)}\sum_{l=1}^{K}\sum_{m=l+1}^{K}\textnormal{log }\frac{\widehat{p}(l | \textbf{o}_t^i)}{\widehat{p}(m | \textbf{o}_t^i)}, \end{equation} where $\widehat{p}$ contains a re-ordered version of the probabilities $p$, sorted in descending order. $K$ is set to 15. The K-largest \emph{\textbf{posterior difference}} shows the average ratio between the largest posterior and the next $K-1$ values. 
It is computed via \begin{equation} \label{diff} \textnormal{Diff}^{\,i}_t=\frac{1}{K-1}\sum_{k=2}^{K}\textnormal{log }\frac{\widehat{p}(1 | \textbf{o}_t^i)}{\widehat{p}(k | \textbf{o}_t^i)}. \end{equation} The \textit{\textbf{temporal divergence}} is computed as the Kullback-Leibler divergence between two posterior vectors $p(s | \textbf{o}_t^i)$ and $p(s | \textbf{o}_{t+\Delta t}^i)$, i.e.~ \begin{equation} \textnormal{Div}_{\Delta t}^i(t)=\textnormal{D}_{KL}(p(s | \textbf{o}_t^i) ||p(s | \textbf{o}_{t+\Delta t}^i)). \end{equation} $\Delta t$ is set to $250\ $ms. As shown in \cite{hermansky2013multistream}, the mean temporal divergence is a helpful measure of reliability. Here, we obtain the mean by averaging $\textnormal{Div}_{\Delta t}^i(t)$ over segments of $50\ $ms length. In some studies, the \textbf{\textit{entropy ratio}} is used as the estimated stream weights for stream-weighting based AVSR system, see e.g.~\cite{misra2003new}. Here we use this indicator as another reliability measure. \begin{equation} \omega_{h,t}^i= \frac{\widetilde{\textnormal{H}}_t^i}{\sum_{k=\mathrm{A,VA,VS}}^{}\widetilde{\textnormal{H}}_t^k}, \end{equation} where \begin{equation} \widetilde{\textnormal{H}}_t^i = \left\{\begin{matrix} \ 10000 \ \ \textnormal{H}_t^i > \overline{\textnormal{H}}_t\\[2pt] \ \ \ \textnormal{H}_t^i \ \ \ \ \textnormal{H}_t^i \leq \overline{\textnormal{H}}_t. \end{matrix}\right. \end{equation} The strongly related \textbf{\textit{dispersion ratio}} $\omega_{D,t}^i$ is also considered. \begin{equation} \omega_{D,t}^i= \frac{\widetilde{\textnormal{D}}_t^i}{\sum_{k=\mathrm{A,VA,VS}}^{}\widetilde{\textnormal{D}}_t^k}, \end{equation} where \begin{equation} \widetilde{\textnormal{D}}_t^i = \left\{\begin{matrix} \frac{1}{10000} \ \ \textnormal{D}_t^i < \overline{\textnormal{D}}_t\\[2pt] \ \ \ \textnormal{D}_t^i \ \ \ \ \textnormal{D}_t^i \geq \overline{\textnormal{D}}_t. \end{matrix}\right. 
\end{equation} A, VA, and VS represent the audio, video appearance and video shape stream, respectively. $\textnormal{H}_t^i$ and $\textnormal{D}_t^i$ are obtained from Eq.~\eqref{entropy} and Eq.~\eqref{dispersion}. $\overline{\textnormal{H}}_t$ and $\overline{\textnormal{D}}_t$ are the average entropy and dispersion. \vspace{-5pt} \subsection{Signal-based reliability measures} \label{signalrms} The low order MFCC coefficients are related to the audio quality. Here the first 5 MFCC coefficients and their temporal derivatives, $\Delta$MFCC are used. The estimated Signal-to-Noise Ratio (SNR) also represents the quality of the audio signal. \begin{equation} \mathrm{SNR\,_t}=10\ \textnormal{log}\left(\cfrac{\textnormal{S}_t}{\textnormal{N}_t}\right). \end{equation} Due to the special noise type (discussed in Sec. \ref{setup}) in data augmentation, the conventional SNR estimation algorithms are not working satisfactorily. A deep learning approach DeepXi \cite{nicolson2019deep} is used here as a better performing framewise SNR estimator. The pitch $f_0$ and its temporal derivative, $\Delta f_0$ are also used as reliability indicators; it has been shown that a high pitch of the speech signal affects the MFCC coefficients \cite{dharanipragada2006robust, ghai2011study}. The \textbf{\textit{probability of voicing}} is used as an additional soft voice-activity detection (soft VAD) cue. OpenFace \cite{amos2016openface} is used for face detection and face landmark extraction. The \textbf{\textit{Confidence}} of the face detection for each frame is related to the visual feature quality. We use the first 5 Inverse Discrete Cosine Transform (\textbf{\textit{IDCT}}) coefficients of the grayscale mouth region to represent low-level image properties. The \textbf{\textit{image distortion}} measures comprise the lighting condition, the degree of blurring and the rotation, all computed as in \cite{schonherr2016environmentally}. 
The lighting condition is considered as the mean brightness of the image. The degree of blurring is computed as the variance of the image after high-pass filtering. The rotation is considered as the cross-correlation between the original image and its horizontally mirrored version. \fi \section{Conclusion} \label{conclusion} There are still many difficulties for large-vocabulary speech recognition under adverse conditions, but the fusion of acoustic and visual information can bring a significant benefit to these challenging and interesting tasks. In this paper, we propose a new architecture, the decision fusion net (DFN), in which we consider state posteriors of acoustic and visual models as appropriate stream representations for fusion. These are combined by the DFN, which uses stream reliability indicators to estimate the optimal state-posteriors for hybrid speech recognition. It comes in two flavors, a BLSTM-DFN with optimal performance, as well as an LSTM-DFN, which provides the option of real-time decoding. We compare the performance of our proposed model to early integration as well as to conventional dynamic stream weighting models. In experimental results on noisy as well as on reverberant data, our proposed model shows significant improvements, with the BLSTM version giving a relative word-error-rate reduction of 42.18\% over audio-only recognition, and outperforming all baseline models. The hybrid architecture with the proposed DFN clearly outperforms the end-to-end WLAS model, which we attribute to its factorization of stream evaluation, stream integration, and subsequent, language-model-supported, search. It is worth mentioning that, on average, the hybrid DFN model is even superior to a hybrid model with \emph{oracle} stream weighting, which is an interesting result on its own, given that the latter provides a theoretical upper bound for instantaneous stream weighting approaches. 
The natural next goal of our work is to focus on end-to-end audio-visual speech recognition models. Here, we are specifically interested in investigating reliability-supported fusion within the attention mechanism in CTC and transformer systems and in the possibilities that come with probabilistic intermediate representations for these architectures. \section{Experimental Setup} \label{setup} \subsection{Dataset} \label{dataset} The Oxford-BBC Lip Reading Sentences (LRS2) corpus is used for all experiments. It contains more than 144k sentences from British television. Table~\ref{table:dataset} gives an overview of the dataset size and partitioning. The pre-train set is usually used in AVSR tasks for video or audio-visual model pretraining. In this work, we combine the pre-train and training set to train all acoustic, visual, and AV models. \begin{table}[htbp] \centering \caption{Size of subsets within the LRS2 Corpus} \label{table:dataset} \setlength\tabcolsep{2.0pt} \centering \begin{tabular}{|c|c|c|c|} \hline Subset & Utterances & Vocabulary &\begin{tabular}[c]{@{}l@{}}Duration\\ {[}hh:mm{]}\end{tabular} \\ \hline pre-train set & 96,000 & 41,000 &196:25\\ training set & 45,839 & 17,660 &28:33\\ validation set & 1,082 & 1,984 &00:40\\ test set & 1,243 & 1,698 & 00:35 \\ \hline \end{tabular} \end{table} For the AVSR task, the results are often dominated by the acoustic model. To analyze the performance in different noisy environments and to counter the audio-visual model imbalance, we add acoustic noise to the LRS2 database. The ambient subset of the MUSAN corpus~\cite{musan2015} is used as the noise source. It contains noises such as wind, footsteps, paper rustling, rain, as well as indistinct crowd noises. Seven different SNRs are selected randomly, from -9 dB to 9 dB in steps of 3 dB. We also generated data for a far-field AVSR scenario. 
As the LRS2 database does not contain highly reverberant data, we artificially reverberate the acoustic data by convolutions with measured impulse responses. These impulse responses also come from the MUSAN corpus. Both types of augmentation use Kaldi's Voxceleb example recipe. \subsection{Feature extraction} \label{features} The audio model uses 40 log Mel features together with two pitch features ($f_0$, $\Delta f_0$) and the probability of voicing, yielding 43-dimensional feature vectors. The audio features are extracted with 25~ms frame size and 10~ms frameshift. The video features are extracted per frame, i.e., every 40~ms. The video appearance model (VA) uses 43-dimensional IDCT coefficients of the grayscale mouth region of interest (ROI) as features. The video shape model (VS) is based on the 34-dimensional non-rigid shape parameters described in~\cite{amos2016openface}. Since the audio and video features have different frame rates, Bresenham\textquotesingle s algorithm~\cite{sproull1982using} is used to align the video features before training the visual models. This algorithm gives the best first-order approximation for aligning audio and video frames given only a difference in frame rates. \subsection{Implementation details} \label{impdetails} All our experiments are based on the Kaldi toolkit~\cite{povey2011kaldi}. As mentioned in Section~\ref{dataset}, both pre-train and training sets are used together to train the acoustic and visual models. The initial HMM-GMM training follows the standard Kaldi AMI recipe, namely, monophone training followed by triphone training. A linear discriminant analysis (LDA) is applied to a stacked context of features to obtain discriminative short-term features. Finally, speaker adaptive training (SAT) is used to compensate for speaker variability. Each step produces a better forced alignment for later network training. 
HMM-DNN training uses the nnet2 p-norm network~\cite{zhang2014improving} recipe, which is efficiently parallelizable. Once HMM-DNN training has been performed, the acoustic model DNN and two visual observation models are available. They output estimated log-posteriors $\textrm{log }{p}(\textbf{s} |\textbf{\textrm{o}}_t^{i})$ for each stream, which are then used in our proposed DFN. Its input consists of all stream-wise state-posteriors ${p}(\textbf{s} |\textbf{\textrm{o}}_t^{i})$ and the reliability measures. As mentioned in Section~\ref{systemoverview}, the decoder obtains the best word sequence by graph search through a decoding graph using the estimated log-pseudo-posteriors $\textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)$. To ensure that all experiments and modalities search through the same decoding graph, we share the phonetic decision tree between all single modalities. Thus, the number of states for each modality is identical, specifically 3,856. In addition, there are 41 reliability indicators, which leads to an overall input dimension of $(3 \times 3856 + 41) =$11,609. The first three hidden layers have 8,192, 4,096, and 1,024 units, respectively, each using the ReLU activation function, layer normalization (LN), and a dropout rate of 0.15. They are followed by 3 BLSTM layers with 1,024 memory cells for each direction, using tanh as the activation function. Finally, a fully connected (FC) layer projects the data to the output dimension of 3,856. A log-softmax function is applied to obtain the estimated log-posteriors. Early stopping is used to avoid overfitting. We check for early stopping every 7,900 iterations, using the validation set. The training process is stopped if the validation loss does not decrease for 23,700 iterations. Finally, the performance is evaluated on the test set. We performed two experiments with the proposed DFN strategy. 
The first uses the BLSTM-DFN, exactly as described above, while the second is an LSTM-DFN, replacing the BLSTM layers by unidirectional LSTMs. The learning rate is initialized to $5\times10^{-4}$ and reduced by 20\% whenever the validation loss does not decrease in early stopping checking. The batch size is set to 10. DNN training utilizes the PyTorch library~\cite{paszke2019pytorch} with the ADAM optimizer. \section{Introduction} \label{introduction} Audio-visual speech recognition (AVSR) is motivated by the natural ability of humans to integrate cross-modal information. When people are listening to speech in a noisy environment, they often unconsciously focus on the speaker\textquotesingle s lips, which is of great benefit to human listening and comprehension~\cite{crosse2016eye}. Even in clean speech, seeing the lips of the speaker influences perception, as demonstrated by the McGurk effect~\cite{mcgurk1976hearing}. It has been shown in many studies~\cite{Wand2017,meutzner2017improving, gurban2008dynamic} that machine AVSR systems can also successfully improve performance on small-vocabulary tasks, when compared to their audio-only speech recognition (ASR) counterparts with otherwise equivalent setups. However, large-vocabulary tasks are still difficult for lipreading, because many phoneme pairs correspond to identical visemes, which makes certain words virtually indistinguishable to a vision-only system, as for example ``do'' and ``to''. This problem also leads to an inherent difficulty of AVSR on large-vocabulary tasks~\cite{thangthai2018building, sterpu2020}, which is exacerbated by the fact that many multi-stream fusion approaches perform badly, when the performance of the streams varies widely. In this work, we address this shortcoming by introducing a new stream fusion strategy that is impervious to such disparate single-stream recognition rates and can still benefit from low-quality streams in improving the results of highly reliable, clean audio data. 
To evaluate it in a realistic manner, we use a large-vocabulary dataset---the Lip Reading Sentences (LRS2) corpus~\cite{Afouras2018}---for all experiments, which we further augment by adding realistic noise and reverberation. An effective fusion strategy for AVSR is decision fusion, which combines the decisions of multiple classifiers into a common decision. Decision fusion comes in different forms, such as dynamic stream-weighting~\cite{stewart2013robust}, or state-based decision fusion (SBDF), e.g.~in \cite{Abdelaziz2015, potamianos2003recent, luettin2001asynchronous, nefian2002dynamic}. An alternative fusion approach is the idea of fusing \emph{representations} rather than decisions, e.g. via multi-modal attentions~\cite{Zhou2019}. Another example in this direction is that of gating, e.g.~in~\cite{Yu2020Overlapped} or in~\cite{arevalo2020gated}, where a newly designed \emph{Gated Multimodal Unit} is used for dynamically fusing feature streams within each cell of a network. In this work, we argue that the ideas of representation fusion and decision fusion can be unified in a different fashion, namely, by using the posterior probabilities $p(\textbf{s} |\textbf{o}_t^{\mathrm{i}})$ of $i = 1 \ldots M$ single-modality hybrid models as our representation of the uni-modal streams. This viewpoint opens up a range of new possibilities, centered around these single-modality representations. On the one hand, we can base the multi-modal model on pre-trained hybrid ASR models. On the other hand, we can learn recurrent and dynamic fusion networks, which can benefit from the reliability information that is inherent in the posterior probabilities, such as instantaneous entropy and dispersion~\cite{gurban2008dynamic}, as well as from temporal context. Overall, in the following, we compare our new approach with the performance of 4 baseline and oracle fusion strategies, which are detailed in Section~\ref{related}. 
The proposed fusion strategy is introduced in Section~\ref{systemoverview}. Section~\ref{reliabilitymeasures} describes the set of reliability measures that are employed in all of the dynamic fusion approaches. The experiments are presented in Section~\ref{setup}, while Section~\ref{results} introduces and analyzes the results. Finally, in Section~\ref{conclusion}, we discuss the lessons learned and give an outlook on future work. \section{Introduction} \label{introduction} Audio-visual speech recognition (AVSR) is motivated by the natural ability of humans to integrate cross-modal information. When people are listening to speech in a noisy environment, they often unconsciously focus on the speaker\textquotesingle s lips, which is of great benefit to human listening and comprehension~\cite{crosse2016eye}. It has been shown in many studies~\cite{Wand2017,meutzner2017improving, gurban2008dynamic} that machine AVSR systems can also successfully improve performance on small-vocabulary tasks, when compared to their audio-only speech recognition (ASR) counterparts with otherwise equivalent setups. However, large-vocabulary tasks are still difficult for lip reading, because many phoneme pairs correspond to identical visemes, which makes certain words virtually indistinguishable to a vision-only system, as for example ``do'' and ``to''. This problem also leads to an inherent difficulty of AVSR on large-vocabulary tasks~\cite{thangthai2018building, sterpu2020}, which is exacerbated by the fact that many multi-stream fusion approaches perform badly, when the performance of the streams varies widely. In this work, we address this shortcoming by introducing a new stream fusion strategy that is impervious to such disparate single-stream recognition rates and can still benefit from low-quality streams in improving the results of highly reliable, clean audio data. 
To evaluate it in a realistic manner, we use a large-vocabulary dataset---the LRS2 corpus \cite{Afouras2018}---for all experiments, which we further augment by adding realistic noise and reverberation. AVSR fusion strategies can be divided into three groups: early integration, middle integration, and late integration. Early integration fuses audio and video information directly at the input of the system \cite{neti2000}. Middle integration, in contrast, fuses the audio and video information at an intermediate stage of the system. Separate encoders extract high-level audio and video information, this information is combined, and then decoded in a single decoder \cite{Yu2020Overlapped}. This method is often used in end-to-end (E2E) systems such as \cite{chung2017lip}. The final group is that of late integration, which is also called \emph{decision fusion}. Here, the decisions of multiple classifiers are fused, for example in the ROVER approach \cite{fiscus1997post}. Stream-weighting is an effective method to fuse different streams of information, which can be employed at all levels: at the feature level, in embedding spaces and at the decision level. It delivers a solution to the problem that the various streams may be reliable and informative in very different ways. Hence, a number of researchers have implemented the strategy of weighting different modalities \cite{gurban2008dynamic}. Many of them utilize static weights \cite{yang2005multimodal} in hybrid recognition setups, where audio and video speech recognition models are trained separately and the DNN state posteriors of all modalities are combined by constant weights according to \begin{equation} \label{stateequalfusion} \textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)=\sum_{i}^{}\lambda^i\cdot \textrm{log }{p}(\textbf{s} |\textbf{o}_t^{i}). 
\end{equation} Here, $\textrm{log }{p}(\textbf{s} |\textbf{\textrm{o}}_t^{i})$ is the log-posterior in stream $i$ and $\textnormal{log}\,\widetilde{p}(\textbf{s} |\textbf{\textrm{o}}_t)$ is its estimated combined log-posterior. The problem of weight determination, however, cannot be neglected \cite{kankanhalli2006experiential}. It is clear that in good lighting conditions, the visual information may be more useful, while audio information is most beneficial in frames with a sufficiently high SNR. Therefore, the weight should be dynamically estimated to obtain optimal fusion results. As a baseline approach, we therefore consider \emph{dynamic} stream weighting, which is often used as the fusion strategy in AVSR research, due to its high effectiveness and its ability to cope with large performance differences of the single constituent streams. In dynamic stream-weighting-based AVSR \cite{Abdelaziz2015}, the DNN state posteriors of all modalities are combined by the estimated weights according to \begin{equation} \label{statefusion} \textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)=\sum_{i}^{}\lambda_t^i\cdot \textrm{log }{p}(\textbf{s} |\textbf{o}_t^{i}), \end{equation} where the stream weights $\lambda_t^i$ are typically predicted by appropriate reliability measures. An alternative to such fusion approaches is the idea of intermediate integration, fusing \emph{representations} rather than decisions, e.g. via multi-modal attentions \cite{Zhou2019}. Another example that proceeds in this direction is \cite{arevalo2020gated}, where a newly designed \emph{Gated Multimodal Unit} (GMU) is used for dynamically fusing feature streams within each cell of a network. {\color{green!55!blue}Alternatively, \cite{zhang2019robust} suggests the combination of deep feed-forward sequential memory networks (DFSMN) and decision fusion approaches ROVER \cite{fiscus1997post}. 
However, this model cannot improve the recognition rates for a high speech quality at Signal-to-Noise Ratios (SNRs) above 5~dB, when the model is trained with a noisy acoustic signal.} {\color{red}Alternatively, \cite{zhang2019robust} suggests the use of deep feedforward sequential memory networks (DFSMN) to firstly create and secondly fuse audio and video representations. However, this model cannot improve the recognition rates for a high speech quality at Signal-to-Noise Ratios (SNRs) above 5~dB.} In this work, we argue that the ideas of representation fusion and decision fusion can be unified in a different fashion, namely, by considering the posterior probabilities $p(\textbf{s} |\textbf{o}_t^{\mathrm{i}})$ of all $i = 1 \ldots M$ single-modality networks as our representation of the uni-modal streams. This viewpoint opens up a range of new possibilities, centered around these single-modality representations. On the one hand, we can utilize the model with pre-trained hybrid ASR models, which can be learned from and adapted to comparatively little data, and which can easily integrate with task-specific WFST language models~\cite{povey2011kaldi}. On the other hand, we can learn recurrent and dynamic fusion networks, which can benefit from the reliability information that is inherent in the posterior probabilities, such as instantaneous entropy and dispersion~\cite{gurban2008dynamic}, as well as from temporal context. Overall, in the following, we compare this new approach in two variants with the performance of 4 baseline and oracle fusion strategies, which are introduced in Section~\ref{systemoverview}. Section~\ref{reliabilitymeasures} describes the set of reliability measures that are employed in all of the dynamic fusion approaches. The experimental setup is shown in Section~\ref{setup}, while Section~\ref{results} introduces and analyzes the results. Finally, in Section~\ref{conclusion}, we discuss the lessons learned and give an outlook on future work. 
\section{Related work} \label{related} There are many different fusion strategies in AVSR research. In this section, we give a brief introduction to the fusion strategies that are considered as baseline models in this work. In these baselines as well as in our own model, M = 3 single-modality models are combined, one acoustic and two visual, where $\textbf{\textrm{o}}_t^\mathrm{A}$ are our audio features, and $\textbf{\textrm{o}}_t^\mathrm{VS}$ and $\textbf{\textrm{o}}_t^\mathrm{VA}$ are shape-based and appearance-based video features; see Section~\ref{features} for details. \subsection{Early integration} Early integration simply fuses the audio and visual information at the level of the input features via \begin{equation} \label{eq:concat} \textbf{\textrm{o}}_t=[(\textbf{\textrm{o}}_t^\mathrm{A})^T,(\textbf{\textrm{o}}_t^\mathrm{VS})^T,(\textbf{\textrm{o}}_t^\mathrm{VA})^T]^T. \end{equation} Superscript $T$ denotes the transpose. \subsection{Dynamic stream weighting} \label{DSW} Stream weighting is an effective method to fuse different streams. It is a solution to the problem that the various streams may be reliable and informative in very different ways. Hence, a number of works employ the strategy of weighting different modalities~\cite{gurban2008dynamic,heckmann2002noise,nefian2002dynamic}. Many utilize static weights; for example in~\cite{yang2005multimodal}, audio and video speech recognition models are trained separately and the DNN state posteriors of all modalities are combined by constant stream weights $\lambda^i$ according to \begin{equation} \label{stateequalfusion} \textnormal{log }\widetilde{p}(s |\textbf{o}_t)=\sum_{i}^{}\lambda^i\cdot \textrm{log }{p}(s |\textbf{o}_t^{i}). \end{equation} Here, $\textrm{log }{p}(s |\textbf{\textrm{o}}_t^{i})$ is the log-posterior of state $s$ in stream $i$ at time $t$ and $\textnormal{log}\,\widetilde{p}(s |\textbf{\textrm{o}}_t)$ is its estimated combined log-posterior. 
The problem of weight determination, however, cannot be neglected~\cite{kankanhalli2006experiential}. It is clear that in good lighting conditions, the visual information may be more useful, while audio information is most beneficial in frames with a sufficiently high SNR. Therefore, the weight should be dynamically estimated to obtain optimal fusion results. As a baseline approach, we therefore consider \emph{dynamic} stream weighting, which implements this idea. Specifically, we use dynamic stream weighting as described in~\cite{yu2020multimodal} as the baseline. Here, the DNN state posteriors of all modalities are combined by estimated dynamic weights according to \begin{equation} \label{statefusion} \textnormal{log }\widetilde{p}(s |\textbf{o}_t)=\sum_{i}^{}\lambda_t^i\cdot \textrm{log }{p}(s |\textbf{o}_t^{i}). \end{equation} The stream weights $\lambda_t^i$ are estimated by a feedforward network from a set of reliability measures, introduced in detail in Sec.~\ref{reliabilitymeasures}. Reliability information has proven beneficial for multi-modal integration in many studies~\cite{meutzner2017improving, gurban2008dynamic, hermansky2013multistream}, where it is used to inform the integration model about the degree of uncertainty in all information streams over time. In~\cite{yu2020multimodal}, the authors also consider different criteria to train the integration model. In this paper, we use two of them as our baselines, namely the mean squared error (MSE) and the cross-entropy (CE). This learning-based approach to weighted stream integration can effectively and significantly improve the recognition rates in lower SNR conditions. 
Also, in contrast to many other stream integration strategies, such as~\cite{seymour2005new,stewart2013robust,receveur2016turbo}, it does not suffer from a loss of performance relative to the best single modality when the modalities differ widely in their performance, but it rather gains accuracy even from the inclusion of less informative streams. This is a feature of great importance for the case at hand, as we need to design a system that will even allow for the inclusion of the visual modality under clean conditions, where audio is far more informative than video data, without losing---or, ideally, even still gaining---performance. \subsection{Oracle weighting} \label{sec:OW} We also compute optimal, or \emph{oracle} stream weights, as described in~\cite{yu2020multimodal}. These optimal dynamic stream weights are computed in such a way as to minimize the cross-entropy with respect to the ground-truth forced alignment information. Since a known text transcription of the test set is therefore needed in this method, it is only useful to obtain a theoretical upper performance bound for standard stream-weighting approaches. To minimize the cross-entropy, we use convex optimization via CVX~\cite{cvx}. The obtained oracle stream weights $\lambda_t^i$ are then used to calculate the estimated log-posterior through Equation~\eqref{statefusion}. As oracle stream weights yield the minimum cross-entropy between the fused posteriors and the ground-truth one-hot posteriors of the reference transcription computed by forced alignment, the corresponding results can be considered as the best achievable word error rate (WER) of a stream-weighting-based hybrid recognition system. \subsection{End-to-end model} In recent years, end-to-end speech recognition has quickly gained widespread popularity. The end-to-end model predicts character sequences directly from the audio signal. 
Comparing the end-to-end model and the hybrid speech recognition model, the end-to-end model has a lower complexity and is more easily amenable to multi-lingual ASR. But there are also some advantages to using a hybrid model. For example, the hybrid model can be learned from and adapted to comparatively little data and it can easily integrate with task-specific WFST language models~\cite{povey2011kaldi}. Importantly for this work, hybrid models allow for integration at the level of the pseudo-posteriors, which is a place for interpretable stream integration. Hence, in this work, we use the hybrid approach to train the single modality models. To compare the performance of our proposed system to that of end-to-end AVSR, we consider the end-to-end ``Watch, Listen, Attend and Spell'' model (WLAS)~\cite{Chung17} as a baseline. In this model, the audio and video encoders are LSTM networks. The decoder is an LSTM transducer, which fuses the encoded audio and video sequences through a dual attention mechanism. \section{Reliability measures} \label{reliabilitymeasures} To support the estimation of the dynamic stream weights, we extract a range of model-based and signal-based reliability measures (see Tab.~\ref{table:RMS}), generally computed as in~\cite{yu2020multimodal}. All of these reliability indicators are used in the dynamic stream weighting baseline as well as in our proposed model. 
\begin{table}[htbp] \centering \caption{Overview of reliability measures} \label{table:RMS} \footnotesize \setlength\tabcolsep{2.0pt} \renewcommand{\arraystretch}{1.2} \begin{tabular}{|c|c|l|}\hline \multirow{3}{*}{Model-based }& \multicolumn{2}{c|}{\multirow{2}{*}{Signal-based }}\\& \multicolumn{2}{c|}{}\\ \cline{2-3} & Audio-based & \multicolumn{1}{c|}{Video-based } \\ \hline \multicolumn{1}{|l|}{\begin{tabular}[c]{@{}l@{}}~~\llap{\textbullet}~~ Entropy\\ ~~\llap{\textbullet}~~ Dispersion \\ ~~\llap{\textbullet}~~ Posterior difference\\ ~~\llap{\textbullet}~~ Temporal divergence\\ ~~\llap{\textbullet}~~ Entropy and \\ \ \ \ dispersion ratio\end{tabular}} & \multicolumn{1}{l|}{\begin{tabular}[c]{@{}l@{}}~~\llap{\textbullet}~~ MFCC\\ ~~\llap{\textbullet}~~ $\Delta$MFCC\\ ~~\llap{\textbullet}~~ SNR\\ ~~\llap{\textbullet}~~ $f_0$ \\ ~~\llap{\textbullet}~~ $\Delta f_0$ \\ ~~\llap{\textbullet}~~ voicing probability \end{tabular}} & \begin{tabular}[c]{@{}l@{}}~~\llap{\textbullet}~~ Confidence\\ ~~\llap{\textbullet}~~ IDCT\\ ~~\llap{\textbullet}~~ Image \\ distortion\end{tabular} \\ \hline \end{tabular} \end{table} The model-based measures are entropy, dispersion, posterior difference, temporal divergence, entropy- and dispersion-ratio. All model-based measures are computed from the log-posterior outputs of their respective single-modality models, $\textnormal{log }{p}(\textbf{s} |\textbf{o}_t)$. Signal-based reliability measures for the audio data comprise the first 5 MFCC coefficients with their temporal derivatives $\Delta$MFCC, again as in~\cite{yu2020multimodal}. The SNR is strongly related to the intelligibility of an audio signal. However, due to the realistic, highly non-stationary environmental noises (discussed in Sec.~\ref{setup}) used in data augmentation and testing, conventional SNR estimation algorithms do not show robust performance. Instead, therefore, the deep learning approach DeepXi~\cite{nicolson2019deep} is used to estimate the frame-wise SNR. 
The pitch $f_0$ and its temporal derivative, $\Delta f_0$, are also used as reliability indicators. It has been shown that high pitch of a speech signal negatively affects the MFCC coefficients~\cite{dharanipragada2006robust}, due to insufficient smoothing of the pitch harmonics in the speech spectrum by the filterbank. The probability of voicing~\cite{ghahremani2014pitch} is used as an additional cue. It is computed from the Normalized Cross-Correlation Function (NCCF) values for each frame. OpenFace~\cite{amos2016openface} is used for face detection and facial landmark extraction. This allows us to use the confidence of the face detector in each frame as an indicator of the visual feature quality. The other signal-based video reliability measures, the Inverse Discrete Cosine Transform (IDCT), and the image distortion estimates, are the same as in~\cite{yu2020multimodal}. \section{Results} \label{results} In this section, we compare the performance of all baseline models and fusion strategies. Figure~\ref{fig:baseline} gives an overview of the results of the audio-only model and compares the results of all baselines and our proposed BLSTM-DFN. \vspace{-1mm} \begin{figure}[htbp] \centering \includegraphics[scale = 0.5]{Sections/tab4.pdf} \caption{WER (\%) on the test set of the LRS2 corpus.} \label{fig:baseline} \end{figure} \vspace{-1mm} Comparing the audio-only model and the BLSTM-DFN integration, our fusion strategy is able to reduce the Word Error Rate (WER) for every SNR, even for clean acoustic data. For lower SNRs, the DFN can improve the absolute WER by over 10\%. Our proposed BLSTM-DFN is also capable of achieving better results in many cases than the---realistically unachievable---oracle weighting (OW), which is based on ground-truth transcription information of the test set and can be considered as the upper limit for the dynamic stream-weighting approach of Equation~\eqref{statefusion}. 
The end-to-end WLAS model is not able to improve the recognition rates compared to the audio-only model, which may in part be due to the fact that it does not employ an explicit language model. Table~\ref{table:results} lists all the results of our experiments under additive noise. As expected, the audio-only model (AO) has a much better performance than the video-appearance (VA) and video-shape (VS) models. The average WERs of the visual models are over 80\%, which illustrates that lipreading is still hard in large-vocabulary tasks. We have also employed the pre-trained spatio-temporal visual front-end from~\cite{stafylakis2017combining} to extract high-level visual features, without seeing improvements. We assume that this is due to insufficient training data as well as to insufficient generalization across speakers and recording conditions. \begin{table}[htbp] \centering \setlength\tabcolsep{1.75pt} \caption{Word error rate (\%) on the LRS2 test set under additive noise. OW describes the realistically unachievable performance bound for dynamic, instantaneous stream weighting.} \begin{tabular}{@{}cccccccccc@{}} \toprule SNR & -9 & -6 & -3 & 0 & 3 & 6 & 9 & clean & avg. 
\\ \midrule AO &48.96 &41.44 &33.07 &30.81 &22.85 &18.89 &16.49 &10.12 &27.83\\ VA &85.83 &87.00 &85.26 &88.10 &87.03 &88.44 &88.25 &88.10 &87.25\\ VS &88.11 &90.27 &87.29 &88.88 &85.88 &85.33 &88.58 &87.10 &87.68\\ \midrule EI &40.14 &32.47 &23.96 &26.59 &20.67 &16.68 &14.76 &10.02 &23.16\\ \midrule MSE &46.48 &37.79 &27.45 &27.47 &19.52 &16.58 &15.09 &9.42 &24.98\\ CE &45.79 &37.14 &26.32 &28.03 &19.40 &16.68 &14.76 &9.42 &24.65\\ OW &30.33 &26.47 &\textbf{15.41} &21.25 &\textbf{13.66} &11.66 &\textbf{10.45} &\textbf{7.54} &17.10\\ \midrule WLAS &64.74& 59.87& 49.03& 50.60& 41.17& 39.89& 43.72& 29.32 &47.29\\ \midrule LSTM-DFN &33.30 &27.22 &21.26 &21.25 &19.17&13.97 &15.84 &10.32 &20.29\\ BLSTM-DFN &\textbf{27.55} &\textbf{23.11} &17.89 &\textbf{16.35} &14.93 &\textbf{10.25} &10.78 &7.84 &\textbf{16.09}\\ \bottomrule \\ \end{tabular} \label{table:results} \end{table} \vspace{-2mm} Early integration (EI) can also improve the results, but the improvement is not as significant as that of the proposed DFN approach. Comparing the BLSTM-DFN and the LSTM-DFN, the former shows a notable advantage in accuracy, albeit at the price of non-real-time performance. Both the LSTM-DFN and the BLSTM-DFN use recurrent layers with 1024 memory cells. As the number of parameters in a BLSTM layer is almost double that of the LSTM layer, we also trained a BLSTM-DFN using 512 memory cells per layer. The average WER of this model is 16.14\%, still better than that of the LSTM-DFN with a similar number of parameters. If we increase the number of cells for the LSTM-DFN to 2048, with the same learning rate, the model suffers from convergence issues. The dynamic stream weighting results, using the MSE or CE loss, are better than shown in~\cite{yu2020multimodal} for three reasons. Firstly, improved reliability measures are used in this work. 
Secondly, \cite{yu2020multimodal} trains acoustic and visual models only on the training set, whereas here, they are trained on both the pre-train and training data. This gives a significant performance boost to the single-modality systems and also to early integration, but is not of as much added benefit to the dynamic stream-weight estimation, though the weight estimator from~\cite{yu2020multimodal} was trained on the validation set, whereas here, it is also trained on the pre-train and training sets. We assume that its relatively small performance gain is due to the limited flexibility of the composition function in dynamic stream weighting. Comparing the average WER over all acoustic conditions, the proposed BLSTM-DFN is greatly beneficial, outperforming the not realistically achievable OW system, and surpassing all feasible stream integration approaches by a clear margin. Thus, our proposed method outperforms even optimal dynamic stream weighting and therefore provides a fundamentally superior architecture compared to instantaneous stream weighting. \begin{threeparttable} \centering \caption{Far-field AVSR WER (\%) on LRS2.} \label{table:reverbresults} \footnotesize \setlength\tabcolsep{4.5pt} \begin{tabular*}{\linewidth}{ccccccccc} \toprule & AO & EI & MSE & CE & OW &WLAS & \begin{tabular}[c]{@{}c@{}}\small LSTM-\\\small DFN\end{tabular} & \begin{tabular}[c]{@{}c@{}}\small BLSTM-\\\small DFN\end{tabular}\\ \midrule &23.61 &19.15 & 19.54 & 19.44 &\textbf{12.70} & 44.24 &15.67 &15.28 \\ \bottomrule \\ \end{tabular*} \end{threeparttable} We also checked the case of far-field AVSR by using data augmentation to produce artificially reverberated speech; see Table~\ref{table:reverbresults} for the results. The BLSTM-DFN still outperforms the other fusion strategies, but it is not as close to the OW. We suspect the reason is an insufficient amount of reverberant acoustic training signals. 
Overall, it can be concluded that the introduced DFN is generally superior to instantaneous dynamic stream weighting. The latter can be considered as fusion at the \emph{frame level}. Frame-by-frame, it sums log-posteriors of each stream in a weighted fashion. Hence, its estimated combined log-posterior is a linear transformation of the single-modality log-posteriors. In contrast, the DFN can be considered as a cross-temporal fusion strategy at the \emph{state level}, as the combined log-posterior is estimated through a non-linear transformation with memory. This allows for a more accurate estimation, in which the BLSTM-DFN has an added advantage over the LSTM-DFN, since it has access to both past and future contextual information. In this work, the BLSTM-DFN shows a relative WER reduction of 42.18\% compared to the audio-only system, while the LSTM-DFN yields a relative WER improvement of 27.09\%, showing the benefit of being able to lipread even for noisy LVCSR. \section{System overview} \label{systemoverview} In the following, we propose an architecture that centers around a decision fusion net (DFN), which learns to combine all modalities dynamically. As shown in Fig.~\ref{fig:DNN}, it is based on the state posteriors of each modality, derived from one hybrid recognition model per stream, which we consider as our representation of instantaneous feature inputs. In addition, we provide the DFN with multiple reliability indicators as auxiliary inputs, which help in estimating the multi-modal log-posteriors $\textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)$ for the decoder. As mentioned above, we consider $M = 3$ single-modality models, one acoustic and two visual. 
The fused posterior $\textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)$ is computed via \begin{multline} \label{eq:concatprob} \textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)=\textrm{DFN}([{p}(\textbf{s} |\textbf{o}_t^{\mathrm{A}})^T,{p}(\textbf{s} |\textbf{o}_t^{\mathrm{VA}})^T, \\ {p}(\textbf{s} |\textbf{o}_t^{\mathrm{VS}})^T,\textbf{R}_t^T]^T). \end{multline} Here, ${p}(\textbf{s} |\textbf{o}_t^{\mathrm{A}})$, ${p}(\textbf{s} |\textbf{o}_t^{\mathrm{VA}})$ and ${p}(\textbf{s} |\textbf{o}_t^{\mathrm{VS}})$ are the state posteriors of the audio model, and of the appearance-based, and a shape-based video model, respectively. $\textbf{R}_t$ is a vector composed of the reliability measures at time $t$, which we describe in Sec.~\ref{reliabilitymeasures}. As an alternative to the posteriors of each stream, we have also considered a fusion of the log posteriors $\textnormal{log}\;{p}(\textbf{s} |\textbf{o}_t^{\mathrm{i}})$, but settled on the linear posteriors due to a better model convergence. \vspace{-1mm} \begin{figure}[htbp] \centering \includegraphics[scale=0.9]{Sections/DNN.pdf} \caption{Audio-visual fusion via the DFN, applied to one stream of audio and two streams of video features} \label{fig:DNN} \end{figure} \vspace{-1mm} DFN training is then performed with the cross-entropy loss \begin{equation} \label{ce} \mathcal{L}_\textnormal{CE}= -\frac{1}{T}\sum_{t=1}^{T} \sum_{s=1}^{S}p^*(s | \textbf{\textrm{o}}_t)\cdot \textnormal{log }\widetilde{p}(s |\textbf{\textrm{o}}_t). \end{equation} Here, $p^*(s | \textbf{\textrm{o}}_t)$ is the target state probability of state $s$, obtained by the forced alignment for the clean acoustic training data. The estimated vector of log-posteriors $\textnormal{log }\widetilde{p}(\textbf{s} |\textbf{o}_t)$ is obtained from Eq.~\eqref{eq:concatprob}. 
Finally, the decoder uses these estimated log-posteriors to find the optimum word sequence by a graph search through the decoding graph~\cite{mohri2008speech}. \subsection{Model-based reliability measures} The \textbf{\textit{entropy}} is indicative of the uncertainty of the model about the state $s$, given the current observation $\textbf{o}_t$. \begin{equation} \label{entropy} \textnormal{H}^i_t=-\sum_{s=1}^{S\,^i}p(s| \textbf{o}_t^i)\cdot \textnormal{log }p(s | \textbf{o}_t^i), \end{equation} where $S\,^i$ is the number of states in stream model $i$. The \emph{\textbf{dispersion}} shows the decoder's discriminative power. It is computed by: \begin{equation} \label{dispersion} \textnormal{D}^i_t=\frac{2}{K(K-1)}\sum_{l=1}^{K}\sum_{m=l+1}^{K}\textnormal{log }\frac{\widehat{p}(l | \textbf{o}_t^i)}{\widehat{p}(m | \textbf{o}_t^i)}, \end{equation} where $\widehat{p}$ contains a re-ordered version of the probabilities $p$, sorted in descending order. $K$ is set to 15. The K-largest \emph{\textbf{posterior difference}} shows the average ratio between the largest posterior and the next $K-1$ values. It is computed via \begin{equation} \label{diff} \textnormal{Diff}^{\,i}_t=\frac{1}{K-1}\sum_{k=2}^{K}\textnormal{log }\frac{\widehat{p}(1 | \textbf{o}_t^i)}{\widehat{p}(k | \textbf{o}_t^i)}. \end{equation} The \textit{\textbf{temporal divergence}} is computed as the Kullback-Leibler divergence between two posterior vectors $p(s | \textbf{o}_t^i)$ and $p(s | \textbf{o}_{t+\Delta t}^i)$, i.e.~ \begin{equation} \textnormal{Div}_{\Delta t}^i(t)=\textnormal{D}_{KL}(p(s | \textbf{o}_t^i) ||p(s | \textbf{o}_{t+\Delta t}^i)). \end{equation} $\Delta t$ is set to $250\ $ms. As shown in \cite{hermansky2013multistream}, the mean temporal divergence is a helpful measure of reliability. Here, we obtain the mean by averaging $\textnormal{Div}_{\Delta t}^i(t)$ over segments of $50\ $ms length. 
In some studies, the \textbf{\textit{entropy ratio}} is used as the estimated stream weights for a stream-weighting-based AVSR system, see e.g.~\cite{misra2003new}. Here we use this indicator as another reliability measure. \begin{equation} \omega_{h,t}^i= \frac{\widetilde{\textnormal{H}}_t^i}{\sum_{k=\mathrm{A,VA,VS}}^{}\widetilde{\textnormal{H}}_t^k}, \end{equation} where \begin{equation} \widetilde{\textnormal{H}}_t^i = \begin{cases} 10000 & \textnormal{H}_t^i > \overline{\textnormal{H}}_t\\[2pt] \textnormal{H}_t^i & \textnormal{H}_t^i \leq \overline{\textnormal{H}}_t. \end{cases} \end{equation} The strongly related \textbf{\textit{dispersion ratio}} $\omega_{D,t}^i$ is also considered. \begin{equation} \omega_{D,t}^i= \frac{\widetilde{\textnormal{D}}_t^i}{\sum_{k=\mathrm{A,VA,VS}}^{}\widetilde{\textnormal{D}}_t^k}, \end{equation} where \begin{equation} \widetilde{\textnormal{D}}_t^i = \begin{cases} \frac{1}{10000} & \textnormal{D}_t^i < \overline{\textnormal{D}}_t\\[2pt] \textnormal{D}_t^i & \textnormal{D}_t^i \geq \overline{\textnormal{D}}_t. \end{cases} \end{equation} A, VA, and VS represent the audio, video appearance and video shape stream, respectively. $\textnormal{H}_t^i$ and $\textnormal{D}_t^i$ are obtained from Eq.~\eqref{entropy} and Eq.~\eqref{dispersion}. $\overline{\textnormal{H}}_t$ and $\overline{\textnormal{D}}_t$ are the average entropy and dispersion. \vspace{-5pt} \subsection{Signal-based reliability measures} \label{signalrms} The low order MFCC coefficients are related to the audio quality. Here the first 5 MFCC coefficients and their temporal derivatives, $\Delta$MFCC are used. The estimated Signal-to-Noise Ratio (SNR) also represents the quality of the audio signal. \begin{equation} \mathrm{SNR\,_t}=10\ \textnormal{log}\left(\cfrac{\textnormal{S}_t}{\textnormal{N}_t}\right). \end{equation} Due to the special noise type (discussed in Sec. 
\ref{setup}) in data augmentation, the conventional SNR estimation algorithms do not work satisfactorily. A deep learning approach, DeepXi \cite{nicolson2019deep}, is used here as a better-performing framewise SNR estimator. The pitch $f_0$ and its temporal derivative, $\Delta f_0$, are also used as reliability indicators; it has been proven that a high pitch of the speech signal affects the MFCC coefficients \cite{dharanipragada2006robust, ghai2011study}. The \textbf{\textit{probability of voicing}} is used as an additional soft voice-activity detection (soft VAD) cue. OpenFace \cite{amos2016openface} is used for face detection and face landmark extraction. The \textbf{\textit{Confidence}} of the face detection for each frame is related to the visual feature quality. We use the first 5 Inverse Discrete Cosine Transform (\textbf{\textit{IDCT}}) coefficients of the grayscale mouth region to represent low-level image properties. The \textbf{\textit{image distortion}} measures comprise the lighting condition, the degree of blurring and the rotation, all computed as in \cite{schonherr2016environmentally}. The lighting condition is considered as the mean brightness of the image. The degree of blurring is computed as the variance of the image after high-pass filtering. The rotation is considered as the cross-correlation between the original image and its horizontally mirrored version. \fi
1,108,101,565,846
arxiv
\section{Introduction} Several recent studies have found that the oldest and most massive galaxies at high redshift have very small sizes (e.g., {Trujillo} {et~al.} 2006; {Daddi} {et~al.} 2005; {Toft} {et~al.} 2007; {Zirm} {et~al.} 2007; {van Dokkum} {et~al.} 2008; {Cimatti} {et~al.} 2008; {van der Wel} {et~al.} 2008; {Franx} {et~al.} 2008; {Damjanov} {et~al.} 2008; {Buitrago} {et~al.} 2008). Although these studies use different datasets and methodology they are in good agreement, finding that the effective radii of red, apparently quiescent galaxies of fixed mass evolved by a factor of $\sim 5$ since $z\sim 2.5$ (e.g., {van der Wel} {et~al.} 2008). Initially there were concerns about the quality of photometric redshifts, the depth of the imaging data, and the interpretation of the broad-band spectral energy distributions (SEDs), but these were recently addressed through deep Gemini/GNIRS near-infrared spectroscopy and deep HST/NICMOS imaging of a sample of massive quiescent galaxies at $z\sim 2.3$ ({Kriek} {et~al.} 2006; {van Dokkum} {et~al.} 2008). These small galaxies are remarkable when compared to nearby galaxies, as their average stellar densities are a factor of $\gtrsim 100$ higher than those of red SDSS galaxies of the same mass ({van Dokkum} {et~al.} 2008). Such massive, dense galaxies are very rare in the local Universe (e.g., {Trujillo} {et~al.} 2009) but they make up about half of galaxies with $M\gtrsim 10^{11}$\,\msun\ at $z\sim 2.3$ (e.g, {van Dokkum} {et~al.} 2006; {Kriek} {et~al.} 2006; {Williams} {et~al.} 2008). Various scenarios have been proposed to explain the observed properties of the compact galaxies and to describe their subsequent evolution. The most straightforward explanation is that the masses are overestimated and/or the sizes underestimated. The mass measurements currently rely on fitting stellar population synthesis models to the observed photometry and spectra, and these models have considerable systematic uncertainties. 
A significant uncertainty is the stellar initial mass function (IMF): a ``bottom-light'' IMF, such as proposed by, e.g., {van Dokkum} (2008), {Dav{\'e}} (2008), and {Wilkins}, {Trentham}, \& {Hopkins} (2008), would generally lower the implied masses, with the precise effect depending on the age of the stellar populations. The sizes of the galaxies can be underestimated in several ways. It may be that the galaxies have strong radial gradients in $M/L$ ratio, in which case the luminosity-weighted sizes are different from the mass-weighted sizes (e.g., {Hopkins} {et~al.} 2008). We also note that {Hopkins} {et~al.} (2008) predict smaller differences between nearby elliptical galaxies and their progenitors, due to contribution of the dark matter halos. Limitations in resolution and signal-to-noise ratio may also play a role, although this seems increasingly unlikely. Taking the measured masses and sizes at face value, three effects have been discussed to explain the observed evolution. The first is a variation on ``progenitor bias'' ({van Dokkum} \& {Franx} 2001), which states that early-type galaxies at high redshift are only a subset of all progenitors of today's early-type galaxies. As we discuss later, the number density of the compact galaxies at $z\sim 2.3$ is only $\sim 7$\,\% of the number density of galaxies with the same mass today (see \S\,5.1)\footnote{We note that this fraction is smaller than that found in Kriek et al.\ (2008). The reason is that we adopt a different IMF and therefore a different mass limit, and we assume that at $z=0$ all galaxies with $M\ge10^{11}$\,\msun\ are ``red and dead'' but only $\sim 50$\,\% at $z=2.5$.}. Therefore, the compact galaxies may be the progenitors of the most compact $\sim7$\,\% of today's galaxies with the same mass (see also {Franx} {et~al.} 2008). This explanation cannot be complete, as the compact galaxies are small even when compared to this subset of the present-day population. 
The second explanation is minor or major merging, which will increase the sizes but also the masses ({Khochfar} \& {Silk} 2006; {Naab} {et~al.} 2007; {Hopkins} {et~al.} 2008). Significant merging is expected for these massive galaxies, e.g., {White} {et~al.} (2007); {Guo} \& {White} (2008), and merging scenarios have been discussed in several papers (e.g., {Cimatti} {et~al.} 2008; {van der Wel} {et~al.} 2008). The third explanation that has been discussed is expansion of the galaxies as a result of dramatic mass loss due to quasar feedback ({Fan} {et~al.} 2008). In this paper we provide new constraints on the evolution of compact ``red and dead'' high redshift galaxies. In \S\,3 we compare the radial stellar density profiles of the compact galaxies to those of nearby elliptical galaxies. This allows us to determine whether the compact galaxies resemble the central regions of elliptical galaxies, and hence whether normal elliptical galaxies are plausible descendants via merging scenarios. In \S\,4 we present three simple models to explain the growth of compact galaxies into local elliptical galaxies. In \S\,5 we consider which of the modes is most likely to dominate galaxy growth by including constraints from the evolution of the mass function, and derive a lower bound on the amount of size growth for a given amount of mass growth. Throughout this paper, we assume a $\Lambda$CDM cosmology with $H_0 = 70 \unit{km} \unit{s^{-1} Mpc^{-1}}$, $\Omega_m= 0.3$, and $\Omega_{\Lambda} = 0.7$. \section{Density Profiles} Density profiles of nearby elliptical galaxies and the compact high redshift galaxies are constructed. For the compact galaxies we deproject the Sersic fits presented in vD08, and for the nearby galaxies we use a combination of new and literature data. 
\subsection{Surface Brightness Profiles} \subsubsection{High Redshift Galaxies} We use the sample of nine high redshift ``red and dead'' galaxies previously studied by {Kriek} {et~al.} (2006) and {van Dokkum} {et~al.} (2008) [hereafter vD08]. The redshifts of the galaxies were measured from deep rest-frame optical Gemini/GNIRS spectra ({Kriek} {et~al.} 2006). The spectra also demonstrate that the light comes from evolved stellar populations, as they exhibit prominent Balmer or 4000\,\AA\ breaks. The galaxies were imaged with the Hubble Space Telescope (HST) NICMOS2 camera, and with Keck/NIRC2 using laser guide star-assisted adaptive optics. As described in vD08, the galaxies were fit with {Sersic} (1968) profiles using GALFIT ({Peng} {et~al.} 2002). Structural parameters for the galaxies are listed in vD08. Surface brightness profiles in the $H_{160}$ band were constructed from the Sersic fits. The galaxies are barely resolved even with the NICMOS2 camera, and we have essentially no information on the form of the density profile within the effective radius ($0\farcs 1$, or $\approx 1$\,kpc). The average density within this radius is much better constrained, and this is the parameter that we will use in quantitative comparisons. We note that GALFIT effectively extrapolates the Sersic fits to the (resolved) structure at large radii inward while conserving the total flux, and that therefore the fits {\em may} also provide a good approximation of the form of the density profile within 1\,kpc. \subsubsection{Nearby Galaxies} Two sources are used for the nearby sample. The {Tal}, {van Dokkum}, \& {Nelan} (2009) [hereafter T09] sample is an absolute magnitude and volume-limited sample of local elliptical galaxies, selected from {Tully} (1988). 
All galaxies with morphological type ``E'', $M_B < -20$, within declinations of $-85$ and +10, galactic latitude $> 17^{\circ}$ or $<-17^{\circ}$ and at distances of $15 - 50\unit{Mpc}$ were observed with the Yale 1.0\,m telescope at CTIO, operated by the SMARTS consortium, in the $V$ band. The observing strategy was optimized for flat-fielding accuracy, and the surface brightness profiles can be reliably traced to $\approx 29$\,mag\,arcsec$^{-2}$. The galaxies were fit with isophotal ellipses using IRAF. Apparent magnitudes were calibrated using aperture photometry of {Prugniel} \& {Heraudeau} (1998) and then converted to $B$ magnitudes using published $B-V$ colors from the same catalogue (neglecting color gradients). Measurements were corrected for Galactic reddening using infrared dust maps from {Schlegel}, {Finkbeiner}, \& {Davis} (1998). We assume distance measurements from the Tully catalogue (corrected to our cosmology) to convert the luminosity profiles to physical units. The T09 sample has the advantage that it is complete down to a luminosity limit (which roughly corresponds to a mass limit for these luminous red ellipticals), but the disadvantage is that it only samples a limited range in mass and luminosity. We supplemented the T09 sample with photometry from {Franx}, {Illingworth}, \& {Heckman} (1989); {Peletier} {et~al.} (1990); {Jedrzejewski} (1987) [hereafter FPJ]. This sample is not complete but covers a larger range in luminosity. We limited the sample to all galaxies that have published $B$-band profiles. Again distances from {Tully} (1988) were used to convert the observed brightnesses to luminosities. 
\subsection{Deprojection} The intensity profiles of the nearby galaxies are fit to Sersic profiles of the form \begin{equation} I(r) = I_o \exp{\left[-b_n {\left(\frac{r}{r_e}\right)}^{1/n}\right]} \end{equation} with $n \le 4$ between radii of $4\unit{''}$ out to $20\unit{kpc}$, or the maximum extent of each profile, along the circularized axis, $r = a\sqrt{(1-\epsilon)}$, of the galaxy ({Ciotti} 1991). $b_n$ is defined as the solution to $\gamma(2n,b_n) = \Gamma(2n)/2$. We use the asymptotic approximation for $b_n$, which is accurate to a factor of $\mathcal{O} \sim 10^{-6}$: \begin{equation} b_n \approx 2n - \frac{1}{3}+\frac{4}{405n}+\frac{46}{25515n^2} \end{equation} ({Ciotti} \& {Bertin} 1999). For the high redshift galaxies we used the fits of vD08. We then performed an Abel Transform to deproject a circularized, three-dimensional light profile: \begin{equation} \rho_{L}\left(\frac{r}{r_e}\right) = \frac{b_n}{\pi}\frac{I_o}{r_e}{\left(\frac{r}{r_e}\right)}^{(1/n-1)} \int_{1}^{\infty}\frac{\exp{[-b_n{(\frac{r}{r_e})}^{(1/n)}t]}} {\sqrt{t^{2n}-1}}dt \end{equation} For both the high redshift sample of compact galaxies and the two samples of nearby elliptical galaxies we now have circularized radial luminosity density profiles in units of $L_{B,\odot}\,{\rm kpc}^{-3}$. \subsection{Light-to-Mass Conversions} In order to convert the luminosity density profiles to stellar mass density profiles we make the following assumptions about the mass-to-light ($M/L$) ratios. For the high redshift sample we use stellar masses from {Kriek} {et~al.} (2008) adjusted to a {Kroupa} (2001) IMF. For the nearby sample, we use the well-established relation between $M/L$ ratio and mass to convert luminosities to masses (e.g., {van der Marel} 1991). 
The normalization and slope of the relation in the $B$ band were determined by combining the information in Table 1 of {van der Marel} (1991) and Table 2 in {van der Marel} \& {van Dokkum} (2007): \begin{equation} \frac{M}{L_B} = (9.04\times 10^{-4}) {\left(\frac{L_{B}}{L_{B,\odot}}\right)}^{0.37} \end{equation} The conversion from luminosity to mass is the largest uncertainty in the methodology, in particular the lack of dynamical measurements that could calibrate the $M/L$ ratios of the high redshift galaxies. We will return to this issue in \S\,6. \section{Comparison of Density Profiles} \subsection{Average Profiles} The stellar density profiles of the compact high redshift galaxies are compared to those of nearby elliptical galaxies in Fig.\ \ref{fig:densprof}. The solid line is the average density profile of the 9 galaxies from vD08. We use a 1000 iteration bootstrap estimation to approximate errors of the average density profile due to the small sample size of the high redshift galaxies. The 1 $\sigma$ contour is shown in dark gray and the 2 $\sigma$ is shown in light gray. Broken lines are average profiles of nearby elliptical galaxies from the T09 sample, in three different mass bins. The lowest mass bin is $M \ge 10^{11}M_{\odot}$: this is the mass that the high redshift galaxies already have at the epoch of observation, and therefore the minimum mass of their descendants. \begin{figure}[h] \centering \includegraphics[scale=0.85]{f1.eps} \caption{Comparison of the mean stellar density profiles of high redshift compact galaxies (solid line) with nearby elliptical galaxies from the T09 sample (broken lines). High redshift 1 and 2 $\sigma$ contours are shown in gray. 
The average density profile for nearby galaxies with $M \ge 10^{11}M_{\odot}$ is represented by the red, dashed line; the green, short dashed line corresponds to galaxies with $M \ge 3\times 10^{11}M_{\odot}$; and the most massive local galaxies with $M \ge 5 \times 10^{11} M_{\odot}$ are shown by the blue, long-dashed line. Note that the profiles of the nearby galaxies are fairly similar to those of the compact galaxies at radii $r\lesssim 3$\,kpc, qualitatively consistent with expectations for inside-out growth.} \label{fig:densprof} \end{figure} Figure \ref{fig:densprof} shows that the discrepancy between the profiles of compact high redshift galaxies and nearby elliptical galaxies is mostly in the outer regions. Within $r \approx 1\unit{kpc}$ the average stellar density of the high redshift galaxies is greater than the density of nearby ellipticals by a factor of a few only, particularly for the more massive galaxies in the T09 sample. This discrepancy is much smaller than the factor of $\gtrsim 100$ difference when the density is measured within the effective radius (e.g., vD08). Furthermore, our error estimates only address the sample bias; this discrepancy is especially small considering the other sources of uncertainty in our measurements, which we will discuss further in \S\,6. Outside of this inner region, the difference grows significantly; the stellar density of nearby elliptical galaxies is a few hundred times higher than that of the compact high redshift galaxies at $r>10 \unit{kpc}$. We infer that in order to evolve into nearby galaxies, compact galaxies need not change significantly in the central regions, but must grow significantly in their outer regions. \begin{figure*}[t] \centering \includegraphics[scale=0.9]{f2.eps} \caption{Relative properties of nearby and $z\sim2.3$ galaxies. 
The panels show the relations between size and mass (a), density within the effective radius and density within 1\,kpc (b), density within the effective radius and mass (c), and density within 1\,kpc and mass (d). Open symbols are nearby galaxies, solid circles are high redshift compact galaxies from vD08. Light grey points are nearby galaxies with masses $< 10^{11}\unit{M_{\odot}}$, i.e., lower than the high $z$ compact galaxies. Arrows begin at mean values of high redshift sample and show predictions from simple models for the evolution of the compact galaxies: blue arrows shows the direction of evolution due to equal-mass mergers, green arrows for minor mergers and red arrows for the expansion model. Simple expansion or minor mergers can bring the distant galaxies close to the scaling relations defined by nearby galaxies, but equal-mass mergers do not produce galaxies of the right size.} \label{fig:allprop} \end{figure*} \subsection{Comparison of Masses, Sizes, and Densities} The relative properties of the high redshift galaxies and nearby galaxies are demonstrated in Figure \ref{fig:allprop}. The compact galaxies from vD08 are indicated by solid circles. The nearby samples are represented by open symbols: squares for T09 and triangles for FPJ. Only nearby galaxies with sufficient mass, $M \gtrsim 10^{11} M_{\odot}$, can be the descendants of the high redshift galaxies. Galaxies with lower masses are denoted with light grey symbols. The relative compactness of high redshift and low redshift galaxies is shown in Figure \ref{fig:allprop}(a). There is a clear trend showing the increasing effective radius with galaxy mass in the nearby galaxies. The high redshift galaxies, though in the middle of the nearby mass range, are smaller by a factor of $\sim 5$ in effective radius. 
This result confirms previous studies, which generally used the Sloan Digital Sky Survey (SDSS) as a low redshift comparison point (e.g., {Toft} {et~al.} 2007; {van Dokkum} {et~al.} 2008; {Cimatti} {et~al.} 2008; {van der Wel} {et~al.} 2008). {van der Wel} {et~al.} (2008) combine data from the literature (in addition to adding new data at $z\approx 1$) and derive an evolution of $r_e \propto (1+z)^{-1.20\pm 0.12}$ at fixed mass (for samples with photometrically determined masses), corresponding to a factor of $4.2\pm 0.6$ at $z=2.3$. The difference in size at fixed mass implies a significant difference in density contained within the effective radius of the high redshift and nearby galaxies. We calculated the average densities within the effective radius by integrating the stellar density profiles derived in the previous Section: \begin{equation} \rho(<r) = \frac{3}{r^3} \int_0^{r} \rho(r'){r'}^2 dr' \end{equation} with $r=r_e$. This difference is obvious in Figure \ref{fig:allprop}(b): the vertical axis of this panel demonstrates the factor of $>100$ differences in the average density within the effective radius. The horizontal axis of Fig.\ \ref{fig:allprop}(b) shows the average density integrated to $r=1$\,kpc rather than $r=r_e$. For convenience, we will refer to the average density within 1\,kpc as the ``central density''. Since the compact galaxies have effective radii $\sim 1 \unit{kpc}$, the density within $r_e$ is approximately equivalent to our definition of the central density, placing these galaxies along the dashed diagonal line representing the equality of the two densities. The nearby sample lies predominantly below this line, with $\rho(< r_e)$ much lower than $\rho(<1\,{\rm kpc})$ for all galaxies with masses $>10^{11}$\,\msun. We infer that, although the high redshift galaxies have higher densities than nearby ellipticals overall, the differences are much smaller within 1 kpc than within $1\,r_e$. 
Figure \ref{fig:allprop}(c) and (d) demonstrate the same point in the density versus mass plane. In (c), we show the relation between $\rho(<r_e)$ and total stellar mass. The compact high redshift galaxies are clearly much denser than nearby galaxies of the same mass. In (d), it is shown that the discrepancy in density becomes far less extreme in the central regions of the galaxies. The nearby sample shows opposite trends with mass in (c) and (d): the density within the effective radius decreases with increasing mass (reflecting the slope of the mass -- radius relation), but the density within 1\,kpc grows with increasing mass. Interestingly, the central densities of the high redshift compact galaxies are very similar to those of nearby elliptical galaxies with masses $\gtrsim 5 \times 10^{11}$\,\msun. The trends in Fig.\ \ref{fig:allprop} are consistent with models in which the compact galaxies make up the centers of present-day giant ellipticals. Such inside-out formation scenarios are not new, and have been explored by, e.g., {Loeb} \& {Peebles} (2003), {Bournaud}, {Jog}, \& {Combes} (2007), {Naab} {et~al.} (2007), and {Hopkins} {et~al.} (2008). The idea is that a compact core is formed through highly dissipative processes at $z\gtrsim 3$ (see, e.g., {Robertson} {et~al.} 2006a; {Dekel} {et~al.} 2008), which then grows through increasingly dissipationless mergers at lower redshift. Independently, Franx et al. 2008 argues that galaxy growth is mostly inside-out, based both on the regular evolution of the stellar mass-radius relation, and on the fact that star forming galaxies are larger than non-star forming galaxies of the same mass. \section{Predictions from Simple Models} As discussed in \S\,1, various models have been proposed to explain the apparent growth of massive galaxies since $z\sim 2.5$. 
Here we discuss three possible simple models in the context of the relations shown in Fig.\ \ref{fig:allprop}: equal-mass mergers, minor mergers and expansion at fixed mass. We investigate the effects of these models in Fig.\ \ref{fig:allprop} with arrows. The starting point of the arrows is always the mean of the high redshift compact galaxies, and they all imply a growth in effective radius of a factor of 5. We emphasize that we look to constrain the dominant mode of galaxy evolution; while individual galaxies in the sample will likely be affected by all of the processes discussed below, we focus on the overall trends in the larger context of the sample of galaxies. \subsection{Model 1: Growth via Equal-Mass Mergers} In this model, the growth is driven by (near-) equal mass mergers. These mergers will not only increase the size of the galaxies, but also their mass. Applying straightforward virial arguments implies \begin{equation} K_{1+2} = K_1 + K_2, \end{equation} with $K_{1+2}$ the kinetic energy of the remnant and $K_1$, $K_2$ the kinetic energy of the progenitors. With $K= \frac{1}{2}M\sigma^2$ we have \begin{equation} \frac{1}{2} M_{1+2} \sigma_{1+2}^2 = \frac{1}{2}M_1\sigma_1^2 + \frac{1}{2}M_2 \sigma_2^2, \label{eq:virial} \end{equation} and as $M_{1+2} = M_1+M_2$ and $M_1=M_2$, we have $\sigma_{1+2}^2 = \sigma_1^2$. Using $\sigma^2 \propto GM/r$, we arrive at \begin{equation} \frac{r_{1+2}}{r_1} = \frac{M_{1+2}}{M_1}, \end{equation} the familiar result that mergers lead to an increase in size and mass but no change in velocity dispersion (e.g., {Barnes} 1992). We note that these relations are simplifications, which are inconsistent with the observed slopes of the stellar mass -- radius relation and the stellar mass -- $\sigma$ relation. Simulations which take the initial orbits and effects of energy transfer to the dark matter halos into account generally imply a smaller increase in size for a given change in mass. 
{Boylan-Kolchin}, {Ma}, \& {Quataert} (2006) find that $r_{1+2}/r_1 \sim (M_{1+2}/M_1)^{0.6-1}$, depending on the orbital configuration. The blue arrows in Fig.\ \ref{fig:allprop}(a-d) show the effects of equal-mass mergers on the various relations between mass, size, and density. The density within 1\,kpc was calculated by assuming that the Sersic indices of the profiles of the compact galaxies do not change. The blue arrows imply that the descendants of the compact galaxies are the dominant galaxies in massive groups and clusters, with stellar masses of $\sim 10^{12}$\,\msun. As can be seen in panel {\em d}, the central densities of these galaxies are nearly identical to those of the compact galaxies. However, as can be seen in panel {\em a}, the effective radii of these giant, nearby galaxies are a factor of $\sim 10$ larger than the compact objects, not a factor of $\sim 5$. Therefore, this model is not a very good description of the required evolution in panels {\em a} -- {\em c}. \subsection{Model 2: Growth via Minor Mergers} In this mode of galaxy growth, the progenitor galaxies accumulate mass via minor mergers with small systems. The difference with the equal-mass merger model is that minor mergers are more effective in ``puffing up'' the size a galaxy for a given change in stellar mass. For minor mergers $\sigma_1^2 \gg \sigma_2^2$ in Eq.\ \ref{eq:virial}, and therefore \begin{equation} \frac{\sigma_{1+2}^2}{\sigma_1^2} \approx \frac{M_1}{M_{1+2}} \end{equation} Again using $\sigma^2 \propto GM/r$ we have \begin{equation} \frac{r_{1+2}}{r_1} = \left(\frac{M_{1+2}}{M_1}\right) \left( \frac{\sigma_1^2}{\sigma_{1+2}^2}\right) \approx \left(\frac{M_{1+2}}{M_1}\right)^2. \end{equation} The effective radius grows by the square of the change in mass (rather than linearly, which is the case for equal-mass mergers) and the velocity dispersion decreases by the square root of the change in mass (rather than remaining constant) (see also {Naab} {et~al.} 2009). 
As an example, eight successive $M_2:M_1=1:10$ mergers could lead to a factor of $\sim 5$ increase in effective radius, while the mass would grow by a factor of $\sim 2$ only. The effects of this scenario are shown by the green arrows in Fig.\ \ref{fig:allprop}. Again, the density within 1\,kpc was calculated by assuming that the Sersic index of the profiles remains unchanged. The compact galaxies have a median mass of $1.7\times10^{11}$\,\msun; therefore the minor merger model predicts that their descendants are in galaxies with a median mass of $3-4\times 10^{11}$\,\msun\ today. The central densities of these galaxies are a very good match to those of the predicted descendants (panel {\em d}), and the effective radii are a much better match than in the equal-mass merger model (panel {\em a}). We note here that what matters is the direction of the arrows, as their length is arbitrarily determined by a growth of a factor of five in $r_e$. Extending the green arrows slightly would bring them very close to the distribution of nearby elliptical galaxies in all panels. \subsection{Model 3: Expansion at Fixed Mass} In the final model that we examine, a galaxy has accumulated most of its mass by $z \sim 2$ and then gradually expands over time while its mass stays roughly constant. The motivation for this class of models was provided by {Fan} {et~al.} (2008); they suggest that a QSO may blow out a large fraction of the mass, leading to a significant ``puffing up'' of the remnant. We will discuss whether such models are physically plausible in \S\,5.2 (see also {Trujillo} {et~al.} 2009). Predictions for these models are indicated by red arrows in Fig.\ \ref{fig:allprop}. By construction, these models predict the right amount of size evolution at fixed mass, and therefore produce the same values of $\rho(<r_e)$ as nearby elliptical galaxies. However, as can be seen in panels {\em b} and {\em d} they slightly under-predict the central densities of local ellipticals. 
The values of $\rho(<1\,{\rm kpc})$ that are predicted are a factor of $\sim 2$ lower than those of local ellipticals with the same mass. \subsection{Summary of Model Comparisons} We conclude that all three simple models bring the compact galaxies much closer to the relations defined by nearby elliptical galaxies. The equal-mass merger model provides the worst description of the three as it does not produce elliptical galaxies of the right size and is therefore probably not the dominant mode of growth. The minor merger is more effective in puffing up the compact galaxies and, despite its simplicity, provides a remarkably good description of the masses and densities of nearby elliptical galaxies. The expansion model provides a good description as well, although it slightly under-predicts the central densities of elliptical galaxies. \section{Discussion} The main result of the previous Section, and this paper, is that the properties of high redshift compact galaxies can be reconciled with those of nearby massive elliptical galaxies. The densities of the compact galaxies are similar to the central densities of elliptical galaxies, and simple ``toy models'' can be used to describe the evolution. \subsection{Independent Constraints on Mass Growth} The amount of mass growth in the models of \S\,4 is specified by the physical mechanism for growth (equal-mass merger, minor mergers, and expansion) and the choice of a factor of five increase in effective radius. To achieve this increase, the equal-mass merger model requires an increase in mass of a factor of $\sim 5$, the minor merger model requires an increase of a factor of 2--3, and the expansion model does not require an increase at all. The evolution of the galaxy mass function provides an independent constraint on the mass growth. 
The compact galaxies are common at the epoch of observation -- they constitute $\gtrsim 90$\,\% of ``red and dead'' galaxies at $z=2.3$ (vD08) and therefore some $\sim 50$\,\% of the general population of galaxies with stellar masses $\gtrsim 10^{11}$\,\msun\ (e.g., {Kriek} {et~al.} 2006; {Kriek} {et~al.} 2008; {Williams} {et~al.} 2008). The evolution of the galaxy mass function has been measured recently by several groups (e.g., {Drory} {et~al.} 2005; {Fontana} {et~al.} 2006; {Marchesini} {et~al.} 2008; {P{\'e}rez-Gonz{\'a}lez} {et~al.} 2008) . Here, we use the results from {Marchesini} {et~al.} (2008), who have combined data from both deep and wide surveys in a self-consistent way. Integrating the Schechter function fit given in {Marchesini} {et~al.} (2008) we find that the integrated number density of galaxies with stellar masses $M>10^{11} \unit{M_{\odot}}$ is $7.2^{+1.1}_{-1.1} \times 10^{-5}\unit{Mpc^{-3}}$ at $z=2.5$. The number density of compact, quiescent galaxies is therefore $3.6^{+0.9}_{-0.9}\times 10^{-5}\unit{Mpc^{-3}}$, where we assumed that the quiescent fraction is $0.5 \pm 0.1$. The stellar mass density in these galaxies is $4.8^{+1.6}_{-1.9} \times 10^6$\,\msun\,Mpc$^{-3}$. This number density and mass density provide strict bounds on the typical masses of the descendants of the compact galaxies. If the mass density of the compact galaxies exceeds that of local galaxies of a particular mass it is immediately clear that these local galaxies cannot constitute the (sole) descendants. Figure \ref{fig:numdens} shows the integrated Schechter stellar mass function in the local universe in dark grey, as well as the number density of compact galaxies with $M>10^{11}M_{\odot}$ at $z=2.5$ in light grey. For any descendant population at $z=0.1$, mass corresponds to a required growth factor, given on the lower axis. We first ignore mergers of compact galaxies with themselves and address that possibility later. 
In order for a model to be a feasible evolutionary path the implied descendant population of galaxies in the local universe must be at least as common as the progenitors at high redshift. \begin{figure}[t] \centering \includegraphics[scale=0.9]{f3.eps} \caption{Integrated number density of galaxies above a mass limit. The horizontal line is the number density of quiescent galaxies with $M>10^{11}$\,\msun\ at $z=2.5$. The diagonal relation is the number density at low redshift as a function of the mass limit. The mass limit is indicated in absolute units on the top axis, and as a growth factor compared to $z=2.5$ on the bottom axis. Vertical lines indicate the growth implied by the simple models discussed in \S\,4. Ignoring merging of compact galaxies with themselves, the mass functions at $z=2.5$ and $z\sim0$ do not allow for growth of more than a factor of 2--3. Strong merging of compact galaxies is ruled out by the integrated mass density at low redshift (see text). } \label{fig:numdens} \end{figure} The number density of nearby massive galaxies limits the mass growth to a factor of 2--3. For this mass growth each compact galaxy has one descendant. Lower mass growth implies that only a small fraction of massive galaxies today hosts a descendant of a compact galaxy. Higher mass growth is not allowed, as it would create too many descendants. Of course, these constraints are dependent on the masses at $z=2.5$, which we derived. If these masses are incorrect, this argument might change. Vertical lines in Fig.\ \ref{fig:numdens} indicate predictions from the three models discussed in \S\,4. The expansion model is obviously fully consistent with the constraints imposed by the evolution of the mass function, as it implies no mass growth. We note that only $\sim 7$\,\% of nearby galaxies with masses $>10^{11}$\,\msun\ are descendants of quiescent $z=2.5$ galaxies in this model; we will return to this point below. 
Remarkably, we can rule out the equal-mass merger model as the main mode of growth based on Fig.\ \ref{fig:numdens}, as it implies a mass growth of a factor of $\sim 5$. The number density of nearby galaxies with $M>5\times 10^{11}$\,\msun\ is lower by more than an order of magnitude than the number density of compact galaxies with $M>10^{11}$\,\msun\ at $z=2.5$. In the equal-mass merger model, compact galaxies can obviously merge with each other, which will lower their number density. However, a factor of $\sim 5$ mass growth is not allowed even when compact galaxies are {\it only} permitted to merge with each other: the stellar mass density in galaxies with $M>5\times 10^{11}$\,\msun\ at $z=0.1$ is $8.1^{+2.1}_{-1.6}\times 10^5$\,\msun\,Mpc$^{-3}$, a factor of 6 lower than the mass density in compact galaxies with $M>10^{11}$\,\msun\ at $z=2.5$. Also remarkably, the growth in the minor merger model is close to the cross-over point, where each compact galaxy has one descendant. A plausible explanation is that the central parts of many elliptical galaxies formed at $z>2.5$, after which they grew through minor, mostly dry mergers. More generally, we can combine panel {\em a} of Fig.\ \ref{fig:allprop} with Fig.\ \ref{fig:numdens} to derive an empirical constraint on the amount of size growth for a given amount of mass growth. Parameterizing the relation between size growth and mass growth as \begin{equation} \frac{r_{1+2}}{r_1} = \left(\frac{M_{1+2}}{M_1}\right)^{\alpha}, \label{eq:sizemass} \end{equation} we find that $\alpha \gtrsim 2$ to simultaneously satisfy the constraints from the evolution of the size -- mass relation (Fig.\ \ref{fig:allprop}{\em a}; van der Wel et al.\ 2008), and from the evolution of the mass function. This limit for $\alpha$ is similar to naive expectations from minor mergers, which is why we obtain a good correspondence between progenitors and descendants for this class of models. 
The equal-mass merger model has $\alpha \sim 1$ (or even $<1$; see {Boylan-Kolchin} {et~al.} 2006); for the expansion model $M_{1,f}/M_{1} = 1$ (or even $<1$ ) and Eq.\ \ref{eq:sizemass} is not well defined. \subsection{Which Models are Physically Plausible?} We expect that each of these toy models is responsible at some level for the growth and evolution of galaxies from $z \sim 2.5$ until today. Observational evidence of merging events, both equal-mass and minor, exists at intermediate redshifts and can definitely produce growth in galaxy mass and size. Mass loss from the central regions of galaxies should also occur and would therefore cause increases in galaxy sizes. Given this complexity, we hope to identify which of our simple models best describes the mechanism responsible for the majority of the growth of the compact galaxies at high redshifts into descendant galaxies in the nearby universe. In \S\,5.1 we found that the equal-mass merger model is inconsistent with the number density of massive galaxies today. We are therefore left with two feasible models, growth via ``in-situ'' expansion or via minor mergers. Both of these modes of galaxy growth have the effect of puffing up the galaxies without extreme mass growth. Number densities of the implied descendants of galaxies that have grown via either mode correspond to sufficiently common galaxies in the local universe. Although number density arguments do not immediately discredit the expansion model of galaxy evolution, they do lead to uncomfortable questions. The implication of no mass growth is that only a very small number of nearby galaxies with mass $>10^{11}$\,\msun\ was already formed at $z=2.5$: approximately 7\,\% if only quiescent galaxies at $z=2.5$ are considered, and $\sim 14$\,\% if all galaxies with $M>10^{11}$\,\msun\ are considered. This raises the question where the progenitors of the remaining $\sim 90$\,\% of today's massive galaxies are at $z=2.5$. 
In a hierarchical growth scenario, one expects that the most massive galaxies today have always been the most massive galaxies. Instead, the expansion model implies that the most massive galaxies at $z \sim 2.5$ evolve into a small fraction of average-mass elliptical galaxies today. Furthermore, the most massive galaxies in the local universe, with masses $M \gtrsim 3\times10^{11}M_{\odot}$ must then have formed rapidly in the later universe, implying an extremely active merging history of smaller objects. One might conclude that they formed through star formation at lower redshift, but this would be inconsistent with the stellar ages of massive ellipticals (e.g., {Thomas} {et~al.} 2005; {van Dokkum} 2008). There are other potential problems with the physical model proposed by {Fan} {et~al.} (2008). The growth relies on strong heating of the inner regions of the galaxy, such as that produced by a central active galactic nucleus (AGN). However, the high redshift galaxies in our sample are already shown to be quiescent, with old stellar populations. If there was an active central engine at one point in the galaxies' histories, it would have already blown out gas and led to expansion of the galaxy. While growth through mass loss may have played a role in the evolution of such galaxies, it is unlikely to do so again between $z=2.5$ and $z=0$, except possibly through stellar winds and supernovae. Based on simulations of open clusters, {Fan} {et~al.} (2008) argue that there could be a long delay between the expulsion of gas and the response of the stellar distribution to the new potential, but it is not clear whether these simulations can easily be applied to massive galaxies. Finally, the expansion model requires significant fine-tuning of the amount of mass that is removed from the galaxies: removing a small fraction of the mass does not have an appreciable effect, and removing too much would destroy the galaxies. 
Minor mergers (or rather, ``un-equal mass mergers'') are expected in galaxy formation models, and are predicted to dominate the mass growth of massive galaxies at late times (e.g., {Naab} {et~al.} 2007; {Guo} \& {White} 2008). Simulations have shown that the central regions of a galaxy can be minimally affected by dry mergers but that an envelope of newly accreted material is formed that grows with time ({Naab} {et~al.} 2007). Minor mergers have also been observed (e.g., {Schweizer} \& {Seitzer} 1992). {van Dokkum} (2005) infers that visible tidal features around nearby elliptical galaxies are caused by red mergers with median mass ratio $1:4$. It is an open question whether the merger rate is sufficiently high to produce a factor of 2--3 growth in mass since $z=2.5$. Models do predict high accretion rates (e.g., {De Lucia} {et~al.} 2006; {Naab} {et~al.} 2007; {Guo} \& {White} 2008), but some observations suggest that mass growth may be small for the highest masses (e.g., {Cool} {et~al.} 2008). Minor merger models are also qualitatively consistent with the uniform and gradual evolution of the size -- mass relation (e.g., {Franx} {et~al.} 2008; {van der Wel} {et~al.} 2008), and the apparent lack of old massive compact galaxies in the local Universe (e.g., {Trujillo} {et~al.} 2009). If equal-mass mergers were a dominant mechanism one might expect to find some galaxies that did not experience a major merger and are therefore left intact at the present day, but this is very unlikely in a minor merger model. \subsection{Implied Velocity Dispersions} \begin{figure}[t] \centering \includegraphics[scale=0.9]{f4.eps} \caption{Implied velocity dispersions of high and low redshift galaxies, along with approximate predictions from the three models discussed in \S\,4. 
} \label{fig:dynprop} \end{figure} As has been pointed out in several studies, the small sizes and high masses of the compact red galaxies imply very high velocity dispersions ({Toft} {et~al.} 2007; {Cimatti} {et~al.} 2008; {van Dokkum} {et~al.} 2008). Figure \ref{fig:dynprop} demonstrates the implied dynamical properties of the nearby and high redshift galaxy population as well as the possible evolutionary tracks of these galaxies. We calculated the velocity dispersions from the equation given in {van Dokkum} \& {Stanford} (2003): \begin{equation} \log M \equiv 2 \log \sigma + \log r_e +6.07, \label{eq:sigma} \end{equation} with $r_e$ in kpc and $M$ in Solar masses. This expression is not very accurate as it does not take the relation between $M$ and $M/L$ or the effects of a dark halo into account, but it does allow a comparison in a self-consistent way. Predictions from the simple models of \S\,4 are shown by arrows. The expansion model predicts that the dispersions decrease over time, as the total mass of the galaxy remains constant and the effective radius increases. As discussed in \S\,4.1 the equal-mass merger model predicts that the velocity dispersions remain constant as the mass grows, which implies that the descendants have velocity dispersions that are higher than are implied by the galaxies in the local sample. Growth by minor mergers presents a possible method of decreasing the velocity dispersion of the galaxies, as the expansion is a stronger factor than mass growth. This mechanism, again shown by the green arrow on Figure \ref{fig:dynprop}(a), evolves the compact galaxies onto the velocity dispersion trend for local galaxies. This assumes mass growth by a factor of 2.1, based on the same specific minor merging history described in \S\,4.2, and size growth by a factor of 5. 
\section{Summary and Conclusions} The main result from our study is that nearby elliptical galaxies have similar average densities within 1\,kpc as the recently discovered compact ``red and dead'' galaxies at high redshift. The descendants of the compact ``red and dead'' galaxies at $z>2$ could therefore simply constitute the central parts of today's massive elliptical galaxies. Models dominated by minor mergers (where ``minor'' implies ``not equal mass'') can increase the sizes of the galaxies efficiently, without violating constraints from the evolution of the mass function as measured by Marchesini et al.\ (2008). Interestingly, the evolution of the mass -- size relation and the mass function together imply that $\sim 50$\%\footnote{In a minor merger model the exact fraction could range from $\sim 10\%$ to $100\%$, depending on the order and mass ratio of mergers.} of elliptical galaxies with mass $\gtrsim 2-3 \times 10^{11}$\,\msun\ may have the remnant of a compact $z=2.5$ galaxy with mass $\gtrsim 1\times 10^{11}$\,\msun\ in their centers. Models which require energy input by a central engine to ``puff up'' the galaxies can also adequately evolve compact galaxies into sufficiently common local counterparts, but these models require significant fine-tuning and may not be physically plausible as the primary growth mechanism. We note that we did not consider star formation as a way to grow the compact $z\sim 2.3$ galaxies. Although it is possible that star formation re-starts at lower redshifts, newly formed stars can only account for a small fraction of the final mass given the stellar ages inferred for massive ($\gtrsim 2 \times 10^{11} M_{\odot}$) galaxies at $z=0$ (e.g., Thomas et al.\ 2005; van Dokkum \& van der Marel 2007). Nevertheless, a small amount of star formation could help increase the sizes between $z\sim 2.3$ and $z=0$ (see also Franx et al.\ 2008). 
The minor merger model predicts evolution in the {Magorrian} {et~al.} (1998) relation between black hole mass and velocity dispersion. The central black hole will grow from $z=2.3$ to the present, with the amount of growth determined by the black hole masses of the infalling galaxies. However, the velocity dispersion will decrease by a factor of $\sim 1.5$. Therefore, even if the black hole growth is insignificant, black hole masses at fixed velocity dispersion will be significantly lower at $z\sim 2.5$ than at $z=0$. {Robertson} {et~al.} (2006b) came to the same conclusion using merger simulations, but this prediction contrasts with several other studies (e.g., {Cen} 2007; {Woo} {et~al.} 2008). The galaxy growth models that we describe here are simple and the empirical findings are preliminary, highlighting the need for further study. On the modeling side, the main uncertainties are whether the merger rate is sufficiently high to produce the required growth, and whether a realistic treatment of the dark matter and orbital configurations retains the high efficiency of minor mergers to ``puff up'' a galaxy. Whatever the dominant physical mechanism turns out to be, we find that $\alpha \gtrsim 2$ if the relation between size growth and mass growth is parameterized as $r_{1+2}/r_1 = (M_{1+2}/M_1)^{\alpha}$. Inside-out formation via mergers predicts that stars in the central regions of a nearby elliptical galaxy are qualitatively different from stars at larger radii. Elliptical galaxies do have color- and metallicity gradients, which could reflect differences in stellar populations between stars formed in-situ and those accreted from other systems (e.g., {Peletier} {et~al.} 1990). While it is not yet clear whether these gradients are consistent with such accretion scenarios, it may be difficult to reconcile them with an expansion model alone (see, e.g., {Pipino} \& {Matteucci} 2008). 
It is tempting to identify kinematically decoupled cores (e.g., {Franx} \& {Illingworth} 1988; {Bender} 1988) with the descendants of the compact galaxies, but the scales of these features are typically a few 100 pc rather than $\sim 1$\,kpc. More information on color gradients and the inner $\sim 1$\,kpc of the compact high redshift galaxies will provide important additional constraints. Our determinations of stellar density profiles and masses can be improved. The calculated density profiles and integrated masses are based on Sersic profile fits to the galaxy light distributions, not on the actual light profiles themselves. Furthermore, for the high redshift galaxies the profiles within $\sim 1$\,kpc are extrapolations, as the galaxies are not resolved on smaller scales. The conversion from light to mass is also very uncertain. The conversion for the local samples ignores scatter in the $M/L$ versus $L$ relation, and ignores gradients in $M/L$ ratio. The mass estimates of the high redshift galaxies are based on stellar population models and are very sensitive to the assumed IMF and to possible contributions from dark matter. As noted in \S\,1, bottom-light IMFs would change the masses and alter the required amount of size- and mass evolution to bring the galaxies to local relations. Measurements of absorption-line kinematics of high redshift compact galaxies would provide a direct test of the IMF, and of several of the other assumptions that enter the analysis (see, e.g., {Cimatti} {et~al.} 2008). van der Wel et al.\ (2008) find that the observed size evolution at $0<z<1$ is similar when dynamical masses rather than photometric masses are used, but this needs to be verified at higher redshifts. \begin{acknowledgements} We thank Avishai Dekel and Jeremiah Ostriker for helpful discussions. Support from NASA grants HST-GO-10808 and HST-GO-10809 is gratefully acknowledged. \end{acknowledgements}
1,108,101,565,847
arxiv
\section{Mitigating catastrophic forgetting} \label{section:method} In \S\ref{section:challenge_wikilingua0}, we show that increasing model scale and decreasing tunable parameter capacity are both effective in improving \ssc{XGen} performance. Can we obtain further improvements by devising methods that explicitly tackle catastrophic forgetting? In this section, we investigate three methods: (1) mixing unlabeled training data with the supervised data, (2) first tuning the model on an intermediate task, and (3) factorizing the learned prompts into composable language and task modules. We show that while \ssc{XGen} performance hardly improves with intermediate tuning, the other two methods can provide substantially better results when there is severe catastrophic forgetting. Below, we describe each method and then analyze our findings in more detail. \subsection{Methods} \label{section:method_description} \paragraph{Mixing in unlabeled training data:} A simple method involving multi-task learning by mixing an unsupervised training task (\bssc{Unsup}) into the \ssc{Wikilingua-0} data. Mixing is controlled by a mixing rate $\kappa$, resulting in a final mixture that is $\kappa$\% \ssc{Unsup} data and $(100-\kappa)$\% \ssc{Wikilingua-0}. As a data augmentation scheme, this method can be applied in all settings. We use the span corruption pretraining objective from T5~\cite{CRaffel20} with {\textit{m}\ssc{C4}}\xspace data. We create separate multilingual datasets for each target language (\bssc{Mix-Unsup}) as well as a single multilingual dataset that includes all of the \ssc{Wikilingua-0} languages (\bssc{Mix-Unsup-All}). Our goal is to encourage the model not to forget about other languages during training on English summarization. 
In our experiments we use $\kappa=1$.\footnote{In our preliminary experiments, $\kappa=1$ performed best among a range of values $\{1, 5, 10, 30, 50\}$.} \paragraph{Intermediate tuning:} As an adaptation step, we perform model or prompt tuning on an intermediate task before training on \ssc{Wikilingua-0}. Intermediate tuning has been used to boost performance on English tasks for both {\ssc{ModelTuning}}\xspace~\cite{JPhang19,TVu20} and {\ssc{PromptTuning}}\xspace~\cite{TVu22}, and has been successfully applied to the zero-shot cross-lingual transfer setting~\cite{JPhang20,KMaurya21} for {\ssc{ModelTuning}}\xspace. \citet{KMaurya21} show that intermediate tuning on an auxiliary unsupervised task from the target language is helpful in conjunction with freezing some model components for \ssc{ModelTuning}. Previous work has used an auxiliary task designed to be close to the main task, while we simply use {\textit{m}\ssc{C4}}\xspace data. For each target language we create a causal, left-to-right LM task by providing no context, i.e., the encoder's input is empty (\bssc{IT-LM}). To further explore the effect of continued training on English data, we include an additional experiment where the \ssc{Gigaword}~\cite{DGraff03} summarization dataset is used as the intermediate task (\bssc{IT-Gigaword}).\footnote{We found that additional tuning was helpful for intermediate tuning on large datasets. 
As such, we performed $200{,}000$ steps during tuning on an intermediate task and selected the best prompt checkpoint based on validation performance on that task.} \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{figures/factorized-prompts.pdf} \caption{Our ``factorized prompts'' approach learns recomposable language and task sub-prompts by training on all language / task combinations from a set of unsupervised tasks covering all target languages.} \label{fig:factorized-prompts} \end{center} \end{figure} \paragraph{Factorized prompts:} Inspired by the \ssc{MAD-X}\cite{JPfeiffer20} adapter-based framework that learns modular language and task representations to adapt a multilingual model to arbitrary tasks and languages, we propose a novel method, dubbed ``factorized prompts'' (\bssc{FP}) and specifically designed for {\ssc{PromptTuning}}\xspace. We attempt to explicitly decompose a soft prompt into ``task'' and ``language'' components that can be recombined in novel pairings (see Figure \ref{fig:factorized-prompts}) with the goal of learning soft prompts that consist of disentangled and interpretable components. Unlike \ssc{MAD-X}, which learns language and task adapters separately for each language and each task, we perform a multi-task multilingual training procedure that learns language and task sub-prompts jointly for all languages and tasks, and directly targets our definition of disentanglement. We use {\textit{m}\ssc{C4}}\xspace data for all 18 {\mbox{\ssc{WikiLingua-0}}}\xspace languages to create 7 unsupervised tasks per language. We randomly initialize language and task sub-prompts, each $50$ tokens long. For each training example in our multi-task multilingual mixture, the relevant task and language sub-prompts are concatenated to form a full $100$-token prompt. 
This training yields a set of learned language and task sub-prompts.\footnote{As our mixture of tasks is large, we tuned for $200{,}000$ steps for this training procedure.} Next, we train a new task sub-prompt on {\mbox{\ssc{WikiLingua-0}}}\xspace English summarization data while using a frozen copy of the English language sub-prompt. Finally, when performing inference in another language, we replace the English sub-prompt with the target language sub-prompt, while continuing to use the learned summarization sub-prompt. To ablate the impact of the target language sub-prompt, we also report the performance using the English sub-prompt for all languages (\bssc{FP-En}). We use 7 unsupervised tasks per language, including: the \ssc{prefix LM}, \ssc{span corruption}, and \ssc{i.i.d.~denoising} tasks described in~\citet{CRaffel20}; \ssc{LM}, the causal \ssc{LM} task used for intermediate tuning; \ssc{missing prefix prediction}, predicting a missing prefix from the input; \ssc{n-token prefix prediction}, copying the first $n$-tokens of the input; and \ssc{missing n-token prefix prediction}, predicting the missing $n$-token prefix of the input. When training on {\mbox{\ssc{WikiLingua-0}}}\xspace, we initialize the task sub-prompt with the learned \ssc{span corruption} task sub-prompt. \input{tables/results} \subsection{Results and Discussion} \paragraph{Mixing in multilingual data prevents catastrophic forgetting:} In Table \ref{tbl:challenge_wikilingua0}, we observe that mixing in unsupervised multilingual data helps to prevent catastrophic forgetting in all conditions---increasing the likelihood of the model outputting text in the target language. With {\ssc{ModelTuning}}\xspace, this improved language accuracy reliably translates into higher end task performance (\ssc{SP-Rouge}). 
For {\ssc{PromptTuning}}\xspace, mixing provides a benefit for non-Latin script languages (\ssc{Ru} and \ssc{Th}) where catastrophic forgetting is more severe; for Latin-script languages (\ssc{Fr} and \ssc{Vi}), mixing harms the overall summarization quality, despite achieving higher language accuracy. Mixing in multilingual data in \emph{all} \ssc{Wikilingua} languages leads to similar results, with a marginal drop in performance. Thus, if the desired target language is known ahead of time, the simpler strategy of mixing in just that language should be preferred. However, in cases where the inference language is unknown, mixing many languages is also a viable strategy. \paragraph{Intermediate tuning does not give reliable gains:} Intermediate tuning on English summarization (\mbox{\ssc{IT-Gigaword}}) improves English performance, but generally hurts \ssc{XGen} capabilities. For {\ssc{ModelTuning}}\xspace, it exacerbates catastrophic forgetting and harms overall performance across all model sizes. For {\ssc{PromptTuning}}\xspace, English intermediate tuning provides small gains at \ssc{Base} size, but is harmful at \ssc{XXL} size. Intermediate tuning on an \ssc{LM} task in the target language (\mbox{\ssc{IT-LM}}) has a neutral or negative effect in most cases, running somewhat counter to the findings of \citet{maurya-etal-2021-zmbart}.\footnote{Note, however, that their unsupervised task was designed to be well-aligned with their downstream tasks of choice.} Compared to directly mixing in unlabeled multilingual data, intermediate tuning has little benefit on language accuracy. This smaller effect is to be expected, given that the final stage of English-only training is still ample opportunity to overfit on English and catastrophically forget other languages. \paragraph{Factorized prompts are helpful for overcoming severe catastrophic forgetting:} Factorized prompts are successful at improving target language accuracy in all conditions. 
However, this does not always translate to higher \ssc{SP-Rouge}. When language accuracy is already relatively high (for Latin-script languages, and for \ssc{XXL} models), factorized prompts are not helpful. However, in settings where vanilla {\ssc{PromptTuning}}\xspace shows the most severe forgetting (e.g., at \ssc{Base} size, on non-Latin script languages), factorized prompts provide large gains, similar to or exceeding our mixing approach. \section{Conclusion} \label{section:conclusion} In this work, we explored how different adaptation methods fare on the challenging ``\ssc{XGen}'' task of zero-shot cross-lingual summarization. While many methods struggled with catastrophic forgetting (outputting English rather than the target language), we observed two factors helped to mitigate this problem. First, increasing model scale reliably reduces forgetting, perhaps indicating that larger models can generalize from fewer examples, so are less prone to overfit on the training domain. Second, decreasing the number of parameters tuned during adaptation is helpful. When all of a model's weights are tuned on English ({\ssc{ModelTuning}}\xspace), forgetting is quick and severe. By contrast, limiting the tunable parameters to a smaller soft prompt ({\ssc{PromptTuning}}\xspace) helps to combat forgetting, though prompt size is a key variable to control, as bigger prompts have more capacity to overfit on English. To further close the gap with supervised approaches, we explored three adaptation techniques---one entirely novel, and two that have been used before, but not in combination with parameter-efficient methods like {\ssc{PromptTuning}}\xspace. We find that mixing in unsupervised multilingual data is always helpful, and compares favorably to intermediate tuning. Our novel approach, ``factorized prompts'', is helpful at smaller model sizes, but has no benefit at larger sizes. 
We hope that future work will continue to explore \ssc{XGen} tasks including {\mbox{\ssc{WikiLingua-0}}}\xspace, and develop stronger zero-shot adaptation techniques to allow multilingual models to reliably generate coherent text in any target language. \section*{Acknowledgements} \label{section:acknowledgements} \todo{Anyone}{Write this section} \section{Related Work} \label{section:related_work} The mixing of unlabeled multilingual pre-training data into the training data can be viewed as a simplified version of rehearsal \cite{ARobins95}, commonly used to mitigate catastrophic forgetting. Related work has used this mixing \cite{LXue21,shakeri-etal-2021-towards} to combat ``accidental translation'', a symptom of English overfitting, when using mT5. However, these works are concerned with {\ssc{ModelTuning}}\xspace while we apply this to {\ssc{PromptTuning}}\xspace. Related work has also explored intermediate adaptation of pre-trained language models to make them more amenable to downstream tasks. Generally these approaches target domain transfer \cite{SGururangan20}, adapting the model to the idiosyncrasies of text from a specific domain, or task oriented transfer, where unlabeled \cite{JHoward18} or labeled data \cite{JPhang19,TVu20} from the task or related tasks increases the final model performance. This has also been shown to be effective for {\ssc{PromptTuning}}\xspace \cite{TVu22}. Like us, \citet{JPhang20} extends intermediate transfer into the multilingual domain, but they focus on transferring from an intermediate task in English to other languages while our intermediate task uses the target language. Similarly, \citet{KMaurya21} have a cross-lingual intermediate pre-training task. However, their task is designed to closely match the downstream task rather than the standard LM objective we use. 
Additionally, their intermediate task is used to update the whole model (with some parts frozen to mitigate English only overfitting), with the only language specific parameters being embeddings of special language tokens while our approach leverages parameter-efficient methods to explicitly create language specific parameters. Several works use intermediate tuning to create a model that is better in the zero (or few)-shot setting \cite{JWei22,VSanh22,SMin22}, but those works are concerned with generalization to new tasks while we focus on generalizations to new languages with a common task. Other work exploring cross-lingual transfer learning and parameter-efficient methods includes \citet{MZhao21}. They find that prompts can effectively be used in cross-lingual settings, but their work is constrained to classification while ours focuses on generative tasks. \citet{JPfeiffer20} explores both this setting, using adapters rather than our prompts, and the idea of leveraging parameter-efficient learning methods to create separate language and task understanding modules that can be combined and applied to unseen \texttt{<language, task>} pairs. In their work, the language and task adapters are trained independently on different tasks before being combined at test time. In contrast, our work jointly trains language and task prompts via a multi-task multilingual training procedure where the appropriate language and task prompt pair is used for each training example. There has been recent interest in cross-lingual generation. \citet{Chi_Dong_Wei_Wang_Mao_Huang_2020} evaluate their method using cross-lingual generation, including summarization as we do. However, their methods use parallel data during pre-training to ``align'' representations across languages while our approach does not. \citet{shakeri-etal-2021-towards} train mT5 on English passage to \texttt{<question, answer>} generation and use it for zero-shot generation in other languages. 
Additionally they mix unsupervised data from other languages into training, similar to one of our approaches. \citet{KMaurya21} also include cross-lingual summarization as part of their evaluation suite. \section*{Appendices} \label{section:appendices} \section{Evaluation on zero-shot cross-lingual benchmarks} \label{appendix:xlingeval} From Table~\ref{tbl:xnli} to Table~\ref{tbl:wikilingua}, we show our results for {\ssc{ModelTuning}}\xspace and {\ssc{PromptTuning}}\xspace across different zero-shot cross-lingual benchmarks. Overall, we find that \ssc{ModelTuning} typically performs better than \ssc{PromptTuning}, although \ssc{PromptTuning} at scale (i.e., \ssc{XXL}) matches the performance of \ssc{ModelTuning} on English and can yield better results on some languages. \input{tables/xnli_xquad} \input{tables/mlqa_tydiqa} \input{tables/pawsx_wikiann} \input{tables/wikilingua} \clearpage \newpage \section{Measuring the correlation between {\bssc{SP-RG}}\xspace and human judgments} \label{appendix:correlation} To evaluate how well our proposed {\ssc{SP-RG}}\xspace metric correlates with human judgments, we use the \ssc{MultiSumm Eval} dataset introduced by \citet{koto-etal-2021-evaluating}, which is a manually-annotated multilingual resource for summarization evaluation with $4{,}320$ human annotations on \ssc{Focus} (precision) and \ssc{Coverage} (recall) between machine-generated summaries and ground-truth summaries. We compare {\ssc{SP-RG}}\xspace to \ssc{BLEURT}~\cite{TSellam20}, which is a learned evaluation metric based on \ssc{BERT}~\cite{JDevlin19}. Table \ref{tbl:correlations} shows the Pearson correlation coefficient between these metrics and human judgments across 8 \ssc{MultiSumm Eval} languages, including German (\ssc{De}), English (\ssc{En}), Spanish (\ssc{Es}), French (\ssc{Fr}), Indonesian (\ssc{Id}), Russian (\ssc{Ru}), Turkish (\ssc{Tr}), and Mandarin Chinese (\ssc{Zh}). 
Overall, we found that {\ssc{SP-RG}}\xspace performed as well as the more computationally expensive \ssc{BLEURT} metric. Specifically, {\ssc{SP-RG}}\xspace achieved an average \ssc{Focus} score of 0.68 and an average \ssc{Coverage} score of 0.65, whereas \ssc{BLEURT} achieved 0.68 and 0.70, respectively. Additionally, the scatterplot in Figure \ref{figure:correlation} demonstrates the linear relationship between {\mbox{\ssc{SP-RG-LSUM}}}\xspace vs \ssc{Focus} scores on French. \input{tables/correlation} \input{figures/correlation} \section{Challenge of zero-shot cross-lingual generation} \label{section:challenge_wikilingua0} Much recent progress in multilingual NLP has been driven by zero-shot cross-lingual benchmarks that require a model to perform classification~\cite{AConneau18,YYang19}, extractive QA~\cite{MArtetxe20,PLewis20,JClark20}, or sequence labeling~\cite{XPan17}.\footnote{We refer the interested reader to Appendix~\ref{appendix:xlingeval} for a comprehensive comparison of {\ssc{ModelTuning}}\xspace and {\ssc{PromptTuning}}\xspace on these benchmarks. Overall, we find that \ssc{ModelTuning} typically performs better than \ssc{PromptTuning}, although \ssc{PromptTuning} at scale (i.e., \ssc{XXL}) matches the performance of \ssc{ModelTuning} on English and can yield better results on some languages.} Here, we are interested in a more challenging task of zero-shot cross-lingual generation (\ssc{XGen}) where a model is trained on a generative task in one language (i.e., English), and then asked to perform the equivalent task in another language during inference. We construct a novel zero-shot cross-lingual summarization task and show that state-of-the-art text-to-text models adapted using {\ssc{ModelTuning}}\xspace and {\ssc{PromptTuning}}\xspace techniques are not able to successfully perform our task. Our analysis reveals that both techniques suffer from catastrophic forgetting, causing them to often generate text in the wrong language. 
\subsection{Problem formulation} \label{subsection:problem_formultation} \paragraph{Defining \bssc{Wikilingua-0} zero-shot cross-lingual summarization:} We leverage the \ssc{Wikilingua} dataset~\cite{FLadhak20,SGehrmann21} to create a novel zero-shot cross-lingual summarization task, which we dub \mbox{\ssc{WikiLingua-0}}.\footnote{Note that the original \ssc{WikiLingua} task is not suitable for direct use in our \ssc{XGen} setting, as it aims to generate English summaries from non-English articles.} While \ssc{WikiLingua} provides labeled training data in 18 languages (including English), we are interested in a more realistic experimental setup where no training data is provided in non-English languages, as it is less practical to obtain labeled data for real low-resource languages.\footnote{Although one might rely on machine translation to obtain labeled data in a language of interest, this is not particularly appealing due to: (i) extra computation required, (ii) varied translation quality across languages~\cite{SRuder21}, (iii) potential loss of discourse structure~\cite{li-etal-2014-assessing}, and (iv) limited understanding of black box production translation systems.} As such, we discard all training data for non-English languages, with the exception of ablation experiments, and cast \ssc{WikiLingua} as training a model with English summarization data and feeding it non-English articles during zero-shot evaluation. \paragraph{Defining {\bssc{SP-RG}}\xspace for multilingual summarization evaluation:} The \textsc{rouge} metric~\cite{CLin04} has been extensively used in evaluation of summarization systems. 
However, it assumes that the input text uses spaces to separate words, which is not the case for many languages (e.g., Chinese, Japanese, and Thai).\footnote{In preliminary experiments, we found that a direct use of Rouge yielded extremely poor \ssc{Rouge} results in many languages, despite reasonably good summaries.} One possible solution is to use language-specific tokenizers, as done in~\citet{AConneau19}. To avoid language-specific preprocessing, we use SentencePiece sub-word tokenization~\cite{TKudo18}, which is data-driven and language independent.\footnote{\citet{NGoyal21} also use a similar approach for BLEU~\cite{KPapineni02}.} We call our metric SentencePiece-based \ssc{Rouge} or {\ssc{SP-RG}}\xspace for short, and report \mbox{{\ssc{SP-RG}}\xspace-\ssc{Lsum}} in all of our experiments.\footnote{\fssc{Rouge-Lsum} is the summary-level \fssc{Rouge-L} metric used in~\citet{ASee17}. } In Appendix \ref{appendix:correlation}, we demonstrate that {\ssc{SP-RG}}\xspace correlates well with human judgments, providing a similar correlation to a more computationally expensive metric, i.e., \ssc{BLEURT}~\cite{TSellam20}. 
\subsection{Experimental setup} \subsubsection{Baselines} In addition to performing vanilla {\ssc{ModelTuning}}\xspace and {\ssc{PromptTuning}}\xspace directly on \mbox{{\mbox{\ssc{WikiLingua-0}}}\xspace}, we consider the following baselines: \paragraph{\bssc{Lead-64}:} This baseline simply copies the first $64$ SentencePiece tokens from the input article.\footnote{In our preliminary experiments, $n=64$ performed best among a range of values $\{32, 64, 128, 256\}$.} \paragraph{\bssc{trans-train}:} We perform {\ssc{ModelTuning}}\xspace or {\ssc{PromptTuning}}\xspace on {\mbox{\ssc{WikiLingua-0}}}\xspace English summarization data that is translated into the target language using \ssc{Google Translate}. \paragraph{\bssc{trans-test}:} We train on English summarization data and evaluate on validation data that is translated from the target language to English. \paragraph{\bssc{sup \& sup-all}:} To ablate the impact of using the labeled training data provided in the original \ssc{WikiLingua} dataset for all languages, we either train on supervised data for each individual target language (\ssc{sup}) or a mixture of supervised data from all languages (\ssc{sup-all}). \subsubsection{Training and implementation details} \label{para:hyper-params} We perform {\ssc{ModelTuning}}\xspace and {\ssc{PromptTuning}}\xspace on top of the pretrained {\textit{m}\ssc{T5}}\xspace~\cite{LXue21} of all sizes: \ssc{Small}, \ssc{Base}, \ssc{Large}, \ssc{XL}, \ssc{XXL}\footnote{With 300M, 580M, 1.2B, 3.7B, and 13B parameters, respectively.}. 
For {\ssc{PromptTuning}}\xspace, we create an \ssc{LM} adapted version of these checkpoints by further training them for 100K steps with the ``prefix LM'' objective~\cite{CRaffel20} using {\textit{m}\ssc{C4}}\xspace~\cite{LXue21} data for all languages.\footnote{A similar approach was used in~\citet{BLester21} for {\ssc{PromptTuning}}\xspace with \ssc{T5}.} Except for ablations, we use $100$ prompt tokens and initialize the prompt by sampling from the first $5{,}000$ {\textit{m}\ssc{T5}}\xspace's vocabulary embeddings. During training, inputs and targets are clipped to $1024$ and $512$ SentencePiece tokens, respectively. We always train for $100{,}000$ steps for both {\ssc{ModelTuning}}\xspace and {\ssc{PromptTuning}}\xspace. We save a checkpoint every $5{,}000$ steps and report results on the model checkpoint corresponding to the highest validation performance on a target language. All models are trained using \ssc{T5X} \cite{ARoberts22}. All reported metrics are calculated on $250$ examples from the validation set. \input{tables/challenge_wikilingua0} \subsection{Results and Discussion} \input{figures/learning_curves} \paragraph{\bssc{Wikilingua-0} is challenging for both {\bssc{ModelTuning}}\xspace and {\bssc{PromptTuning}}\xspace:} Our zero-shot evaluation results on \ssc{Wikilingua-0} for French (\bssc{Fr}), Russian (\bssc{Ru}), Vietnamese (\bssc{Vi}), and Thai (\bssc{Th}) are shown in Table~\ref{tbl:challenge_wikilingua0} (see Table~\ref{tbl:wikilingua} in Appendix~\ref{appendix:xlingeval} for results across all target languages). For reference, we also include evaluation results on English. Overall, we find that switching the language during inference results in a substantial performance drop for both model adaptation techniques, especially when feeding in articles in non-Latin script languages like Russian and Thai. 
Consistent with the findings in~\citet{SAn22} for other generative tasks, we find that {\ssc{PromptTuning}}\xspace, even with scale, falls far below {\ssc{ModelTuning}}\xspace on monolingual English summarization.\footnote{This is somewhat surprising since across the other tasks we tried above, {\ssc{PromptTuning}}\xspace at \ssc{XXL} can match the performance of {\ssc{ModelTuning}}\xspace when evaluated on English.} \paragraph{{\bssc{PromptTuning}}\xspace is better on larger domain shifts:} Interestingly though, {\ssc{PromptTuning}}\xspace is competitive with or beats {\ssc{ModelTuning}}\xspace when evaluated on other languages. For instance, at \ssc{XXL}, {\ssc{PromptTuning}}\xspace outperforms {\ssc{ModelTuning}}\xspace by a large margin of +7.3 {\ssc{SP-RG}}\xspace (37.4 vs.~30.1) on Thai. A closer look at these results reveals an interesting pattern: as model size increases, {\ssc{PromptTuning}}\xspace usually produces better results than {\ssc{ModelTuning}}\xspace when there is a significant language shift at inference time (e.g., from English to a non-Latin script language).\footnote{With the exception of a few languages (e.g., Chinese).} This corroborates the view in~\citet{BLester21} that {\ssc{ModelTuning}}\xspace may be over-parameterized and thus more prone to overfit the training task and less robust to domain shifts. \paragraph{Both {\bssc{ModelTuning}}\xspace and {\bssc{PromptTuning}}\xspace suffer from catastrophic forgetting:} When performing zero-shot evaluation on non-English languages, we discover a problematic behavior of both {\ssc{ModelTuning}}\xspace and {\ssc{PromptTuning}}\xspace, in which they partially summarize non-English articles into English instead of the target language. This suggests that both techniques suffer from overfitting on the training task. 
To probe more deeply into this problem, we evaluate performance for each saved checkpoint, and additionally measure: (i) \ssc{LID$_{lang}$}---the average confidence score given by \texttt{cld3}\footnote{\url{https://github.com/google/cld3}} when detecting the language \ssc{$lang$}, (ii) \ssc{ASCII}---the average percentage of \ssc{ASCII} characters present in the model's predictions, with a higher value indicating a larger amount of English in the model's output for non-Latin script languages. Figure \ref{figure:learning_curves} shows our evaluation results as training progresses. For {\ssc{PromptTuning}}\xspace, we observe a clear ``deteriorating'' trend, where the longer the prompt is tuned on English, the more unwanted English is generated, and the lower summarization quality becomes for Russian and Thai. For {\ssc{ModelTuning}}\xspace, even by the first checkpoint, the model has already heavily overfit to English, outputting $>$60\% \ssc{ASCII} for Russian and Thai inputs. There is a modest recovery later in training, but overall quality remains low. Our manual inspection suggests that more training on English typically leads to more English produced when the models perform summarization in a non-English language. We note that this ``English overfitting'' problem is more pronounced for {\ssc{ModelTuning}}\xspace, i.e., {\ssc{ModelTuning}}\xspace is more prone to overfitting the training task. \paragraph{Bigger models are less prone to forget:} In the middle block of Table \ref{tbl:challenge_wikilingua0}, we observe that moving to larger model sizes mitigates catastrophic forgetting to a remarkable extent. This is true both for {\ssc{ModelTuning}}\xspace (in line with the findings of \citet{xue-etal-2021-mt5}), as well as for {\ssc{PromptTuning}}\xspace. 
For example, at \ssc{Small} size, {\ssc{ModelTuning}}\xspace and {\ssc{PromptTuning}}\xspace only successfully generate Russian text 0.0\% and 10.1\% of the time respectively, whereas at \ssc{XXL} size, these numbers jump to 57.5\% and 84.4\%. \paragraph{Too much capacity is harmful:} In the bottom block of Table \ref{tbl:challenge_wikilingua0}, we observe an interesting ``paradox of capacity'' with regard to prompt length. On the one hand, greater capacity (in the form of longer prompts) clearly helps to better learn the summarization task. On the other hand, the greater the capacity to learn from English training data, the more the model forgets other languages. For each language and model size, we observe a ``balance point'' past which adding extra capacity becomes harmful. For instance, in Thai at the \ssc{XXL} size, increasing capacity from $1$ to $10$ prompt tokens improves summarization quality (\mbox{{\ssc{SP-RG}}\xspace}~$+4.8$) despite a drop in language accuracy (\ssc{LID$_{\text{Th}}$}~$-8.0$), and increasing capacity further to $100$ tokens hurts both metrics. \paragraph{Significant headroom remains:} The supervised baselines in Table \ref{tbl:challenge_wikilingua0} highlight that significant headroom remains on this \ssc{XGen} task. When tuning the \ssc{XXL} model directly on supervised training data in all languages, \mbox{{\ssc{SP-RG}}\xspace} scores are between $+5.5$ (\ssc{Vi}) and $+10.0$ points (\ssc{Th}) higher than our highest zero-shot results. We also note that for some languages, like Thai, the supervised baseline greatly exceeds any approach using machine translation. This highlights that machine translation quality is still low in some languages, so pursuing stronger zero-shot solutions is worthwhile. \section{Introduction} \label{section:introduction} \input{figures/wikilingua0}% Cross-lingual language understanding is an important area of ongoing research \cite{AConneau20,JHu20,SRuder21}. 
With vastly differing amounts of data (both labeled and unlabeled) available across languages, there is significant value to developing techniques that can transfer knowledge from higher-resource languages to improve performance in lower-resource languages. \emph{Zero-shot} cross-lingual benchmarks push on the limiting case where no labeled data is available in the target language. Remarkable progress has been made on zero-shot cross-lingual tasks by scaling up the size of pre-trained multilingual models \cite{AConneau20,LXue21}. However prior work has focused nearly exclusively on \emph{non-generative tasks} (e.g., classification, extractive question answering, and sequence labeling). In this paper, we turn our attention to zero-shot cross-lingual \emph{generation}, or ``\ssc{XGen}'', which requires a model to learn a generative task from labeled data in one language (typically English), and then perform the equivalent generative task in another language. This problem is particularly challenging because generative models trained on one language are known to exhibit catastrophic forgetting, losing the ability to generate coherent text in other languages \cite{LXue21,KMaurya21,SShakeri21}. In particular, we focus on zero-shot cross-lingual summarization. We construct a new zero-shot evaluation \mbox{\ssc{WikiLingua-0}} from the \ssc{WikiLingua} dataset~\cite{FLadhak20}, allowing us to test \ssc{XGen} capabilities across 18 languages. We motivate a new evaluation metric for our task, \ssc{SP-Rouge}, and show that it correlates well with human judgments of summary quality. \citet{KMaurya21} show improved performance on \ssc{XGen} tasks by freezing model parameters in the input and output layers during fine-tuning. 
Inspired by recent parameter-efficient adaptation methods \cite{NHoulsby19,RMahabadi21a,BLester21,XLi21}, we take this approach further: can we overcome catastrophic forgetting by freezing \emph{all} of the pre-trained model parameters, and only tuning a much smaller set of task-specific parameters? Such parameter-efficient tuning methods are particularly appealing for multilingual NLP, since they would enable reuse of a single frozen model across many combinations of tasks and languages, thereby reducing storage and serving costs. To this end, we conduct a detailed investigation of the \ssc{XGen} performance of {\ssc{PromptTuning}}\xspace~\cite{BLester21}, a simple parameter-efficient adaptation technique that limits the learned parameters to a set of virtual tokens prepended to the text input. We compare {\ssc{PromptTuning}}\xspace with standard fine-tuning (or {\ssc{ModelTuning}}\xspace, where all model weights are tuned) across different languages and model scales. We find that increasing model size and decreasing tunable parameter capacity are key for overcoming catastrophic forgetting. Despite its inferior performance on the training language (English), {\ssc{PromptTuning}}\xspace with scale typically outperforms {\ssc{ModelTuning}}\xspace when evaluated on non-English languages, especially on languages more distantly related to English, such as Thai. This corroborates previous findings~\cite{XLi21,BLester21} that parameter-efficient methods are more robust to domain shifts between training and inference. Motivated by our initial findings, we investigate three approaches to further improve the performance of {\ssc{PromptTuning}}\xspace and {\ssc{ModelTuning}}\xspace on \ssc{XGen} tasks. Our first approach involves mixing unlabeled data in the target language into the supervised training stage. We show this dramatically alleviates catastrophic forgetting on {\mbox{\ssc{WikiLingua-0}}}\xspace. 
A second approach is to perform ``intermediate tuning'' of the model or prompt before tuning on the final English-only dataset. We find this generally is not helpful. Our third approach, ``factorized prompts'', is specifically designed for {\ssc{PromptTuning}}\xspace. We train prompts on a multi-task multilingual mixture, where each prompt is factorized into composable language and task modules---the first half of the prompt encodes language knowledge, while the second half captures language-agnostic task knowledge. During inference in the zero-shot cross-lingual setting, the source language module is replaced with target language module while the task module remains unchanged. We demonstrate that factorized prompts provide an effective means of improving \ssc{XGen} performance. To summarize, our main contributions are: (1)~We present the first investigation of zero-shot cross-lingual generation (\ssc{XGen}) that tests parameter-efficient methods and ablates model scale. (2)~We present the first exploration of {\ssc{PromptTuning}}\xspace for zero-shot cross-lingual tasks. (3)~We propose the {\mbox{\ssc{WikiLingua-0}}}\xspace benchmark and associated \mbox{\ssc{SP-Rouge}} evaluation metric. (4)~We show that increasing model scale and decreasing tunable parameter capacity are key for overcoming catastrophic forgetting on \ssc{XGen}. (5)~We show that mixing in unsupervised multilingual data can boost \ssc{XGen} performance, and are the first to experiment with this method in combination with prompt tuning. (6)~We propose ``factorized prompts'' and show that this approach also improves \ssc{XGen} performance.
1,108,101,565,848
arxiv
\section{INTRODUCTION} \par It has been long claimed \citep[e.g.,][and earlier]{1993ApJ...405..767G} that coronal loops consist of bundles of thin strands, to scales below the current instrumental resolution. Today, that statement continues to remain as prevalent as ever. Coronal loops were first detected in coronagraphic observations in the 1940s \citep{1991plsc.book.....B}. These loops are observed to extend into the low plasma-$\beta$ environment of the solar corona, arching over active regions, and are filled with relatively dense plasma (in the range of $\sim$10$^8$-10$^{10}$ cm$^{-3}$) and confined by a dipole-like magnetic field \citep[and references therein]{2004psci.book.....A, 2010LRSP....7....5R}. Coronal loops can have different temperatures and are observed in Extreme Ultra-Violet (EUV) from $\sim$10$^5$~K (cool loops) to a few 10$^6$~K (warm loops) and up to a few 10$^7$~K (flaring loops). In coronal loops, neighboring field lines are considered to be thermally isolated, hence, each field line can be considered independently which we call a ``strand'' herein. \par To explain the nature of coronal loops is to understand the origin of solar coronal heating. One fundamental issue is that we do not know the spatial scale of the coronal heating mechanism \citep{2010LRSP....7....5R}. It has been considered that in order to form stable over-dense, warm coronal loops it may be required to assume that coronal loops consist of unresolved magnetic strands, each heated impulsively, non-uniformly and sequentially \citep{1993ApJ...405..767G, 1994ApJ...422..381C, 2000SoPh..193...53K, 2001ApJ...553..440K, 2005ApJ...633..489R, 2006SoPh..234...41K, 2008ApJ...682.1351K}. 
At a typical spatial resolution (of most current space-based instruments observing from EUV to higher energies) of $\sim$1000~km ($\sim$1.5{\arcsec}: 1 arc second ({\arcsec}) \app720~km) it is likely that most observations represent superpositions of hundreds of unresolved strands at various stages of heating and cooling \citep{2006SoPh..234...41K}. Other studies based both on models and on analysis of observations independently suggest that elementary loop components should be even finer, with typical cross-sections of the strands on the order of 10-100~km \citep{2003SoPh..216...27B, 2004ApJ...605..911C, 2007ApJ...661..532D}. The space-based \citep[{\it Hinode}:][]{2007SoPh..243....3K} EUV Imaging Spectrometer \citep[EIS: ][]{2007SoPh..243...19C} has been used together with the Solar Dynamics Observatory (SDO) / Atmospheric Image Assembly \citep[AIA: ][]{2012SoPh..275...17L} to investigate the fundamental spatial scales of coronal loops and the results suggest that most coronal loops remain unresolved \citep{2012AAS...22030903U}, given the 864~km and 1440~km resolution of SDO/AIA and Hinode / EIS, respectively. \begin{figure*}[!ht] \includegraphics[scale=0.58, angle=0]{fig1.png} \caption[The SST active region mosaic.]{An overview of the CRISP 54 grid mosaic of 2$^{nd}$~July AR 11515 is presented in the context of SDO/AIA 170.0~nm (lower left) and 30.4~nm (upper left: with extended FOV boxed) passbands. The order of the 5~min scan sequence (which was repeated once over a 10 min interval) is depicted (lower left), as a series of overlapping segments corrected for solar tilt. The accurate co-alignment of bright points in 170.0~nm (contoured in green and yellow), with coincident bright points in the grey-scale H-$\alpha$ continuum image from CRISP, is presented for grid segment no. 54 (right).} \label{fig1} \end{figure*} \par However, in contrary to this \citet{2012ApJ...755L..33B} presented results of multi-stranded loop models calculated at a high resolution. 
They show that only five strands with a maximum radius of 280~km could reproduce the typical observed properties of coronal loops and a maximum of only eight strands were needed to reproduce all of the loops in their sample. More recently (11$^{th}$ July 2012), the High resolution Coronal Imager \citep[Hi-C: ][]{2013Natur.493..501C} took images of the 1.5~MK corona at an unprecedented resolution of 216--288~km \citep{2014ApJ...787L..10W}, which is unique for direct imaging of coronal loops in this passband. As a follow-up, \citet{2013ApJ...772L..19B} measured the Gaussian widths of 91 Hi-C loops observed in the solar corona and the resulting distribution had a peak width of 270~km. In other words, the finest-scale sub-structures of coronal loops are already observable. Other studies concerning the variations of intensity across a variety of hot loops, co-observed by AIA and Hi-C, have continued to speculate on whether or not strand sub-structures could potentially exist well below what Hi-C or AIA can resolve \citep{2013A&A...556A.104P}. Most recently, \citet{2014ApJ...787L..10W} performed a statistical analysis on how the pixel intensity scales from AIA resolution to Hi-C resolution. They claim that 70\% of the Hi-C pixels show no evidence for sub-structuring, except in the moss regions within the FOV and in regions of sheared magnetic field. \begin{figure*}[!ht] \includegraphics[scale=0.58, angle=0]{fig2.png} \caption[The SST active region mosaic.]{CRISP H$\alpha$ line scan images for the reconstructed 54 grid mosaic of 2$^{nd}$~July, for the time interval 08:15:05 - 08:20:00~UT. The line scan includes two far wing positions (panels {\it a} in the far blue-wing and {\it d} in the far red-wing). Panel {\it c} samples the fast spicular structures in the near blue-wing of H$\alpha$ and {\it b} samples the upper chromospheric plasma in the H$\alpha$ core, revealing a complex network of chromospheric loops and dynamic fibrils. 
The line scan positions, relative to the solar atlas H$\alpha$ profile, are presented in the sub-figure of panel ({\it a}) and the specific wavelengths of the scan are detailed in the panel titles. The green and yellow boxes mark features of interest for our investigation.} \label{fig2} \end{figure*} \par There is strong evidence to suggest that coronal loops are, indeed, so finely structured when we consider loop legs from coordinated observations involving high resolution spectral imaging with ground-based instruments. \citet{2012ApJ...745..152A} performed a detailed and systematic study of coronal rain \citep{1970PASJ...22..405K, 2001SoPh..198..325S, 2004A&A...415.1141D} via the Swedish 1-m Solar Telescope \citep[SST: ][]{2003SPIE.4853..341S} / \citep[CRISP: ][]{2008ApJ...689L..69S} instrument at very high spatial and spectral resolution (0\farcs0597 image scale). They detected narrow clumps of coronal rain in H$\alpha$ down to the diffraction limit (129~km) in cross-sectional area, with average lengths between $\sim$310~km and $\sim$710~km and widths approaching the diffraction limit of the instrument. These measurements were repeated for on-disk coronal rain by \citet{2012SoPh..280..457A}. Coronal rain is considered to be a consequence of a loop-top thermal instability driving catastrophic cooling of dense plasma \citep[see ][and references therein]{2010ApJ...716..154A, 2012ApJ...745..152A}. Radiation cooling of dense evaporated plasma (filling coronal loops), leads to the onset of plasma depletion from the loops, slowly at first and then progressively faster. \begin{figure*}[!ht] \centering \includegraphics[clip=true,trim=0cm 0cm 0cm 0cm,scale=0.58, angle=0]{fig3.png} \caption[The SST active region mosaic.]{The large-scale, high velocity down-flow in the H$\alpha$ far wing image ($+0.1032$~nm) is presented in the 1$^{st}$ panel ({\it top row}) and its evolution after 5~min, is presented in the 1$^{st}$ panel ({\it bottom row}). 
The co-spatial and co-temporal warm coronal loop is visible in the SDO/AIA He~{\sc ii}~30.4~nm (2$^{nd}$ column), Fe~{\sc ix}~17.1~nm (3$^{rd}$ column) and Fe~{\sc xiv}~21.1~nm (4$^{th}$ column) images as shown. The blue dashed box ({\it top row}) represents the FOV of Fig.~\ref{fig4a}, for a closer inspection of the coronal {\bf loop-leg} sub-structures. The white dashed box in the ({\it bottom row}) represents the coronal {\bf loop-top} and the FOV of Fig.~\ref{fig4b}.} \label{fig3} \end{figure*} \par From an observational stand-point, the next step is to reveal evidence for sub-structuring along the full length of the coronal loop, from loop-top towards foot-point, and directly measure the threaded nature of coronal loop-tops using CRISP (with the most powerful resolving capability), in order to adequately test the existence of unresolved structure in the outer solar atmosphere. Observations from the ground-based instruments, such as CRISP, have obvious advantages over space-based facilities, with respect to resolving power given their much larger apertures. Analysis of the H$\alpha$ line core from such imaging spectro-polarimeters, provides an excellent tracer of the magnetic environment of the lower solar atmosphere \citep{2012ApJ...749..136L}. Through coordinated observations of coronal loops, with AIA and CRISP, we can use imaging in H$\alpha$, as a proxy for revealing the internal magnetic structure of coronal loops. As discussed in \citet{2012ApJ...745..152A}, it is possible that there is a strong dynamic coupling between neutrals and ions in coronal loops during the formation of coronal rain. As a result, the rain can become observable in the H$\alpha$ to reveal, in great detail, the topological structure of the local coronal magnetic field. The condensation process generates initially small rain clumps `in situ' within coronal loop-tops, until the point where the mass density of the rain becomes large enough, 
leading to a flow of clumps towards the loop foot-points \citep{2013ApJ...771L..29F}. The lower limit (in spatial scales), with respect to the size distribution of these clumps, is dependent upon the magnetic fine-scale structuring of coronal loops. Momentarily and as a consequence of the thermal properties of the dense plasma undergoing rapid condensation, the H$\alpha$ signal is detectable within post-flare coronal loops, because the atmospheric conditions in the loops match that of the chromosphere. Fundamentally, the magnetic sub-structuring of the coronal loops near loop-tops should remain the same or similar for all coronal loops (flaring and non-flaring). However, the possibility to probe the fine-scale magnetic structure of coronal loops, during the post-flare phase with H$\alpha$ via high resolution imaging, can present itself. \par In this letter, we report on the distribution of threaded sub-structures within a coronal loop from loop-top towards foot-point, via direct imaging of coronal loop cross-sections through a coordinated SST and SDO analysis, which comprises three datasets. \section{OBSERVATIONS} \par CRISP, installed at the SST, is an imaging spectro-polarimeter that includes a dual Fabry-P{\' e}rot interferometer (FPI), as described by \citet{2006A&A...447.1111S}. The resulting FOV is about 55{\arcsec}$\times$55{\arcsec}. CRISP allows for fast wavelength tuning ($\sim$50~ms) within a spectral range and is ideally suited for spectroscopic imaging of the chromosphere. For H$\alpha$~656.3~nm the transmission FWHM of CRISP is 6.6 pm and the pre-filter is 0.49~nm. The image quality of the time series data benefits greatly from the correction of atmospheric distortions by the SST adaptive optics system \citep{2003SPIE.4853..370S} in combination with the image restoration technique Multi-Object Multi-Frame Blind Deconvolution \citep[MOMFBD: ][]{2005SoPh..228..191V}. 
Although the observations suffered from seeing effects, every image is close to the theoretical diffraction limit for the SST. We refer to \citet{2008A&A...489..429V} and \citet{2013ApJ...769...44S} for more details on the MOMFBD processing strategies applied to the CRISP data. We followed the standard procedures in the reduction pipeline for CRISP data \citep{2014arXiv1406.0202D}, which includes the post-MOMFBD correction for differential stretching suggested by \citet{2012A&A...548A.114H}. \begin{figure}[!h] \includegraphics[clip=true,trim=0cm 0cm 0cm 0cm,scale=0.4, angle=0]{fig4.png} \caption{Fine-scale, multi-stranded and multi-thermal sub-structures are detected within the coronal {\bf loop-leg} and presented here for the blue-dashed box region of Fig.~\ref{fig3}. The H$\alpha$ line position of +0.1032~nm image (grey-scale), is shown in panel-{\it A}, together with the near-simultaneous and co-spatial AIA 17.1~nm image. The coronal loop in 17.1~nm is contoured (solid red line) and overlaid in both images to compare with the H$\alpha$ multi-threaded component of the loop. A white-dashed diagonal slit and two pink-boxed regions are extracted and their normalized intensity profiles are plotted in panel-{\it B} for comparison of both spectral lines. The data cross-cuts for H$\alpha$ (solid curve) are overlaid with 17.1~nm (dotted curve). The blue solid lines present the FWHM of the double peaked 17.1~nm profile and the green lines demarcate the locations of fine-scale strands which exist within the loop system.} \label{fig4a} \end{figure} \par We explore the fully processed datasets with CRISPEX \citep{2012ApJ...750...22V}, a versatile code for analysis of multi-dimensional data-cubes. We have compiled, with these reduction methods, three datasets from excellent periods of seeing which contained active region coronal loops within the CRISP FOV. 
\\ \par {\bf Dataset A} - A mosaic observing sequence (presented in Fig.~\ref{fig1} and Fig.~\ref{fig2}) was set to repeat once, with a line scan in H$\alpha$ at 4 wavelength positions (as presented in Fig.~\ref{fig2}), for the 280{\arcsec}~$\times$~180{\arcsec}~FOV containing Active region (AR) 11515 on 2$^{nd}$~July~2012, centred at [-225{\arcsec},-275{\arcsec}] in solar-{\it x}/{\it y}. The pointing per position was preset to 5.5~s including 1.5-2~s for the telescope to change pointing. The total duration of the observation was 600~s so there were 108 pointing sequences in this interval with a repeat of 54 positions resulting in a cadence of 300~s between 08:10-08:20~UT. The co-alignment between CRISP and AIA for the mosaic observation is presented in Fig.~\ref{fig1}. \\ \par {\bf Dataset B} - A time series with 6 wavelength-point spectral scan in H$\alpha$ with an effective cadence of 19~seconds (after frame selection on the MOMFBD restored data) pointed at [-349{\arcsec},-329{\arcsec}] in solar-{\it x}/{\it y} on 1$^{st}$ July 2012, centering on AR 11515 between 15:08-16:31~UT. \\ \par {\bf Dataset C} - A time series with 43 wavelength-point spectral scan in H$\alpha$ with an effective cadence of 10.8~seconds pointed at [-818{\arcsec},179{\arcsec}] in solar-{\it x}/{\it y} on 24$^{th}$ Sept. 2011, centering on AR 11302 between 10:17-11:02~UT. \\ \par To achieve sub-AIA pixel accuracy in the co-alignment of H$\alpha$ with CRISP, and SDO/AIA, we cross-correlate photospheric bright points as observed in both instruments. Photospheric bright points exist as discrete, bright and relatively long lived features which exist in both quiet Sun, active regions and to a lesser extent in coronal holes. They are well distributed over the solar disk and can be clearly identified in the upper-photospheric AIA~170.0 nm channel (log T = 3.7). 
Our spectral line scan of H$\alpha$ includes nearby-continuum positions in both the blue wing ($-0.1032$~nm) and red wing ($+0.1032$~nm) for all our datasets. In the case of dataset {\bf A}, each grid in the mosaic sequence at the near-continuum spectral position was independently aligned to the corresponding SDO/AIA 170.0~nm (derotated FOV to compensate for solar rotation) in space and time. This accurate method for achieving a sub-AIA pixel accuracy in the co-alignment between the space-based SDO/AIA and ground-based SST/CRISP images is displayed in Fig.~\ref{fig1} (right). However, the same method was also applied in regard to the co-alignment of datasets {\bf B} and {\bf C}. \section{RESULTS} Dataset {\bf A}, composed of the mosaic coronal loop observation, contains primarily warm active region loops. We examine the coronal loop multi-thermal sub-structures of active region loops in the following section. After that, we will focus on the sub-structures within loops in datasets {\bf B} and {\bf C}, which both contain hot, post-flare coronal loops. In that section, we will investigate the sub-structure of hot post-flare coronal loops which have experienced strong chromospheric evaporation. \newpage \subsection{ACTIVE REGION LOOPS} \par In Fig.~\ref{fig2} we present the reduced and reconstructed mosaic images for the H$\alpha$ line scan of AR~11515 from the 2$^{nd}$~July 2012. The line scan positions include $-0.1032$~nm ({\it a}) and $-0.0774$~nm ({\it c}) in the blue wing, relative to the line core ({\it b}) and one position in the red at $+0.1032$~nm ({\it d}). The green box in Fig.~\ref{fig2}({\it a}) indicates the location of a high speed chromospheric upflow with a strong Doppler shift with an equivalent velocity of $\sim$47~km~s$^{-1}$. Interestingly, the trajectory of this high speed upflow, in the H$\alpha$ green box, coincides with the foot-point of a large-scale loop in apparent down-flow in the H$\alpha$ yellow box. 
The imposing loop-like structure, which extends above the sunspot group, is clearly observed in the red wing of H$\alpha$, as revealed in the yellow box in Fig.~\ref{fig2}({\it d}). This structure is co-spatial with EUV coronal loops as observed in SDO / AIA and its loop-top fine-scale structure is the focus of our investigation. \begin{figure}[!hb] \includegraphics[clip=true,trim=0cm 0cm 0cm 0cm,scale=0.4, angle=0]{fig5.png} \caption{Fine-scale, multi-stranded and multi-thermal sub-structures are detected within the coronal {\bf loop-top} and presented here for the white-dashed box region of Fig.~\ref{fig3}. The H$\alpha$ line position of +0.1032~nm image (grey-scale), is shown in panel-{\it A}, together with the near-simultaneous and co-spatial AIA 17.1~nm image. {\it Regions 1-3} in panel-{\it A} are selected for investigation of the H$\alpha$ intensity profile, as data cross-cuts along the loop-top system, which are represented in panel-{\it B}. As with Fig.~\ref{fig4a} parallel strands are identified using green lines separated by pink lines marking strand channels. {\it Regions 2} and {\it 3} are particularly highly structured in the H$\alpha$ line profiles.} \label{fig4b} \end{figure} \par In Fig.~\ref{fig3}, we present a zoom into the yellow box (2) from Fig.~\ref{fig2}({\it d}) to reveal a multi-thermal sub-structure within the coronal loop system. The mosaic sequence was repeated with a 5-min time lag (before: {\it top row} and after: {\it bottom row}). When we compare between these time frames in the H$\alpha$ far red wing we can immediately reveal the evolution of the flow from the loop-top arching along the loop-leg. This evolution of the plasma is also evident in the AIA EUV lines at the loop-top in the same time interval, which confirms our expectation that the H$\alpha$ signature must originate within the coronal loop structure and at the loop-top. 
The loop length, as observed in H$\alpha$, is $\sim$63~Mm from lower loop leg to the central part of the loop-apex. Along the loop leg, near to the foot-point, a high velocity chromospheric up-flow (green box from Fig.~\ref{fig2}({\it a})) can be identified as a hot explosive event, which remains in emission in all AIA channels through the duration of the observation. This explosive event continues even after the excessive cooling of the loop-top and onset of the return flow to account for the high concentration of dense plasma near the loop-top. The blue-dashed boxes in the {\it top row} correspond to the FOV of the loop-leg which will be examined in more detail. Similarly, the pink-dashed boxes correspond to the FOV of the loop-top which we can investigate for signatures of fine-scale structure. \par In Figs.~\ref{fig4a}~\&~\ref{fig4b} panel-{\it A}, we present the zoomed-in regions from Fig.~\ref{fig3} blue-dashed box (loop-leg) and white-dashed box (loop-top), respectively. Here we reveal in great detail, that both the loop-tops and loop-legs consist of bundles of fine-scale strand sub-structures, which can remain connected along the length of the loop. The fine strands, identifiable within the pink boxed regions in Fig.~\ref{fig4a} panel-{\it A} and white-dashed in Fig.~\ref{fig4b} panel-{\it A}, appear to be parallel with each other and are contained within the AIA-defined loop boundary (see the blue dashed line in Fig.~\ref{fig4b} panel-{\it A}). In Fig.~\ref{fig4a} panel-{\it B}, the data cross-cuts (extracted from the diagonal-dashed slit of Fig.~\ref{fig4a} panel-{\it A}) demonstrate the sub-structured nature of the loop in H$\alpha$ within, and not necessarily confined to, the double peak profile representing 17.1~nm normalised intensity. 
\begin{figure}[!h] \includegraphics[scale=0.4, angle=0]{fig6.png} \caption{Co-temporal and co-spatial H$\alpha$ near red-wing images (grey-scaled), together with overlaid contours (17.1~nm: yellow and 21.1~nm: red), are presented in panel-{\it A}. The observations consists of a snapshot of a post C8.2-class flare system from 1$^{st}$~July~2012 (dataset {\bf B}). Panel-{\it B} presents the normalised intensity cross-cuts of the post-flare {\bf loop-top} (solid green line {\it Region 4} in panel-{\it A}) for the associated H$\alpha$ signal (black curve) along with the respective curves of the 17.1~nm (yellow) and 21.1~nm (red) channels, as is contoured in panel-{\it A}. The shaded green-boxes represent examples of associated fine-scale structures in H$\alpha$ and the EUV lines from which we extract measurable strand cross-sections for our statistical sample. The blue-horizontal lines represent the well-defined and measurable cross-sections of the EUV loops in contrast with the fine-scale structuring in H$\alpha$.} \label{fig5a} \end{figure} The 17.1~nm loop system boundary (marked by the vertical red lines in Fig.~\ref{fig4a} panel-{\it B}) has a maximum cross-sectional width of 3-4 Mm and also appears structured down to the resolution limit of the AIA instrument. The AIA temperature response function for the 17.1~nm passband has its maximum around 0.9~MK. We find that the Full-Width-Half-Maximum (FWHM) of each of the AIA 17.1~nm loop peaks, from the cross-cut, is 870~km. In measuring the cross-sectional widths, we computed the FWHM of the data cross-cut as being the width of the bisector corresponding to half of the difference between the minimum (in the case of H$\alpha$) or maximum (in the case of 17.1~nm) intensity level and the background pixel intensity level. In Fig.~\ref{fig4a} panel-{\it B}, at the centre of this AIA loop double peak, we can identify a very finely structured bunch of strands in H$\alpha$. 
The H$\alpha$ data cross-cuts contain multiple, parallel strands with a variety of cross-sectional widths (individual strands are marked with the green solid lines). The broadest strand in this set has a FWHM of 6 SST-pixels which corresponds to 258~km. This relatively broad strand extends with a consistently uniform cross-section to a length of 4855~km. The left-most strand is narrower again corresponding to 4 SST-pixels which is 172~km. Other strands appear to exist within a range of spatial scales that can be as large as 516~km and as low as 129~km. On average, we can detect a maximum of 8 strands within this cross-section of the loop. \par Similarly, in Fig.~\ref{fig4b} panel-{\it B}, H$\alpha$ data cross-cuts are plotted in sequence depicting the strands' parallel channels (see the connecting green and pink solid lines demarcating the channel of the strand), that run along the length of the loop within the {\it Regions 1, 2} and {\it 3}. {\it Regions 2} and {\it 3} are contained within, and bounded by, the curved blue-dashed lines of the AIA 17.1~nm loop boundary. This is the same loop system connecting the loop-leg from Fig.~\ref{fig4a}. The finest detectable strands exist within {\it Region 3} and they all extend uniformly in length, across the FOV for at least 1100~km. The right-most strand channel from {\it Region 2} has a cross-sectional width similar to that of the broadest strand from the loop-leg section. However, the most commonly occurring strand cross-section, from both loop-top and loop-leg sections, is 129~km. \begin{figure}[!h] \includegraphics[scale=0.4, angle=0]{fig7.png} \caption{Co-temporal and co-spatial H$\alpha$ far red-wing images (grey-scaled), together with overlaid contours (17.1~nm: yellow and 21.1~nm: red), are presented in panel-{\it A}. The observations consist of a snapshot of a post C8.2-class flare system from 1$^{st}$~July~2012 (dataset {\bf B}). 
Panel-{\it B} presents the normalised intensity cross-cuts of the post-flare {\bf loop-leg} (solid green line {\it Region 5} in panel-{\it A}) for the associated H$\alpha$ signal (black curve) along with the respective curves of the 17.1~nm (yellow) and 21.1~nm (red) channels, as is contoured in panel-{\it A}. The additional markers in these figures are previously described in Fig.~\ref{fig5a} for this dataset.} \label{fig5b} \end{figure} \par We have found multi-stranded, and multi-thermal fine-scale structuring within warm active region coronal loops in both loop-top and loop-leg sections. However, is this scenario consistent with hotter post-flare loops, which can undergo a more widespread and intense foot-point heating? \subsection{POST-FLARE LOOPS} \par Figs.~\ref{fig5a}~\&~\ref{fig5b} display the overlays of the hot post-flare loop system of dataset {\bf B}, which consists of CRISP observations centred on AR~11515 (same active region as dataset {\bf A}, observed one day later) on the 1$^{st}$~July~2012 and hosts a C8.2-class flare during the observation period. In both Figs.~\ref{fig5a}~\&~\ref{fig5b} panel {\it A}, the post-flare loops are presented 33~mins after the flare peaks at $\sim$15:41 UT in the GOES X-ray channel. The loop boundaries are contoured in 21.1~nm (red) and 17.1~nm (yellow) channels and overlaid on the H$\alpha$ red wing images. In Fig.~\ref{fig5a} panel-{\it A} the H$\alpha$ spectral line position is +0.0516~nm whereas in Fig.~\ref{fig5b} it is further into the red wing (at +0.1032~nm) where we detect the faster moving components within the post-flare loop. It is immediately obvious that in panel-{\it A} for both Fig.~\ref{fig5a}~\&~\ref{fig5b}, we detect a clear spatial correlation between the cooler fine-scale structures in H$\alpha$ and the hotter EUV signal. 
The condensing coronal rain (known to form during the post-flare cooling phase) is shown to be depleting from the loop-top (located near {\it Region 4}) and travels towards the loop foot-point. Using the spectral scans in H$\alpha$ we can sample the fine-scale structure within the cross-section of the multi-thermal loop system, and with very high accuracy since the EUV loops are very intense therefore very well defined in the images. The cross-cut data for {\it Regions 4} and {\it 5}, from Fig.~\ref{fig5a}~\&~\ref{fig5b} panel-{\it B}, displays similar fine-scale structuring very much confined within the post-flare loops. Again we detect double peak structures in many of the AIA channels (notably in the 21.1~nm red curves) and multiple strands in the H$\alpha$ line position, indicating the presence of narrower threads well below the instrumental resolution of AIA. The green-boxed regions in Fig.~\ref{fig5a}~\&~\ref{fig5b} panel-{\it B}, highlight the sections of the cross-cuts where we have an overlap between AIA loops and structure in H$\alpha$, which again implies a coincidence in the location of the formation of the lines to within the coronal loop itself. The cross-sectional width measurements of all the fine-scale strands, within the full length of the loop system from loop-top to foot-point, will be accumulated together with datasets {\bf A}, for statistical comparison of the variation in the range of scales present within the loop systems. \begin{figure}[!hb] \includegraphics[scale=0.4, angle=0]{fig8.png} \caption{Co-temporal and co-spatial H$\alpha$ red-wing images (grey-scaled), together with overlaid contours (17.1~nm: yellow and 21.1~nm: red), are presented in panel-{\it A}. The observations consists of a snapshot of a post X1.9-class flare loop system from 24$^{th}$~Sept.~2011 (dataset {\bf C}). 
Panel-{\it B} presents the normalised intensity cross-cuts of the post-flare loop-leg (solid green line {\it Region 7} in panel-{\it A}) for the associated H$\alpha$ signal (black curve) along with the respective curves of the 17.1~nm (yellow) and 21.1~nm (red) channels, also contoured in panel-{\it A}. Likewise, we also plot intensity profiles for {\it Region 6}, representing fine-scale structure close to the loop-top, in panel-{\it B}. The shaded green-boxes represent examples of associated fine-scale structures in H$\alpha$ and the EUV lines from which we extract measurable strand cross-sections for our statistical sample. The blue-horizontal lines represent the well-defined and measurable cross-sections of the EUV loops in contrast with the fine-scale structuring in H$\alpha$.} \label{fig6a} \end{figure} \par Dataset {\bf C} consists of CRISP observations centred on a region that hosted a GOES X1.9-class flare (post-impulsive phase close to the north-east solar limb) on the 24$^{th}$~Sept.~2011. In Figs.~\ref{fig6a}~\&~\ref{fig6b} panel-{\it A}, we present images of the H$\alpha$ red-wing (Fig.~\ref{fig6a}) and blue-wing (Fig.~\ref{fig6b}) grey-scaled images, which are almost coincident in observation time at 10:17~UT (within the same line scan with one line scan taking 4.2 seconds). These images are again overlaid with contours from AIA 17.1~nm (yellow), 21.1~nm (red) and also 19.3~nm (dark blue), for the post flare loop system 56~mins after the flare GOES X-ray peak. During this phase we again expect to see evidence of coronal rain formation, with a characteristic signature in H$\alpha$. In dataset~{\bf C}, catastrophic cooling is indeed present across the entire post-flare loop and the opposing flows, as the rapidly cooled coronal rain depletes from the loop-top under the action of gravity, is exquisitely revealed. 
We can clearly detect opposing, dark Doppler flows running along both legs of the loop system, as absorption in H$\alpha$ (as defined by AIA contours), in the red-wing for the left-side leg (Fig.~\ref{fig6a}) and in the blue-wing for the right-side leg (Fig.~\ref{fig6b}). The Doppler signature in the loop reveals the geometrical nature of the loop itself. In the line core images we can detect the structure of the loop-top where there is a net zero Doppler shift, which is in agreement with the location of the observed loop-top in the EUV channels and confirmation that the hot and cold plasma must be co-located within the same loop structure. \begin{figure}[!h] \includegraphics[scale=0.4, angle=0]{fig9.png} \caption{Co-temporal and co-spatial H$\alpha$ blue-wing images (grey-scaled), together with overlaid contours (17.1~nm: yellow and 21.1~nm: red), are presented in panel-{\it A}. The observations consist of a snapshot of a post X1.9-class flare system from 24$^{th}$~Sept.~2011 (dataset {\bf C}). Panel-{\it B} presents the normalised intensity cross-cuts of the other post-flare {\bf loop-leg} (solid green line {\it Region 8} in panel-{\it A}) for the associated H$\alpha$ signal (black curve) along with the respective curves of the 17.1~nm (yellow) and 21.1~nm (red) channels, as is contoured in panel-{\it A}. The additional markers in these figures are previously described in Fig.~\ref{fig6a} for this dataset.} \label{fig6b} \end{figure} As with the large-scale loop of dataset~{\bf A}, in the H$\alpha$ line core images of dataset~{\bf C}, many of these finely-structured strands extend from foot-point to foot-point (crossing the loop-top). As with dataset~{\bf B}, in Fig.~\ref{fig6a}~\&~\ref{fig6b} panel-{\it B}, we present sets of normalised intensity cross-cuts from both H$\alpha$ (black curve) and the EUV channels (17.1~nm: yellow and 21.1~nm: red curves) for slit {\it Regions 6, 7} and {\it 8}. 
Again, we can reveal fine-scale structuring within the H$\alpha$ intensity profiles (from within the green shaded boxes), in both Fig.~\ref{fig6a}~\&~\ref{fig6b}, that are co-spatial with singly-peaked profiles in the EUV channels. In each of the {\it Regions 7} and {\it 8}, we can detect similarly scaled strands and a variable range in the cross-section strand number density of, typically, 3-5 clearly defined parallel strands. The data cross-cuts from each of the AIA passbands contoured here are also overlaid. The EUV loop intensity profiles appear to have cross-sectional widths in the range of 25-40 SST pixels (approx. 5-6 times greater than fine-scale H$\alpha$ strands within them as marked by solid green lines), as marked with the solid blue lines, in panel-{\it B} of both Fig.~\ref{fig6a}~\&~\ref{fig6b}. These measurements are comparable with those profiles deduced from dataset~{\bf B} which was a substantially weaker post-flare loop system, and likewise, for dataset~{\bf A} for a warm active region loop system with indication of foot-point heating but no apparent flaring. \par A statistical comparison of the strands, using all of the examples from each of the datasets examined, is considered next. In summary, these coronal loop sub-structure samples comprise both loop-top and loop-leg sub-structures from both CRISP and AIA images, where we have a detectable and confident correspondence between H$\alpha$ features in CRISP and associated EUV loops in AIA. A total of 62 coronal loop sub-structures were measured from all three datasets. A resulting histogram of the number density of all the strands / loop cross-sections versus their cross-sectional widths (km units), is presented in Fig.~\ref{fig19}. 
Here we can show that the distribution of sub-structures within coronal loops appears to increase exponentially towards finer scales and the highest number density (10 instances representing almost a 6$^{th}$ of all sub-structures measured) appears to peak within the range of the CRISP resolution. In fact, the distribution would imply that we have not yet reached a peak in the dominant spatial fine-scale structure of coronal loops and furthermore, we are not yet within the observable range of the finest scales of structure. \section{DISCUSSION AND CONCLUSIONS} \begin{figure*}[!ht] \centering \includegraphics[scale=0.45, angle=0]{fig10.png} \caption[The SST active region mosaic.]{The histogram displays the distribution of all detectable strands and sub-structures within coincident H$\alpha$ and EUV coronal loops, as measured from all of the datasets sampled. The pale blue sections correspond to the H$\alpha$ only detections made via CRISP. The darker blue sections correspond to the SDO / AIA coronal loop cross-sections. The vertical dashed lines mark the resolution limit for CRISP, Hi-C and SDO / AIA. The number density of detected strands versus their cross-sectional FWHM widths is measured. The exponential curve is overlaid onto the plot to indicate the steepening distribution towards finer scales within the sub-structures of coronal loops.} \label{fig19} \end{figure*} \par Since the launch of Hi-C in 2012 there has been substantive research into the fine-scale structure of coronal loops. Most efforts to address this issue are centred on multi-instrumental approaches / analysis, comparing statistical relationships of intensity variations between measured loop cross-sections, in co-incident Hi-C and AIA coronal loops. 
Ultimately, the conclusions from such studies, as with this study, are always going to be limited by the resolution of the instruments used and any conclusions on the existence of fine-scale structure will continue to be speculated upon, until the necessary improvements in instrumentation resolving power are met. In this study, we take this investigation further by exploiting the resolving power potential of the ground-based CRISP instrument together with SDO / AIA coronal loop detections to reveal the fine-scale structure. \par Our analysis of three datasets, which consist of large-scale coronal loops in various conditions (ranging from warm active region loops to hot post-flare loops), has been accurately co-aligned with very high resolution imaging in H$\alpha$. Interestingly, there is little difference in the distribution of strand / structure spatial scales that would lead one to be able to distinguish between datasets~{\bf A}, {\bf B} and {\bf C}, each of which depict loop-systems undergoing large variations in impulsive heating. This aspect may be hinting that the magnetic sub-structure of coronal loop cross-sections may not be so sensitive to variations in loop foot-point heating or, alternatively, the magnetic field is effectively and systematically reacting to changes in the thermal properties of the internal loop environment, in order to manage the heat transport and maintain stability. The formation of the coronal rain is a demonstration of the loop system reaching a new thermal equilibrium, as observed in the cool H$\alpha$ line, which acts as a tracer of the magnetic environment. The association of this rapid cooling condensation process and its temporary association with the EUV coronal loops has been exploited in this study, in order to examine the fine-scale structure of the loops at the loop apex. 
In Fig.~\ref{fig3}, we clearly demonstrate the loop-top depletion due to catastrophic cooling of plasma, which falls back to the lower solar atmosphere along the loop-leg. The longest continuous detectable strand (which largely features close to the resolution limit in CRISP at 129 km) was on the order of 26,100 km, extending from loop-top to close to the foot-point. This represents one of the longest and continuous fine-scale coronal loop substructures detected to date. \par \citet{2010ApJ...716..154A} observed coronal rain near coronal loop-tops with Hinode \citep[SOT: ][]{2008SoPh..249..167T} and measured cross-sectional widths on the order of 500~km. We detect similarly scaled coronal rain strands in coronal loop-tops with CRISP and also threads with finer scales, implying the existence of a range of finely scaled structures in the outer solar atmosphere. The draining of the dense plasma as it falls back towards the loop foot-points from the loop-top is most clear in dataset {\bf C}, represented in Figs.~\ref{fig6a}~\&~\ref{fig6b}. In the images, we demonstrate a clear association of the rain flowing within both legs of a post-flare coronal loop from its apparent source near the loop-top. There appears to be a distribution of scales within the coronal loop-top with respect to cross-sectional widths of strands. Likewise, there is a distribution in the strand lengths, all of which appear to follow the trajectory of the loop-top coronal field (as inferred from co-incident AIA loop trajectories), with some appearing to be very much extended towards the loop foot-point. This shows that the fine-scale structure is widespread along the full length of the loop and the coronal rain clumps can form within bunches of strands. 
We can conclude that the vast majority of fine-scale strand structures within coronal loop cross-sections exist well below the resolution of SDO /AIA (69.3\% of the potential strands, as returned by CRISP, are unresolved with AIA) and almost 50\% of fine-scale strands could potentially remain unresolved with imaging in an instrumental-resolution comparable to Hi-C. In summary, after considering 8 cross-cuts (representing one loop-top and two loop-legs for datasets {\bf B} and {\bf C} and one loop-top, one loop-leg for dataset {\bf A}) we find an average ratio of 5 : 2 for CRISP strand no. density to AIA strand no. density per loop system. \par Finally, we conclude that there is a cut-off in the peak of the distribution (from Fig.~\ref{fig19}), at the instrumental resolution of CRISP. We and others have assumed that the distribution of strand sizes should be a Gaussian or at least symmetric about some peak. From our histogram, we demonstrate that either we have not yet reached that peak and the actual fine-scale resolution is much below 100~km, or the spatial-scale distribution is in fact skewed away from being symmetric about some peak. This result clearly states that, even with the most powerful ground-based instrumentation available, we have not yet observed a true peak in the strand cross-sectional width measurements at the lowest limit within coronal loops. \citet{2013ApJ...771L..29F} demonstrated with numerical simulations of coronal rain formation that, when compared with observational statistics, a higher percentage of coronal rain clumps are expected in smaller scales. Here, we can confidently state that the peak (in other words the minimum) in cross-sectional width distribution of the finest structures within coronal loops, is most likely to exist beneath the 100~km mark. Henceforth, we look forward with great anticipation to the arrival of more powerful ground-based telescope facilities \citep[such as the 4-m Daniel K. 
Inouye Solar Telescope (DKIST): ][]{2013SPD....4440002B}, in order to fully probe even finer scales within coronal loop cross-sections. \acknowledgements We further thank N~Frej who co-observed the mosaic and 1$^{st}$~July~2012 datasets ({\bf A} \& {\bf B}), as well as, A.~O.~Carbonell and B.~Hole who observed at the SST on the 24$^{th}$~Sept.~2011 dataset ({\bf C}). The Swedish 1-m Solar Telescope is operated on the island of La Palma by the Institute for Solar Physics of Stockholm University in the Spanish Observatorio del Roque de los Muchachos of the Instituto de Astrofisica de Canarias.
1,108,101,565,849
arxiv
\section{Background} Although it is common knowledge that stress can hurt our health, scientific results regarding this are fairly recent. The first studies attempting to show this connection between the psychological and the physical state were developed in the first half of the 20th century \cite{Cannon32, Selye36}. In the second half of the century the corresponding field of psychoneuroimmunology (PNI) established the interplay of the central nervous system (CNS), the endocrine system, in other words the hormonal messenger system of the body, and the immune system \cite{Solomon64, Holmes67, Ader01, Glaser05}. The first goal of this field is to specify a good definition of stress to be able to work with and thus also a way to categorize it \cite{Elliot82}. The most common way to do this is mainly by the duration, e.g., one may contrast very brief public speaking to extremely long-time care of a spouse \cite{Segerstrom04}. Furthermore, it is important to quantify stress factors, e.g. how and to what extent we can see sports as a positive or negative stressor in the same way as psychological ones \cite{Hoffman94}. Further, PNI wants to understand the biochemistry of how behavioral and psychological effects, in particular stress, are reflected in our bodies, i.e. how stress ``gets inside the body'' \cite{Segerstrom04}. Although there is still much to learn, we have now a basic understanding of the hormonal reactions of our body to stress and how they affect the immune system, e.g. by specific receptors on cells responsible for the immunity \cite{Glaser05}. In addition, there are several studies regarding immunological and medical consequences of stress~\cite{Kiecolt85, Andersen04}, where a key goal is to understand and counteract negative consequences. In this context, the difficulties of extensive clinical trials and the lack of a commonly accepted definition, categorisation or measurements of stressors are most evident. 
This is why Segerstrom and Miller performed in 2004 a meta-analytical study of numerous results going back 30 years \cite{Segerstrom04}. They showed that in general stress has a negative effect on the immune system by either decreasing the number of e.g. killer cells or simply disturbing the equilibrium between the different components. Nevertheless a brief stress, like public speaking, can also have a positive effect and even longer short-term stressors like examination periods for students can have a beneficial effect during but cause decreased immunity afterwards \cite{Dorian85, Burns02}. \section{The model} Having some fundamental understanding of how our immune system reacts to stress, we want to find a \emph{simple mathematical model} of differential equations which is able to represent the \emph{basic qualitative interaction} between an infection and our \emph{immune system} taking into account the \emph{effect of stress}.\\ Since we are only interested in a qualitative analysis of the system we are going to work with normalised dynamical variables $x$, representing the level of activity of the immune system, and $y$, as the level of infection or sickness, with values ranging between 0 and 1. The stress is included by a parameter $s$ also normalised between 0 and 1 where 0 corresponds to no stress at all and 1 is the maximal level of stress limited by the assumption that a person cannot feel infinitely strong stress. To specify the dynamics of the infection we are going to build upon principles of mathematical biology~\cite{Murray1,Mueller15}, where in multiple sub-disciplines such as ecology, neuroscience, or biomechanics, one aims to use generic polynomial nonlinearities to represent basic effects. Based upon population dynamics, we consider logistic growth and Allee effect for the immune system intrinsic dynamics. 
In this regard, our first assumption is that, as long as our immune system has some activity, the infection should have two stable states, the ``sick''-state and the ``healthy''-state. Furthermore, since we are constantly coming in contact with different viruses and bacteria, we are interested in having a slightly raised base line instead of the ``healthy''-state being $y=0$. Our second assumption is that, if there is no immune system or if it is too weak then the infection should spread up to its maximal capacity. Combining everything we arrive at the equation \begin{equation} y'= -xy + r\sqrt{1-y}\left(q(y-y_1)(y-y_0)+(1-q)y\right) \end{equation} where we used a square root instead of the standard multiplicative term $(1-y)$ from population models. Finally we choose the parameter values $r=2$, $y_0=y_1=0.1$ and $q=0.95$.\\ In contrast to the previous derivation of the dynamics for $y$, our approach for finding an appropriate equation for the immune system is based purely on the geometry necessary to capture the effects we aim to model. We start with the case without stress. It is clear that without stress we should have a unique stable equilibrium with $0\ll x<1$ and $0<y\ll 1$ corresponding to a normal healthy state. Since too much stress generally induces an infection, this fixed point should only exist for $s<s_0$ with some threshold level of stress $0<s_0<1$.\\ Furthermore, we also want to reflect the fact that, while a moderate stress can make us slightly more resistant to infections, at the same time, its sudden drop to normal levels can be detrimental increasing the likelihood of infections, e.g., this naturally occurs for short-term stressors. To capture this effect we want to have a second unstable equilibrium with $0<y\ll 1$. As $s$ increases the $x$-coordinates of both fixed points should slowly start decreasing and approaching each other such that for some $0<s_1<s_0$ the stable equilibrium crosses the original position of the unstable one. 
If we would now drop the stress back to $s=0$ it would in fact trigger the expected infection before converging to the healthy state.\\ Finally, it is reasonable to assume that an extreme (prolonged) stress can prevent the body from recovering from an infection, giving a ``burn-out'' state, such that for $s$ close to 1 we want to have a fixed point where $0\ll y<1$.\\ Again combining all the assumptions above, one simple polynomial nonlinear differential equation capturing these effects is given by \begin{equation} \label{eq:xeq} \begin{aligned} x' = & ~~y - 500\left(\frac{1}{5}(kx)^5 - \frac{1}{4}(3x_0+x_1)(kx)^4 + (x_0^2+x_0x_1)(kx)^3\right.\\ & ~~\left.-\frac{1}{2}(x_0^3+3x_0^2x_1)(kx)^2+x_0^3x_1(kx)\right)+0.2 \end{aligned} \end{equation} presenting an S-shaped nullcline with $x_0=0.3$ and the parameter $k=1.3-0.3(1-s)^4$ to control the shift to the right of the equilibria and $x_1=0.8-0.2s^2$ responsible for the disappearing of the stable healthy state. Of course, the actual numerical values in~\eqref{eq:xeq} are not the key aspect but the geometry of the dynamics and the qualitative description of the different aspects of immune reaction, infection level, and stress. In summary, the complete system we arrive at is \begin{equation}\label{eq:main} \begin{aligned} x' = & ~~y - 500\left(\frac{1}{5}(kx)^5 - \frac{1}{4}(3x_0+x_1)(kx)^4 + (x_0^2+x_0x_1)(kx)^3\right.\\ & ~~\left.-\frac{1}{2}(x_0^3+3x_0^2x_1)(kx)^2+x_0^3x_1(kx)\right)+0.2\\ y' = & ~~\alpha\left(-xy + 2\sqrt{1-y}\left(q(y-y_1)^2+(1-q)y\right)\right) \end{aligned} \end{equation} with the parameter values $x_0=0.3$, $q=0.95$, $y_1=0.1$ and $$k=1.3-0.3(1-s)^4~~~~\text{ and }~~~~ x_1=0.8-0.2s^2$$ integrating the effect of stress into the equations. The additional parameter $\alpha$ controls the scale of change of the infection $y$ with respect to the immune system $x$. Figure \ref{fig:phasePortrait} shows the corresponding phase portraits and time series for multiple values of $s$. 
And indeed, our model presents different dynamic regimes upon different stress levels, which we can observe in the phase portraits and time series shown in Figure~\ref{fig:phasePortrait}. \section{Bifurcation analysis} To analyse in more detail the change between the different dynamical regimes, we take a look at the corresponding bifurcation diagram~\cite{Kuznetsov} in Figure \ref{fig:bifurcation}. For $s=0$ we have 3 equilibria: an unstable spiral, a saddle and a stable node. As $s$ increases the stable node and the saddle collide and disappear in a saddle-node in cycle (SNIC) bifurcation for $s\approx 0.48$. At $s\approx 0.95$ we find a second SNIC, where another pair of a stable node and a saddle appear. In addition to the equilibria mentioned above, in the parameter regime between the two bifurcation points we find that all orbits except the unstable equilibrium have to converge to a limit cycle as $t\to\infty$. This can be easily shown using the Poincaré-Bendixson Theorem on the unit square. Note that SNIC bifurcations are of co-dimension one, i.e., they are generic/typical in planar systems with one parameter presenting a mechanism to obtain oscillations. They have been observed already in many models ranging from neuroscience \cite{Xie08, Maesschalck15} to mechanics \cite{Czaplewski18}. They are accompanied by a heteroclinic loop for parameter values $s<0.48$ (or $s>0.95$) since the unstable manifolds of the saddle converge to the stable node. As $s$ approaches the bifurcation the two equilibria collide, one of the heteroclinic connections disappears and the second one becomes a homoclinic orbit. By perturbing $s$ further this homoclinic gives rise to a family of stable limit cycles for $0.48<s<0.95$. \section{Discussion} In this work, we have initialized the qualitative mathematical model development for the interaction between immune system levels, infection dynamics, and stress level. 
Based upon established principles of theoretical biology, we developed a simple, yet powerful, planar dynamical system to analyze the influence of stress as an external parameter. Using only elementary nonlinearities, our model can represent (I) a stable healthy equilibrium for low stress, (II) the effect of sudden stress level drop from moderate to low levels inducing a single infection, (III) periodic outbreaks of infections under high stress, (IV) the transition to a burn-out state at very high stress. The key transition mechanism we have identified is a saddle-node in cycle (SNIC) bifurcation. This bifurcation transition actually makes the model predictive, e.g., it predicts that if stress levels are close to a burn-out stage but still below, then the oscillatory sick periods become longer. Although this looks like a natural prediction, it is one that we did not anticipate to be able to extract from our system at all during the modelling process. Having established the ability of conceptual mathematical models to contribute to the understanding of stress, we believe that further model development and connecting qualitative mathematical models to more detailed biophysical principles as well as to clinical trials, could forge an effective path towards mitigating, and probably even more importantly, predicting in advance, the positive and negative effects of stressors.
1,108,101,565,850
arxiv
\section*{Acknowledgements} The author would like to thank Mike Creutz for discussions related to his action.
1,108,101,565,851
arxiv
\section{Introduction \label{introduction}} The optical properties of a medium can be drastically modified by strong coherent interaction with a laser field, and one of the most prominent examples of the kind is electromagnetically induced transparency (EIT) \cite{fleischhauer2005electromagnetically}, which allows light transmission with large dispersion and gives rise to fascinating phenomena, such as extremely slow group velocity and light storage \cite{hau:99,kash:99,budker:99, chaneliere:05, eisaman:05}. Besides extensive investigations of the temporal dynamics, the spatial effects resulting from EIT have also been studied such as the focusing and de-focusing of transmitted probe light in the presence of a strongly focused coupling beam \cite{moseley1995spatial,moseley1996electromagnetically} and the deflection of probe light when passing through an EIT medium in the presence of a magnetic field gradient \cite{karpa2006stern,zhou2007deflection}. Recently, cancellation of optical diffraction was obtained for a specific detuning of the probe beam where the Doppler-Dicke effect compensates for diffraction \cite{firstenberg2009elimination,firstenberg2009eliminationB}. While studies of EIT generally focus on $\Lambda$ type energy level configurations, more recently, there has been considerable interest with EIT in a ladder scheme involving Rydberg energy levels \cite{mohapatra2007coherent,pritchard2010cooperative, petrosyan2011electromagnetically} (Rydberg EIT). Strong dipolar interaction between Rydberg atoms in such EIT schemes is responsible for the so-called photon blockade, which offers promising means to realize deterministic single photon sources \cite{dudin2012strongly,peyronel2012quantum}, to induce effective interactions between photons \cite{firstenberg2013attractive}, and to realize photonic phase gates \cite{paredes2014all}. 
Rydberg EIT has also attracted attention with the demonstration of interaction enhanced absorption imaging (IEAI) \cite{gunter2012interaction,gunter2013observing}. This imaging technique detects Rydberg excitations via their modification on EIT transparency due to the strong interaction between Rydberg atoms. It confers great potential for the study of many-body physics with Rydberg atoms \cite{low2012experimental,weimer2010rydberg}. Rydberg EIT experiments generally require strongly focused coupling fields in order to obtain sufficiently strong Rabi frequencies on the transition involving the Rydberg state. This focusing inevitably produces strongly inhomogeneous coupling fields. While lensing effect on the probe field associated with this inhomogeneity has been studied using a hot vapour \cite{moseley1995spatial,moseley1996electromagnetically}, until now this effect in cold Rydberg ensembles has received little attention. However, since the probe field is to be strongly modified by interaction induced nonlinearity in cold Rydberg ensembles, having a good understanding and control of the lensing effect is necessary. We present in this paper a precise study of the lensing effect on the probe light by a tightly focused coupling beam in a Rydberg EIT scheme and its dependence on the probe detuning. In contrast to most previous studies on Rydberg EIT, the spatial structures are imaged in our experiment by a diffraction limited optical system. We use $27s$ Rydberg state of $^{87}$Rb atoms so that the effect of interaction between Rydberg atoms is minimal hence the experimental results can be accurately compared with numerical solutions of Maxwell-Bloch equations. This study sets clear delimitation on the possibilities offered by Rydberg EIT. 
\section{Experiment \label{setup}} The preparation of an ultracold $^{87}$Rb atomic sample for our experiment starts with loading a magneto-optical trap (MOT) from a Zeeman-slowed atomic beam, followed by further molasses cooling of the atomic cloud. Subsequently, a guiding magnetic field of approximately 3.5 Gauss along the vertical direction pointing downwards, as shown in Fig.~\ref{ExperimentSetup}(b), is switched on to define the quantization axis, and the atoms in the molasses are optically pumped into $|5s_{1/2}, F=2, m_F=2\rangle$ state for experiment. The population in $|5s_{1/2}, F=2, m_F=2\rangle$ state is controlled by de-pumping a certain fraction of atoms into $|5s_{1/2}, F=1\rangle$ level during this optical pumping stage. This de-pumping scheme allows varying the atomic density without changing much the atomic cloud size \cite{JDPritchardThesis}. At this stage, the atomic cloud has a temperature in the range of 28$\mu$K to 40$\mu$K. A time of flight (TOF) of 6 ms following the optical pumping results in an atomic cloud that has a $1/e^2$ radius $w_r$=2.0 - 2.2 mm in the radial direction and a $1/e^2$ radius $w_z$=1.1 - 1.2 mm in the axial direction (along the quantization axis defined by the guiding B-field). The peak atomic density of $|5s_{1/2}, F=2,m_F=2\rangle$ state, $n_0$, can be varied from $0.3 - 1.6 \times10^{10} \mathrm{cm^{-3}}$ . \begin{figure}[hpb] \includegraphics[width=8.1cm]{Figure1.pdf}% \caption{(a) The diagram of energy levels involved in the ladder scheme EIT. A probe light of $\sigma^+$ polarization drives the transition from $|5s_{1/2}, F=2, m_F=2\rangle$ ($|g\rangle$) to $|5p_{3/2}, F=3, m_F=3\rangle$ ($|e\rangle$), while a coupling light of $\sigma^-$ polarization drives the transition from $|5p_{3/2}, F=3, m_F=3\rangle$ to $|27s_{1/2}, m_J=1/2, m_I = 3/2\rangle$, which is not distinguishable in energy from other hyperfine states of $|27s_{1/2}, J=1/2, m_J=1/2\rangle$ ($|r\rangle$) in our setup. 
The detuning of the probe (coupling) light, $\Delta_p$ ($\Delta_c$) is defined as $\Delta_p = \omega_p - \omega_e$ ($\Delta_c = \omega_c - \omega_r$), where $\omega_p$ ($\omega_c$) is the frequency of the probe (coupling) light and $\omega_e$ ($\omega_r$) is the resonance frequency of the $\ket{e}\leftrightarrow\ket{g}$ ($\ket{r}\leftrightarrow\ket{e}$) transition. (b) The schematics of the optical setup for EIT beams. The magnetic field $\vec{B}$ along the vertical direction is pointing from top to bottom. The probe beam and the coupling beam are counter-propagating along the quantization axis, which is also the axial axis of the atomic cloud (indicated as a solid ellipse). After passing through the atomic cloud, the probe beam is separated from the coupling beam by a dichroic mirror and goes through the rest of optical imaging system to be imaged onto an electron multiplying charge coupled device (EMCCD camera). The lens shown here has a focal length of 160 mm. The dimensions are not to scale, but only indicate their relative shapes and positions. \label{ExperimentSetup}} \end{figure} \begin{figure}[hpb] \includegraphics[width=8.1cm]{Figure2.pdf}% \caption{Images of the probe light from (a) experiment and (b) simulation. The probe detuning $\Delta_p$ for each set of images is given on the left side. The images in (a) are taken under the experimental conditions of $w_z$=1.1$\pm 0.1$ mm, $n_0$ = ($0.59\pm0.06)\times10^{10} \mathrm{cm^{-3}}$, $\Omega_{p0}/\Gamma_{e}=0.16\pm0.01$, $w_c$ = $49\pm1$ $\mathrm{\mu m}$, $\Delta_c/\Gamma_{e} = 0\pm0.05$, $\Omega_{c0}/\Gamma_{e}=1.98\pm0.05$. Each image in (a) is an average of 5 experimental shots. The same experimental conditions are also used as the inputs for solving the Maxwell-Bloch equations to generate the simulated images in (b), as detailed in the text. The thin dotted circles on the images of $\Delta_p/\Gamma_{e} = 0.05$ indicate the $1/e^2$ Gaussian size of the coupling beam. 
The color scale at the bottom right applies to all images. \label{Pic}} \end{figure} The states involved in the ladder scheme EIT are shown in Fig.~\ref{ExperimentSetup}(a), and the schematics of the optical setup for the EIT beams is shown in Fig.~\ref{ExperimentSetup}(b). The 780 nm laser beam for driving the $|g\rangle \rightarrow |e\rangle$ probe transition is generated from a Toptica DL pro diode laser, and the 480 nm laser beam for driving the $|e\rangle \rightarrow |r\rangle$ coupling transition is generated by a Toptica TA-SHG frequency-doubled diode laser system. Both the 780 nm laser and the 480 nm laser (via the fundamental light at 960 nm) are frequency locked to the same high-finesse Fabry-Perot cavity by Pound-Drever-Hall technique, which yields a linewidth of $\lesssim$ 30 kHz for the 780 nm laser and $\lesssim$ 60 kHz for the 480 nm laser. As illustrated in Fig.~\ref{ExperimentSetup}(b), the probe beam passing through the atomic cloud has a collimated $1/e^2$ radius $w_p$ of 3.45 mm, while the coupling beam is focused at the center of the atomic cloud with a $1/e^2$ radius $w_c$ in the range of 30 - 50 $\mu$m. When the incoming probe beam Rabi frequency $\Omega_{p0}$ is much smaller than the peak Rabi frequency of the coupling beam $\Omega_{c0}$, $\Omega_{p0} \ll \Omega_{c0}$, the coupling beam opens up a transparency window for the probe light to propagate through the otherwise opaque atomic cloud at the frequency around the probe transition resonance. It also induces a large index gradient along its transverse direction and results in a lensing effect. The intensity distribution of the probe beam at the exit of the atomic cloud, 1.1 mm below the center of the cloud, is directly imaged on the EMCCD camera through a diffraction limited optical system. 
In each experimental cycle, the atomic cloud is prepared in $|5s_{1/2}, F=2,m_F=2\rangle$ state as described above, and the probe and coupling beams are turned on simultaneously for 15 $\mu$s during which the camera is exposed to take the image of the transmitted probe beam. To obtain an EIT transmission spectrum, the probe detuning $\Delta_p$ is varied from shot to shot to scan through the probe resonance while the coupling beam detuning $\Delta_c$ is fixed throughout. Shown in Fig.~\ref{Pic}(a) are a set of sample images of the transmitted probe light taken at different probe detunings $\Delta_p$, and the detailed description and discussion on the images and the spectra extracted from them are given in the next section. \section{Results and discussion \label{results}} \begin{figure}[hpb] \includegraphics[width=8.1cm]{Figure3.pdf}% \caption{Transmission spectra of the transmitted probe light for different atomic densities and different coupling beam sizes. The black squares with error bar are experimental data, and the red lines are results of simulation that has only experimental parameters as input (please see the text). The spectra are taken at the conditions of (a) $w_z$=1.2$\pm 0.1$ mm, $n_0$ = ($1.40\pm0.15)\times10^{10} \mathrm{cm^{-3}}$, $w_c$ = 49 $\pm$ 1 $\mathrm{\mu m}$, $\Delta_c/\Gamma_{e}$ = $0.16\pm0.05$, $\Omega_{c0}/\Gamma_{e} =1.98\pm0.05$; (b) $w_z$=1.1$\pm 0.1$ mm, $n_0$ = $(0.59\pm0.06)\times10^{10} \mathrm{cm^{-3}}$, $w_c$ = 49 $\pm$ 1 $\mathrm{\mu m}$, $\Delta_c/\Gamma_{e} = 0\pm0.05$, $\Omega_{c0}/\Gamma_{e} = 1.98\pm0.05$; (c) $w_z$=1.1$\pm 0.1$ mm, $n_0$ = $(0.69\pm0.07)\times10^{10} \mathrm{cm^{-3}}$, $w_c$ = 34 $\pm$ 1 $\mathrm{\mu m}$, $\Delta_c/\Gamma_{e} = 0\pm0.05$, $\Omega_{c0}/\Gamma_{e}=3.18\pm0.05$. 
All three spectra are taken with $\Omega_{p0}/\Gamma_{e}=0.16\pm0.01$.\label{spectra}} \end{figure} While different models have been developed to give accurate descriptions of the spatial effects of inhomogeneous EIT media on the propagation of the probe light \cite{moseley1996electromagnetically,manassah1996induced,zhou2007deflection,zhang2009birefringence}, the essential physics can be qualitatively captured in the following argument. In EIT, the linear susceptibility for the probe light is given by \begin{equation} \chi^{(1)} \left ( \vec r \right) =-i \frac{n_{at}\left ( \vec r \right) \Gamma_{e} \sigma_0 \lambda}{ 4 \pi \left(\gamma _{ge}-i \Delta _p+\frac{\Omega _c \left ( \vec r \right) ^2}{4 (\gamma _{gr}-i (\Delta _c+\Delta _p))}\right)} \label{susceptibility}, \end{equation} where $\lambda$ is the wavelength of the probe transition, $\sigma_0=3 \lambda^2 /2 \pi$ the resonant cross-section of the probe transition, $\Gamma_{e}=2\pi\times6.067$ MHz the decay rate of intermediate state $|e\rangle$, $\Delta _p$ and $\Delta _c$ the detunings of probe and coupling lights as defined earlier, and finally $\gamma _{ge}\approx\Gamma_e/2$ and $\gamma _{gr}=(\Gamma_r+\gamma _p+\gamma_c)/2+\gamma_D$ the decay rates of atomic coherences. Here, $\Gamma_r \sim 2\pi \times 10$ kHz \cite{branden2010radiative} is the decay rate of the upper state $|r\rangle$, $\gamma_{p} ( \gamma_{c} )$ is the linewidth of the probe (coupling) laser, and $\gamma_D$ is the dephasing rate from all other sources. The refractive index is related to the linear susceptibility $\chi^{(1)} \left ( \vec r \right)$ via the expression \begin{equation} n(\vec r) \approx \left( 1+\frac{1}{2} \Re \left(\chi^{(1)} \left( \vec r \right) \right) \right)\label{refractiveindex}. 
\end{equation} Seen from Eqs.\eqref{susceptibility} and \eqref{refractiveindex}, the inhomogeneity in atomic density $n_{at}\left( \vec r \right)$ and Rabi frequency of coupling transition $\Omega_c \left( \vec r \right)$ can give rise to non-zero gradient in the refractive index, which results in the deflection of the probe light wave vector as it travels through such medium. For large $\Omega_c \left( \vec r \right)$, negligible $\gamma_{gr}$ and $\Delta_c\sim0$, the sign of the probe light detuning $\Delta_p$ decides the direction of the deflection either along or against the gradient of the refractive index. In our experimental configuration, the atomic density $n_{at}(\vec r)$ along the radial direction of the transparency window is constant. On the other hand, the rapid change of the coupling Rabi frequency $\Omega_c(\vec r)$ due to the Gaussian intensity profile gives rise to a large gradient in the refractive index $n(\vec r)$. The probe light passing through this transparency window experiences lensing effects due to the high gradient of the refractive index, as can be seen in Fig.~\ref{Pic}. The images in Fig.~\ref{Pic}(a) are acquired with the conditions detailed in the figure caption and from top to bottom, the probe detuning is varied from red to blue. The field of view of each image is centered around the coupling beam and is much smaller than the atomic cloud and the probe beam. The spot in the middle of each image is the transmitted probe light through EIT area while the uniform background indicates the absorption level of probe light by the atomic cloud with absence of the coupling light. 
It can be clearly seen that the intensity of the transmitted probe light at the red probe detuning $\Delta_p / \Gamma_e = -0.28$ is enhanced while the intensity on the blue side with a detuning $\Delta_p / \Gamma_e = 0.30$ is reduced, compared with the incoming probe intensity, which is about the same as the intensity of the transmitted probe beam on resonance ($\Delta_p / \Gamma_e = 0.05$ in Fig.~\ref{Pic}). Moreover, the spot size of the red-detuned probe light is smaller than that of the blue-detuned with a similar $|\Delta_p|$. Both the intensity and the size indicate the focusing of the red-detuned probe light and the defocusing of the blue-detuned one, since, if not due to the lensing effect, the transmitted spots would have similar intensity and size at detunings symmetric with respect to the resonance. It should be noted that the dark ring around the bright transmitted spots is not due to the lensing effect. Instead, it comes from the spatially varying coupling Rabi frequency as a result of the Gaussian intensity profile of the coupling beam. This spatial dependent Rabi frequency gives rise to a larger Autler-Townes splitting at the center of the coupling beam and smaller ones towards the edge of the beam. Consequently, the transmitted spot of on-resonance probe light ($\Delta_p / \Gamma_e = 0.05$ in Fig.~\ref{Pic}) has the largest size, since there is no Autler-Townes enhanced absorption throughout the whole EIT area, whereas the transmitted spots of off-resonance probe light have smaller sizes with surrounding dark rings due to enhanced absorption at Autler-Townes splitting frequencies. Because of this change of transmitted spot size vs. 
detuning, the focusing (defocusing) of the red (blue)- detuned probe light cannot be defined relative to the transmitted probe beam on resonance, but should rather be defined relative to the transmitted probe beam at that particular detuning with no lensing effect \footnote{The transmitted probe beam with no lensing effect can be simulated by removing the transverse gradient term of Eq.\eqref{maxS} in the appendix.}. In the experimental observation of Fig.~\ref{Pic} where the lensing effect is present, this can only be acknowledged by comparing the transmitted probe beam size and intensity at the detunings symmetric with respect to the resonance. In order to obtain EIT transmission spectra, the transmission of the probe light is extracted by taking the ratio between the probe intensity at the center of such images ($I$) and that of the incoming probe beam without the atomic cloud ($I_0$). The transmission spectra shown in Fig.~\ref{spectra} are generated by plotting the probe transmission ($I/I_0$) as a function of the probe detunings $\Delta_p$ for atomic densities and coupling beam sizes detailed in the figure caption. As expected, there is a transparency spectral window near the probe resonance due to the coherent interaction between the coupling light and atoms, and the two absorption peaks are from the Autler-Townes splitting. The lensing effect within this transparency spectral range can be clearly seen from the greatly enhanced transmission at the red detuning $\Delta_p <$ 0 and the somewhat reduced transmission at the blue detuning $\Delta_p >$ 0. This enhanced transmission at the red detuning highly depends on the atomic density $n_{0}$ and the coupling beam size $w_c$. With the same coupling beam size $w_c$ and the same peak Rabi frequency $\Omega_{c0}$, the atomic cloud with a higher density in Fig.~\ref{spectra}(a) focuses the probe light more than that with a lower density in Fig.~\ref{spectra}(b). 
Moreover, if the atomic density is about the same, but the coupling beam size $w_c$ is focused down further (the peak Rabi frequency $\Omega_{c0}$ is consequently larger), the focusing of the probe light is greatly enhanced, as shown in Fig.~\ref{spectra}(b) and (c). \begin{figure}[hpb] \includegraphics[width=7.5cm]{Figure4.pdf}% \caption{Transmission spectrum of the transmitted probe light through a thin single-beam ODT released atomic sample with the $1/e^2$ radius $w_z = 55.0\pm0.5$ $\mathrm{\mu m}$ (along propagation direction of EIT beams) and the atomic density $n_{0}$ = $(3.30\pm0.03)\times10^{10} \mathrm{cm^{-3}}$. The black squares with error bar are experimental data, and the red line is a one-dimensional fit from the formula $T(r=0,\Delta_p)$ given in the text.\label{ODTSpectra}} \end{figure} The lensing effect also critically depends on the size of the atomic cloud. In order to verify this, the same experiment is performed with an atomic cloud released from a very thin single-beam optical dipole trap (ODT). The ODT is horizontally positioned at 1.1 mm below the center of the molasses atomic cloud in the previous experiment, that is, at the object plane of the camera. The $1/e^2$ radius of this atomic cloud, which is along the propagation direction of the EIT beams, is $w_z=55.0\pm0.5$ $\mu$m (20 times smaller than that of the molasses atomic cloud). As shown in Fig.~\ref{ODTSpectra}, the spectrum of the Autler-Townes splitting is observed without any obvious lensing effect, in stark contrast to what is being observed in the molasses atomic cloud. To understand our experimental results on lensing effect more quantitatively, we model our experimental system with a set of coupled Maxwell-Bloch equations as described in detail in the appendix. 
The inputs are the experimentally calibrated parameters including: a) the atomic density $n_{0}$ and the atomic cloud size $w_z$; b) the peak Rabi frequency $\Omega_{c0}$ and the waist $w_c$ of the coupling light; c) the initial Rabi frequency of the probe light $\Omega_{p0}$; d) the decay rate of atomic coherence $\gamma_{gr}$. The atomic density $n_{0}$ and atomic cloud size $w_z$ are well known from measurement with absorption imaging. The peak Rabi frequency $\Omega_{c0}$ and waist $w_c$ are extracted from a two-dimensional fit of images taken in the experiment performed on the thin atomic cloud released from the ODT. The transmission formula used for fitting is $T(r,\Delta_p)= \mathrm{exp}\left(-k\int_{-\infty}^{+\infty} \mathrm{Im}[\chi^{(1)}(r,z,\Delta_p)]dz\right)$, where $r$ and $z$ stand for the radial and axial coordinates respectively, $k=2\pi/\lambda$, and $\chi^{(1)}$ is defined in Eq.\eqref{susceptibility}. These parameters are given in the caption of Figs.~\ref{Pic} and~\ref{spectra}. The decay rate of atomic coherence $\gamma_{gr}$ is obtained from fitting the probe beam transmission vs. $\Omega_{p0}$ at the center of the transmitted beam (with $\Omega_{c0}$) and $\Delta_p = 0$. This measurement yields the value of $\gamma_{gr}$ in the range of 50 - 150 kHz depending on atomic density, which is consistent with the evaluation of $\gamma_{gr}$ from various dephasing mechanisms in our experiment. $\gamma_{gr}$ used in the simulation is set to be 100 kHz since we find the results of the simulation are not very sensitive to $\gamma_{gr}$ in the range of 50 - 150 kHz. The simulated images of the probe light intensity at the exit of the atomic cloud are shown in Fig.~\ref{Pic}(b), along the side of the experimental images taken with the same parameters. The spectra from simulation are plotted together with experimental data in Fig.~\ref{spectra}. 
The experimental and theoretical results show excellent agreement, which confirms the good control in our experiment and lays a solid foundation for further pursuing the experimental investigation of the interaction between Rydberg excitations using IEAI in our system. \section{Summary \label{summary}} In summary, we have observed the lensing effect on the probe light in electromagnetically induced transparency involving a Rydberg state by directly imaging the probe beam passing through a laser-cooled atomic cloud. With the atomic cloud of only moderate optical depth, the transmitted probe light is strongly focused at a frequency red detuned from the probe resonance, and has a peak intensity a few times that of the input probe light. This study is important for imaging Rydberg excitations via interaction enhanced absorption imaging based on Rydberg EIT. It is also highly relevant in studying non-linearity of cold interacting Rydberg ensembles as the probe intensity determines the strength of interaction between Rydberg polaritons \cite{firstenberg2013attractive,bienias2014scattering}. It will be interesting to investigate how such lensing effect is modified by the interaction between Rydberg atoms, which will be significant when a Rydberg state of high principal quantum number is used. Combining dispersive non-linearities and focusing, one may imagine creating a one-dimensional gas of Rydberg atoms. It may be also possible to tune the interaction between Rydberg atoms in order to switch from focusing to defocusing lensing effect. \begin{acknowledgments} The authors thank Thi Ha Kyaw, Nitish Chandra, and Armin Kekic for the early preparation of experimental setups, and acknowledge the support from the Ministry of Education and the National Research Foundation, Singapore. This work is partly supported through the Academic Research Fund, Project No. MOE2015-T2-1-085. \end{acknowledgments}
1,108,101,565,852
arxiv
\section{Introduction} Given a real number $\xi$, the irrationality exponent of $\xi$ is defined as follows $$ \mu(\xi):=\sup\left\{\mu\in\mathbb{R}\;:\;\left| \xi - \frac{p}{q}\right| < q^{-\mu}\;\mbox{has i.m. solutions }\; p/q\in\mathbb{Q}\right\}. $$ This is one of the most important approximational properties of a number, which indicates, how well it can be approximated by rationals in terms of their denominators. In recent years there was a lot of interest in understanding the irrationality exponents of Mahler numbers. By Mahler numbers we understand the values of Mahler functions at integer points. The Mahler functions are in turn analytical functions $f\in\mathbb{Q}((z^{-1}))$ which for any $z$ inside their disc of convergence satisfy the equation of the form $$ \sum_{i=0}^n P_i(z)f(z^{d^i})=Q(z) $$ where $n\ge 1, d\ge 2$ are integer, $P_0,\ldots, P_n, Q\in \mathbb{Q}[z]$ and $P_0P_n\neq 0$. One of the first results in this direction was achieved in 2011 by Bugeaud~\cite{bugeaud_2011}. He showed that the irrationality exponent of the Thue-Morse numbers equals two. These are the numbers of the form $f_{tm}(b):=\sum_{n=0}^\infty \frac{t_n}{b^n}$ where $b$ is integer and $t_n$ is the famous Thue-Morse sequence in $\{0,1\}$ which is recurrently defined as follows: $t_0:=0, t_{2n}:=t_n$ and $t_{2n+1} = 1-t_{2n}$. One can easily verify that the function $z^{-1}(1 - 2f_{tm}(z))$ satisfies the Mahler equation $$ f_{tm}(z) = (z-1)f_{tm}(z^2). $$ For more results of this type, see~\cite{adamczewski_rivoal_2009}, \cite{coons_2013}, \cite{gww_2014}, \cite{badziahin_2017}. In~\cite{bugeaud_han_wen_yao_2015}, the authors provide a non-trivial upper bound for the irrationality exponent of $f(b)$ where the Mahler functions satisfy $$ Q(z) = P_0(z) f(z) + P_1(z)f(z^d). $$ Their bound is quite general and in many cases it is sharp. 
But that result is often hard to apply because it requires the knowledge about the distribution of non-zero Hankel determinants of $f(z)$, which are not easy to compute. Later Badziahin~\cite{badziahin_2019} provided the precise formula for $\mu(f(b))$ for a slightly narrower set of Mahler functions: \begin{theoremd} Let $f(z)\in \mathbb{Q}((z^{-1}))\setminus \mathbb{Q}(z)$ be a solution of the functional equation $$ f(z) = \frac{A(z)}{B(z)} f(z^d),\qquad A,B\in\mathbb{Q}[z],\; B\neq 0,\quad d\in\mathbb{Z},\; d\ge 2. $$ Let $b\in \mathbb{Z}, |b|\ge 2$ be inside the disc of convergence of $f$ such that $A(b^{d^m})B(b^{d^m}) \neq 0$ for all $m\in\mathbb{Z}_{\ge 0}$. Then \begin{equation}\label{th1_eq} \mu(f(b)) = 1 + \limsup_{k\to\infty}\frac{d_{k+1}}{d_k}. \end{equation} \end{theoremd} Here, $d_k$ is the degree of the denominator of the $k$'th convergent of $f(z)$. We discuss these notions in Section~\ref{sec_hankel}. The upshot is that the irrationality measure of $f(b)$, given by~\eqref{th1_eq} is completely determined by the sequence $d_k$. However determining this sequence for a precise Mahler function $f(z)$ may be problematic. In 2017, the first author~\cite{badziahin_2017} verified that $d_k= k$ for all functions $g_u(z)$ which satisfy $g_u(z) = (z+u)g_u(z^2)$, $u\in\mathbb{Q}$ and $u\neq 0,1$. Equivalently, such functions can be written as the infinite products $$ g_u(z) = z^{-1}\prod_{t=0}^\infty (1+uz^{-2^t}). $$ Notice that $g_0(z)$ and $g_1(z)$ are rational functions. Therefore we now have a complete understanding of irrationality exponents of $g_u(b)$. The next natural case to investigate are the following infinite products: $$ g_{u,v}(z) = z^{-1} \prod_{t=0}^\infty (1+uz^{-3^t} + vz^{-2\cdot 3^t}),\quad u,v\in\mathbb{Q}. $$ In~\cite{badziahin_2017}, Badziahin started the investigation of sequences $d_k$ for various pairs $(u,v)$ of integer numbers. 
It was shown that $\limsup_{k\to\infty} d_{k+1}/d_k > 1$ for: \begin{enumerate} \item $(u,v) = (\pm u, u^2)$, $u\in\mathbb{Q}$; \item $(u,v) = (\pm s^3, -s^2(s^2+1))$, $s\in\mathbb{Q}$; \item $(u,v) = (\pm 2,1)$. \end{enumerate} Later, it was shown \cite{badziahin_2019} that in the first two cases the value $\limsup_{k\to\infty} d_{k+1}/d_k$ is equal to two while in the third case it is $\frac75$. It is shown in the same paper that for functions $g_{u,v}(z)$ the condition $\limsup_{k\to\infty} d_{k+1}/d_k=1$ is equivalent to $\forall k\in\mathbb{N}$, $d_k = k$. It is conjectured \cite[Conjecture A]{badziahin_2017} that $d_k = k$ for all $k\in\mathbb{Z}$ for the other pairs $(u,v)\in\mathbb{Z}^2$. This conjecture is verified\cite{badziahin_2017} for large sets of pairs $(u,v)$. In particular it is true if $u=0$ or $v=0$ and also for the region $$ \big\{(u,v)\in\mathbb{Q}^2\;:\; u^2\ge 6,\; v\ge \max\{3u^2-1, 2u^2+8\}\big\}. $$ Some local conditions on $u$ and $v$ modulo 3, ensuring $d_k=k$ are also provided in~\cite{bugeaud_han_wen_yao_2015}. The purpose of this note is to cover as many other pairs $(u,v)\in\mathbb{Z}^2$ as possible and hence make a contribution to the conjecture above. The main result of this paper provides a number of local conditions on $(u,v)$ modulo any prime $p\ge 3$ which ensure that $d_k=k$ for all $k$. In particular, for $p=3$ they coincide with those from~\cite{bugeaud_han_wen_yao_2015}. 
\begin{theorem}\label{th1} Let $p\geq 3$ be prime and $(u,v)\in \mathbb{Z}^2$ satisfy one of the properties \begin{gather} u^2 \equiv 3,\quad v \equiv 1 \pmod p; \label{case1}\\ u^2 \equiv -3,\quad v \equiv -1 \pmod p; \label{case2}\\ u \equiv \pm \varphi,\; v \equiv 0 \pmod p,\quad \mbox{where } \varphi^2 + \varphi + 1 \equiv 0\pmod p; \label{case3}\\ u \equiv \pm \varphi,\; v \equiv -1 \pmod p,\quad \mbox{where } \varphi^4 + 4\varphi^2 + 1 \equiv 0\pmod p; \label{case4}\\ u \equiv \pm \varphi,\; v \equiv \delta \pmod p,\quad \mbox{where } \delta^2 - \delta + 1 \equiv 0,\; \varphi^2 \equiv 2\delta \pmod p; \label{case5}\\ u \equiv 0,\; v \equiv \pm \delta \pmod p,\quad \mbox{where } \delta^2 + \delta + 1 \equiv 0\pmod p; \label{case6}\\ u \equiv \pm 2\delta^2,\; v \equiv \delta \pmod p,\quad \mbox{where } \delta^2+\delta+1 \equiv 0 \pmod p,\; p\neq 3. \label{case7} \end{gather} Then all the partial quotients of the continued fraction for the Mahler function $g_{u,v}(z)$ are linear. \end{theorem} Theorem~\ref{th1} together with~{\bf (B)} imply the following \begin{corollary} Let $p\ge 3$ be prime and $(u,v)\in \mathbb{Z}^2$ satisfy one of the properties~\eqref{case1} --~\eqref{case7} of Theorem~\ref{th1}. Then for any integer $b$ such that $|b|\ge 2$ and $g_{u,v}(b)\neq 0$ one has $$ \mu(g_{u,v}(b)) = 2. $$ \end{corollary} \section{Continued Fractions of Laurent Series}\label{sec_hankel} Let $\mathbb{F}$ be a field. Consider the set $\mathbb{F}[[z^{-1}]]$ of Laurent series together with the valuation: $||\sum_{k=-d}^\infty c_kz^{-k}|| = d$, the biggest degree $d$ of $z$ having non-zero coefficient $c_{-d}$. For example, for polynomials $f(z)$ the valuation $||f(z)||$ coincides with their degree. It is well known that in this setting the notion of continued fraction is well defined. 
In other words, every $f(z)\in \mathbb{F}[[z^{-1}]]$ can be written as $$ f(z) = [a_0(z), a_1(z),a_2(z),\ldots] = a_0(z) + \frac{1}{a_1(z) + \frac{1}{a_2(z) + \cdots}} , $$ where the $a_i(z)$ are non-zero polynomials of degree at least 1, $i\in\mathbb{Z}_{\ge 0}$. We refer the reader to a nice survey~\cite{poorten_1998} for more properties of the continued fractions of Laurent series. It will be more convenient for us to renormalise this continued fraction to the form \begin{equation} f(z) = a_0(z) + \frac{\beta_1}{a_1(z) + \frac{\beta_2}{a_2(z) + \frac{\beta_3}{a_3(z) + \cdots}}} =: a_0(z) + \operatornamewithlimits{K}_{i=1}^\infty \frac{\beta_i}{a_i(z)} \end{equation} where $\beta_i\in\mathbb{F}\setminus\{0\}$ are constants and $a_i(z)\in \mathbb{F}[z]$ are monic polynomials for $i\ge 1$. By analogy with the classical continued fractions over $\mathbb{R}$, by the $k$'th convergent of $f$ we denote the rational function $$ \frac{p_k(z)}{q_k(z)}:= a_0(z) + \operatornamewithlimits{K}_{i=1}^k \frac{\beta_i}{a_i(z)}. $$ They satisfy the following recurrence relation \begin{equation}\label{convp} p_0(z) = a_0(z), \quad p_1(z) = a_0(z)a_1(z) + \beta_1,\quad p_n(z) = a_n(z)p_{n-1}(z) + \beta_np_{n-2}(z), \end{equation} \begin{equation}\label{convq} q_0(z) = 1, \quad q_1(z) = a_1(z),\quad q_n(z) = a_n(z)q_{n-1}(z) + \beta_nq_{n-2}(z). \end{equation} By $d_k$, we denote the degree (or the valuation) of the denominator $q_k(z)$ of the $k$'th convergent of $f(z)$. By analogy with the classical case of real numbers, we call a series $f(z)\in\mathbb{F}[[z^{-1}]]$ badly approximable if there exists an absolute constant $M$ such that $\forall k\in\mathbb{N}$, $||a_k(z)||\le M$. Formulae~\eqref{convp},~\eqref{convq} suggest that $||a_k(z)|| = d_{k} - d_{k-1}$ therefore an equivalent condition for badly approximable series is $d_k - d_{k-1} \le M$. 
Coming back to the series $g_{u,v}(z)$, it is known (see~\cite[Proposition 1]{badziahin_2017}) that $g_{u,v}(z)$ is badly approximable if and only if $d_k=k$ for all positive integers $k$. Now the main tool in the proof of Theorem~\ref{th1} is the following criterion~\cite[Theorems 2,3]{badziahin_2019} which ensures that this condition holds for $g_{u,v}(z)$. \begin{theoremB} Let $\mathbf{u}=(u,v)\in\mathbb{Q}^2$. Let the sequences $\alpha_i$ and $\beta_i$ of rational numbers be computed by the recurrent formulae \begin{equation}\label{init_d3} \begin{array}{lll} \alpha_1 = -u,&\displaystyle \alpha_2 = \frac{u(2v-1-u^2)}{v-u^2},&\displaystyle\alpha_3 = \frac{-u(v-1)}{v-u^2};\\[2ex] \beta_1 = 1,& \beta_2 = u^2-v,&\displaystyle\beta_3 = \frac{u^2+u^4+v^3 - 3u^2v}{(v-u^2)^2}. \end{array} \end{equation} and \begin{equation}\label{recur_d3} \begin{array}{l} \displaystyle \alpha_{3k+4} = -u,\quad \beta_{3k+4} = \frac{\beta_{k+2}}{\beta_{3k+3}\beta_{3k+2}};\\[1ex] \displaystyle \beta_{3k+5} = u^2 - v - \beta_{3k+4},\quad \alpha_{3k+5} = u - \frac{\alpha_{k+2}+uv-\alpha_{3k+2}\beta_{3k+4}}{\beta_{3k+5}}\\[1ex] \alpha_{3k+6} = u-\alpha_{3k+5},\quad \beta_{3k+6} = v - \alpha_{3k+5}\alpha_{3k+6}. \end{array} \end{equation} for any $k\in\mathbb{Z}_{\ge 0}$. If all algebraic operations in these formulae are valid and $\beta_i\neq 0$ for all $i\in\mathbb{N}$ then $$ g_\mathbf{u}(z) = a_0(z) + \operatornamewithlimits{K}_{i=1}^\infty \frac{\beta_i}{a_i(z)},\qquad a_i(z) = z+\alpha_i, $$ that is, all partial quotients of $g_\mathbf{u}(z)$ are linear. \end{theoremB} \section{Proof of Theorem~\ref{th1}} The main idea of the proof is that if a pair $(u,v)$ satisfies one of the conditions~\eqref{case1} --~\eqref{case7} of Theorem~\ref{th1} then the sequences $\alpha_i$ and $\beta_i$ from Theorem~B2 modulo $p$ follow a nice pattern and moreover $\beta_i$ is never congruent to zero modulo $p$. That immediately implies that for all $i\in\mathbb{N}$, $\beta_i\neq 0$ and Theorem~B2 implies the required result. 
While in each of the cases~\eqref{case1} --~\eqref{case7} the pattern for sequences $\alpha_i$ and $\beta_i$ is different, the proofs are extremely similar. We will provide a detailed proof in the case~\eqref{case1} and only outline the proofs in the remaining cases~\eqref{case2} --~\eqref{case7}. \begin{lemma} If $u^2 \equiv 3 \pmod p$ and $v \equiv 1 \pmod p$ for odd prime $p$, then the sequences $(\alpha_i)_{i\in\mathbb{N}}$ and $(\beta_i)_{i\in\mathbb{N}}$ are given by the formula for all $k\ge 0$: \begin{gather} \alpha_{3k+1} \equiv -u,\; \alpha_{3k+2} + \alpha_{3k+3} \equiv u \pmod p;\\ \alpha_{9k+2} \equiv u,\; \alpha_{9k+5} \equiv \alpha_{3k+3},\; \alpha_{9k+8} \equiv 0 \pmod p;\\ \beta_1 \equiv 1,\; \beta_2 \equiv 2,\; \beta_{k+3} \equiv 1 \pmod p. \end{gather} \end{lemma} \begin{proof} To shorten the notation, in this proof we omit the $\pmod p$ as it is implied in every congruence. We use the formulae~\eqref{init_d3} and~\eqref{recur_d3} to compute the first 9 values of $\alpha_i$ and $\beta_i$: $$\alpha_1 \equiv -u,\; \alpha_2 \equiv u,\; \alpha_3 \equiv 0,\; \alpha_4 \equiv -u,\; \alpha_5 \equiv 0,$$ $$\alpha_6 \equiv u,\; \alpha_7 \equiv -u,\; \alpha_8 \equiv 0,\ \alpha_9 \equiv u,$$ $$\beta_1 \equiv 1,\; \beta_2 \equiv 2,\; \beta_i \equiv 1 \text{ for } 3\leq i\leq 9.$$ Now we prove by induction that for $k \geq 1$: $$\alpha_{9k+1} \equiv \alpha_{9k+4} \equiv \alpha_{9k+7} \equiv-u,$$ $$\alpha_{9k+2} \equiv u, \alpha_{9k+3} \equiv 0, \alpha_{9k+5} \equiv \alpha_{3k+3}, \alpha_{9k+6} \equiv \alpha_{3k+2}, \alpha_{9k+8} \equiv 0, \alpha_{9k+9} \equiv u$$ $$\beta_{i} \equiv 1, \quad 9k+1 \leq i \leq 9k+9$$ Which will give the formula we desire. Note that this is the same as equations (18) - (20), as we have just given explicit formulas for the terms defined by $\alpha_{3k+3} = u - \alpha_{3k+2}$. Suppose that the formulas hold for all $0 \leq k \leq n$. 
Also note that this implies that up to these values, every pair $\alpha_{3m+2},\alpha_{3m+3}$ is either $(0,u)$ or $(u,0)$ modulo $p$. Now we prove them for $k = n+1$. First, it is obvious that $\alpha_{9(n+1)+1} \equiv \alpha_{9(n+1)+4} \equiv \alpha_{9(n+1)+7} \equiv-u$ as they are all of the form $\alpha_{3k+1}$. Second, by~\eqref{recur_d3} we have $\beta_{9(n+1)+1} \equiv \frac{\beta_{3(n+1)+1}}{\beta_{9(n+1)}\beta_{9(n+1)-1}} \equiv \frac{1}{1\cdot 1} \equiv 1$ by the induction hypothesis. Then $\beta_{9(n+1) + 2} \equiv u^2 - 1 - \beta_{9(n+1)+1} \equiv 3 - 1 - 1 \equiv 1$. Third, we compute: \begin{align*} \alpha_{9(n+1)+2} &\equiv u - \frac{\alpha_{3(n+1)+1} + u - \alpha_{9(n+1)-1}\beta_{9(n+1)+1}}{\beta_{9(n+1)+2}} \\ &\equiv u - (\alpha_{3n+4} + u - \alpha_{9n+8}) \\ &\equiv u - (-u + u - 0) \equiv u \end{align*} This then implies $\alpha_{9(n+1)+3} \equiv u - \alpha_{9(n+1)+2} \equiv 0$. Thus, $\beta_{9(n+1)+3} \equiv 1 - \alpha_{9(n+1)+2}\alpha_{9(n+1)+3} \equiv 1$. Fourth, we continue in the same way to compute $\beta_{9(n+1)+4} \equiv \frac{\beta_{3(n+1)+2}}{\beta_{9(n+1)+3}\beta_{9(n+1)+2}} \equiv \frac{1}{1\cdot 1} \equiv 1$ and $\beta_{9(n+1) + 5} \equiv u^2 - 1 - \beta_{9(n+1)+4} \equiv 1$. Now this implies: \begin{align*} \alpha_{9(n+1)+5} &\equiv u - \frac{\alpha_{3(n+1)+2} + u - \alpha_{9(n+1)+2}\beta_{9(n+1)+4}}{\beta_{9(n+1)+5}} \\ &\equiv u - (\alpha_{3(n+1)+2} + u - \alpha_{9(n+1)+2}) \\ &\equiv u - (\alpha_{3(n+1)+2} + u - u) \\ &\equiv u - \alpha_{3(n+1)+2} \equiv \alpha_{3(n+1)+3}. \end{align*} This then implies $\alpha_{9(n+1)+6} \equiv u - \alpha_{3(n+1)+3} \equiv \alpha_{3(n+1)+2}$ and $\beta_{9(n+1)+6} \equiv 1 - \alpha_{9(n+1)+5}\alpha_{9(n+1)+6} \equiv 1 - \alpha_{3(n+1)+2}\alpha_{3(n+1)+3} \equiv 1$, as by the induction hypothesis one of these is $0$ and the other is $u$. 
Finally we have $\beta_{9(n+1)+7} \equiv \frac{\beta_{3(n+1)+3}}{\beta_{9(n+1)+6}\beta_{9(n+1)+5}} \equiv \frac{1}{1\cdot 1} \equiv 1$ and $\beta_{9(n+1) + 8} \equiv u^2 - 1 - \beta_{9(n+1)+7} \equiv 1$. This implies: \begin{align*} \alpha_{9(n+1)+8} &\equiv u - \frac{\alpha_{3(n+1)+3} + u - \alpha_{9(n+1)+5}\beta_{9(n+1)+7}}{\beta_{9(n+1)+8}} \\ &\equiv u - (\alpha_{3(n+1)+3} + u - \alpha_{9(n+1)+5}) \\ &\equiv u - (\alpha_{3(n+1)+3} + u - \alpha_{3(n+1)+3}) \equiv 0. \end{align*} This then implies $\alpha_{9(n+1)+9} \equiv u - \alpha_{9(n+1)+8} \equiv u$ and $\beta_{9(n+1)+9} \equiv 1 - \alpha_{9(n+1)+8}\alpha_{9(n+1)+9} \equiv 1$. Thus the formula also holds for $k = n+1$, and the proof by induction is complete. \end{proof} For the other cases we provide similar lemmata which can be proven in the same way by induction. We leave their proofs to the interested reader. \begin{lemma} If $u^2 \equiv -3 \pmod p$ and $v \equiv -1 \pmod p$ for odd prime $p$, then the sequences $(\alpha_i)_{i\in\mathbb{N}}$ and $(\beta_i)_{i\in\mathbb{N}}$ are given by the formula for all $k \geq 0$: \begin{gather*} \alpha_{3k+1} \equiv -u,\; \alpha_{3k+2} + \alpha_{3k+3} \equiv u \pmod p;\\ \alpha_{9k+2} \equiv 0,\; \alpha_{9k+5} \equiv \alpha_{3k+2},\; \alpha_{9k+8} \equiv u \pmod p;\\ \beta_1 \equiv 1,\; \beta_2 \equiv -2,\; \beta_{k+3} \equiv -1 \pmod p. 
\end{gather*} \end{lemma} \begin{lemma}\label{lem3} If $u \equiv \varphi \pmod p$ and $v \equiv 0 \pmod p$ where $\varphi\in\mathbb{Z}$ satisfies $\varphi^2 + \varphi + 1 \equiv 0 \pmod p$, then the sequences $(\alpha_i)_{i\in\mathbb{N}}$ and $(\beta_i)_{i\in\mathbb{N}}$ are given by the formula for all $k\ge 0$: \begin{gather*} \alpha_{3k+1} \equiv -\varphi,\; \alpha_{3k+2} + \alpha_{3k+3} \equiv \varphi \pmod p;\\ \alpha_{9k+2} \equiv -1,\; \alpha_{9k+5} \equiv \alpha_{3k+2},\; \alpha_{9k+8} \equiv -\varphi^2 \pmod p;\\ \beta_1 \equiv 1,\; \beta_2 \equiv \varphi^2,\; \beta_{3k+3} \equiv -\varphi^2,\; \beta_{3k+4} + \beta_{3k+5} \equiv \varphi^2 \pmod p;\\ \beta_{9k+1} \equiv \beta_{3k+1},\; \beta_{9k+4}\equiv-\varphi,\; \beta_{9k+7} \equiv -1 \pmod p. \end{gather*} \end{lemma} One can easily derive from Lemma~\ref{lem3} that for $i\ge 3$ the value of $\beta_i$ is congruent to either $-1, -\varphi$ or $-\varphi^2$ modulo $p$, hence it never equals zero. \begin{lemma}\label{lem4} If $u \equiv \varphi \pmod p$ and $v \equiv -1\pmod p$ where $\varphi\in\mathbb{Z}$ satisfies $\varphi^4 + 4\varphi^2 + 1 \equiv 0 \pmod p$ and $p$ is an odd prime, then the sequences $(\alpha_i)_{i\in\mathbb{N}}$ and $(\beta_i)_{i\in\mathbb{N}}$ are given by the formula for $k\ge 0$: \begin{gather*} \alpha_{3k+1} \equiv -\varphi,\; \alpha_{3k+2} + \alpha_{3k+3} \equiv \varphi \pmod p;\\ \alpha_{9k+2} \equiv -\varphi^{-1},\; \alpha_{9k+5} \equiv \alpha_{3k+3},\; \alpha_{9k+8} \equiv \varphi+\varphi^{-1} \pmod p;\\ \beta_1 \equiv 1,\; \beta_2 \equiv \varphi^2 + 1,\; \beta_{3k+3} \equiv \varphi^{-2},\; \beta_{3k+4} + \beta_{3k+5} \equiv \varphi^2 + 1 \pmod p;\\ \beta_{9k+1} \equiv \beta_{3k+1},\; \beta_{9k+4}\equiv\varphi^2,\; \beta_{9k+7} \equiv 1 \pmod p. \end{gather*} \end{lemma} One can check that for odd prime $p$ the values $\varphi^2$ and $\varphi^2+1$ are not congruent to zero modulo $p$. 
One can derive from Lemma~\ref{lem4} that for $i\ge 3$ the value of $\beta_i$ is congruent to either $\varphi^{-2}, \varphi^2$ or 1 modulo $p$. Hence it never equals zero. \begin{lemma}\label{lem5} If $u \equiv \varphi \pmod p$ and $v \equiv \delta \pmod p$ where $\delta, \varphi\in \mathbb{Z}$ satisfy $\delta^2 - \delta + 1 \equiv 0 \pmod p$, $\varphi^2 \equiv 2\delta \pmod p$ and $p$ is an odd prime, then the sequences $(\alpha_i)_{i\in\mathbb{N}}$ and $(\beta_i)_{i\in\mathbb{N}}$ are given by the formula for $k\ge 0$: \begin{gather*} \alpha_{3k+1} \equiv -\varphi,\; \alpha_{3k+2} + \alpha_{3k+3} \equiv \varphi \pmod p;\\ \alpha_{9k+2} \equiv \frac{\varphi}{\delta},\; \alpha_{9k+5} \equiv \alpha_{3k+3},\; \alpha_{9k+8} \equiv \varphi\delta \pmod p;\\ \beta_1 \equiv 1,\; \beta_2 \equiv \delta,\; \beta_{3k+3} \equiv -\delta,\; \beta_{3k+4} + \beta_{3k+5} \equiv \delta \pmod p\\ \beta_{9k+1} \equiv \beta_{3k+1},\; \beta_{9k+4}\equiv-\frac{1}{\delta},\; \beta_{9k+7} \equiv 1 \pmod p. \end{gather*} \end{lemma} Lemma~\ref{lem5} implies that for all $i\ge 3$ the value of $\beta_i$ is congruent to either $-\delta, -\delta^{-1}$ or 1 modulo $p$, hence it never equals zero. \begin{lemma}\label{lem6} If $u \equiv 0 \pmod p$ and $v \equiv \delta \pmod p$ where $\delta\in \mathbb{Z}$ satisfies $\delta^2 + \delta + 1 \equiv 0 \pmod p$, then the sequences $(\alpha_i)_{i\in\mathbb{N}}$ and $(\beta_i)_{i\in\mathbb{N}}$ are given by the formula for $k\ge 0$: \begin{gather*} \alpha_{k} \equiv 0 \pmod p;\\ \beta_1 \equiv 1,\; \beta_2 \equiv -\delta,\; \beta_{3k+3} \equiv \delta,\; \beta_{3k+4} + \beta_{3k+5} \equiv -\delta \pmod p;\\ \beta_{9k+1} \equiv \beta_{3k+1},\; \beta_{9k+4}\equiv \delta^{-1},\; \beta_{9k+7} \equiv 1 \pmod p. \end{gather*} \end{lemma} Under the conditions of Lemma~\ref{lem6}, for all $i\ge 3$ the value of $\beta_i$ is congruent to either $\delta, \delta^{-1}$ or 1 modulo $p$. Hence it never equals zero. 
\begin{lemma}\label{lem7} If $u \equiv \pm 2\delta^2 \pmod p$ and $v \equiv \delta \pmod p$ where $\delta\in \mathbb{Z}$ satisfies $\delta^2 + \delta + 1 \equiv 0 \pmod p$ and $p > 3$ is an odd prime, then the sequences $(\alpha_i)_{i\in\mathbb{N}}$ and $(\beta_i)_{i\in\mathbb{N}}$ are given by the formula for $k\ge 0$: \begin{gather} \alpha_{3k+1} \equiv -u,\; \alpha_{3k+2} + \alpha_{3k+3} \equiv u \pmod p;\label{lem7_eq1}\\ \alpha_{9k+2} \equiv -\frac{2\delta + 4}{3},\; \alpha_{9k+5} \equiv \frac{u + \alpha_{3k+2}}{3},\; \alpha_{9k+8} \equiv -\frac{4\delta + 2}{3} \pmod p;\\ \beta_1 \equiv 1,\; \beta_2 \equiv 3\delta,\; \beta_{3k+4} + \beta_{3k+5} \equiv 3\delta \pmod p\\ \beta_{9k+1} \equiv \beta_{3k+1},\; \beta_{9k+4}\equiv -\frac{3}{\delta},\; \beta_{9k+7} \equiv -3 \pmod p.\\ \beta_{9k+3} \equiv -\frac{\delta}{3},\; \beta_{9k+6}\equiv \frac{\beta_{3k+3}}{9},\; \beta_{9k+9} \equiv -\frac{\delta}{3} \pmod p\label{lem7_eq5}. \end{gather} \end{lemma} Lemma~\ref{lem7} implies that for all $i\ge 3$ the value of $\beta_i$ is congruent to either $-\frac{\delta}{3}, -3\delta^{-1}, -3$ or $\frac{\beta_{i/3+1}}{9}$ modulo $p$, the latter of which is inductively never zero. Hence none of $\beta_i$ equals zero. Since the proof of Lemma~\ref{lem7} involves the most tedious computations compared to the other lemmata, we also outline its proof here. \begin{proof} Again we omit the $\pmod p$ in each congruence for this proof. We will also prove it only for the case $u \equiv 2\delta^2$, as the proof of the other case is essentially the same. 
We use the formulae~\eqref{init_d3} and~\eqref{recur_d3} to compute the first 9 values of $\alpha_i$ and $\beta_i$: $$ \alpha_1 \equiv -u,\; \alpha_2 \equiv -\frac{2\delta + 4}{3},\; \alpha_3 \equiv -\frac{4\delta + 2}{3},\; \beta_1\equiv 1,\; \beta_2\equiv 3\delta,\; \beta_3\equiv -\frac{\delta}{3} $$$$ \alpha_4 \equiv -u,\; \beta_4\equiv -\frac{3}{\delta},\; \beta_5\equiv -3,\; \alpha_5 \equiv -\frac{8\delta + 10}{9},\; \alpha_6 \equiv -\frac{10\delta + 8}{9},\; \beta_6\equiv -\frac{\delta}{27}, $$ $$ \alpha_7 \equiv -u,\; \beta_7\equiv -3,\; \beta_8\equiv -\frac{3}{\delta},\; \alpha_8 \equiv -\frac{4\delta + 2}{3},\; \alpha_9 \equiv -\frac{2\delta + 4}{3},\; \beta_9\equiv -\frac{\delta}{3}.$$ These all clearly satisfy the equations~\eqref{lem7_eq1} --~\eqref{lem7_eq5}, except for $\alpha_5$, so let us check this. $$ \frac{u+\alpha_2}{3}\equiv \frac{2\delta^2 - \frac{2\delta+4}{3}}{3}\equiv -\frac{8\delta+10}{9}\equiv \alpha_5. $$ The base case $k=0$ has been proved. Now we assume that the equations~\eqref{lem7_eq1} --~\eqref{lem7_eq5} are satisfied for $\alpha_i,\beta_i$, $1\le i\le 9k$ and verify them for $9k+1\le i\le 9k+9$. First, it is obvious that $\alpha_{9k+1} \equiv \alpha_{9k+4} \equiv \alpha_{9k+7} \equiv-u$ as they are all of the form $\alpha_{3k+4}$. Second, by~\eqref{recur_d3} we have $$\beta_{9k+1} \equiv \frac{\beta_{3k+1}}{\beta_{9k}\beta_{9k-1}} \equiv \frac{\beta_{3k+1}}{-\frac{\delta}{3}\cdot (3\delta - \beta_{9k-2})} \equiv \beta_{3k+1}$$ by the induction hypothesis. Then the equation~\eqref{recur_d3} implies that $$ \beta_{9k+1}+\beta_{9k+2}\equiv \beta_{9k+4}+\beta_{9k+5}\equiv\beta_{9k+7}+\beta_{9k+8}\equiv u^2-v\equiv 3\delta. $$ In particular, this together with $\beta_{9k+1}\equiv \beta_{3k+1}$ implies that $\beta_{9k+2}\equiv \beta_{3k+2}$. 
Third, we compute: \begin{align*} \alpha_{9k+2} &\equiv u - \frac{\alpha_{3k+1} + uv - \alpha_{9k - 1} \beta_{9k + 1}}{\beta_{9k + 2}}\\ &\equiv 2\delta^2 - \frac{-2\delta^2 + 2 + \frac{4\delta+2}{3} \beta_{9k + 1}}{3\delta - \beta_{9k + 1}} \\ &\equiv \frac{-(2\delta+4)(3\delta - \beta_{9k+1})}{3(3\delta - \beta_{9k + 1})}\\ &\equiv -\frac{2\delta+4}{3}. \end{align*} Thus we have $\alpha_{9k+3} \equiv u - \alpha_{9k+2} \equiv -\frac{4\delta + 2}{3}$. Finally, we use the last equation in~\eqref{recur_d3} to compute $\beta_{9k+3}$: $$ \beta_{9k+3} \equiv \delta - \alpha_{9k+2}\alpha_{9k+3} \equiv \delta - \frac{(2\delta+4)(4\delta+2)}{9} \equiv -\frac{\delta}{3}. $$ Fourth, we similarly continue to compute $\beta_{9k+4} \equiv \frac{\beta_{3k+2}}{\beta_{9k+3}\beta_{9k+2}} \equiv \frac{\beta_{3k+2}}{-\frac{\delta}{3}\cdot \beta_{3k+2}} \equiv -\frac{3}{\delta}$. This then implies that $\beta_{9k + 5} \equiv 3\delta + \frac{3}{\delta} \equiv -3$. Now we compute: \begin{align*} \alpha_{9k+5} &\equiv u - \frac{\alpha_{3k+2} + u \delta - \alpha_{9k + 2} \beta_{9k + 4}}{\beta_{9k + 5}}\\ &\equiv 2\delta^2 - \frac{\alpha_{3k+2} + 2 - \frac{2\delta+4}{3} \cdot \frac{3}{\delta}}{-3}\\ &\equiv \frac{2\delta^2 + \alpha_{3k+2}}{3} \equiv \frac{u + \alpha_{3k+2}}{3} \end{align*} This then implies $$ \alpha_{9k+6} \equiv u -\frac{u + \alpha_{3k+2}}{3} \stackrel{\eqref{lem7_eq1}}\equiv \frac{u + \alpha_{3k+3}}{3} $$ and then we compute \begin{align*} \beta_{9k+6} &\equiv \delta - \alpha_{9k+5}\alpha_{9k+6}\\ &\equiv \delta - \frac{2\delta^2 + \alpha_{3k+2}}{3}\cdot \frac{2\delta^2 + \alpha_{3k+3}}{3} \\ &\equiv \frac{5\delta-2\delta^2(\alpha_{3k+3}+\alpha_{3k+2})-\alpha_{3k+3}\alpha_{3k+2}}{9} \\ &\stackrel{\eqref{lem7_eq1}}\equiv \frac{\delta-\alpha_{3k+3}\alpha_{3k+2}}{9}\; \stackrel{\eqref{recur_d3}}\equiv\; \frac{\beta_{3k+3}}{9}. \end{align*} We finish the proof by computing the last triple of $\alpha$'s and $\beta$'s. 
We verify that $\beta_{9k+7} \equiv \frac{\beta_{3k+3}}{\beta_{9k+6}\beta_{9k+5}} \equiv -3$ and $\beta_{9k + 8} \equiv 3\delta - \beta_{9k+7} \equiv -\frac{3}{\delta} $. Then we use the already known values of $\alpha_{9k+5}, \beta_{9k+7}, \beta_{9k+8}$ to compute: \begin{align*} \alpha_{9k+8} &\equiv u - \frac{\alpha_{3k+3} + u \delta - \alpha_{9k +5} \beta_{9k + 7}}{\beta_{9k + 8}}\\ &\equiv 2\delta^2 - \frac{\alpha_{3k+3} + 2 + 2\delta^2 + \alpha_{3k+2} }{-\frac{3}{\delta}}\\ &\stackrel{\eqref{lem7_eq1}}\equiv -\frac{4\delta + 2}{3}. \end{align*} This then implies $\alpha_{9k+9} \equiv u - \alpha_{9k+8} \equiv -\frac{2\delta+4}{3}$ and $$ \beta_{9k+9} \equiv \delta - \alpha_{9k+8}\alpha_{9k+9} \equiv -\frac{\delta}{3}. $$ This finishes the induction step, and thus the proof by induction is complete. \end{proof} All the cases~\eqref{case1} --~\eqref{case7} are now covered, and the proof of Theorem~\ref{th1} is complete. \endproof \section{Further remarks} In view of Theorem~\ref{th1} one can ask a natural question: are~\eqref{case1} --~\eqref{case7} the only local conditions on $u,v$ which guarantee that all the partial quotients of $g_{u,v}(z)$ are linear? In an attempt to answer this question, we conduct a computer search of all primes $p$ between 3 and 1000 and all pairs $(u,v)\in\mathbb{F}_p^2$. The search reveals that every pair that did not seem to ever produce a value of 0 satisfies one of the conditions~\eqref{case1} --~\eqref{case7}. These findings, while heuristic, suggest that Theorem~\ref{th1} covers all local conditions which guarantee that the series $g_{u,v}$ is badly approximable. Also, a quick search reveals that around $82\%$ of integer pairs $(u,v) \in [-1000,1000]^2$ satisfy at least one of the conditions~\eqref{case1} --~\eqref{case7}. This indicates that the majority of pairs are covered by Theorem~\ref{th1}. However there are still plenty of pairs for which the conjecture is yet to be verified. One of the smallest such pairs is $(u,v) = (2,-2)$.
1,108,101,565,853
arxiv
\section{Introduction} \begin{figure}[t] \centering \includegraphics[width=0.5\columnwidth]{./figures/asymmetry.png} \caption{A pictorial depiction of triplet scalar $\Delta$ partial decay giving rise to common origin of asymmetries in the lepton and DM sectors. \label{fig:asymm}} \end{figure} The existence of dark matter (DM) is supported by strong gravitational evidence, {\it i.e.} from galaxy rotation curves, lensing and large scale structures. This implies that the DM particle should be electrically neutral, massive and stable on cosmological time scales. However its intrinsic properties are largely unknown and lead to physics beyond the Standard Model (SM), based on the gauge group $SU(3)_C\times SU(2)_{L}\times U(1)_Y$. In contrast, its relic abundance, $\Omega_{\rm DM} \sim 0.23$, is well measured by the WMAP satellite~\cite{Komatsu:2010fb}. However, the mechanism that provides its relic abundance is not yet established. Another issue concerning the SM is the origin of the tiny amount of visible matter in the Universe which is in the form of baryons with $\Omega_{\rm b} \sim 0.04$, which could arise from a baryon asymmetry $n_B/n_\gamma \sim 6.15 \times 10^{-10}$, as established by WMAP combined with the big-bang nucleosynthesis (BBN) measurements. In the standard cosmological picture the early Universe has gone through a period of inflation and then reheated to a temperature at least larger than the epoch of BBN. Therefore the observed baryon asymmetry of the Universe (BAU) and the DM component must have been generated in the thermal bath after reheating. If the reheating temperature is less than the electroweak (EW) scale ($\mathcal{O}(100) {\rm \ GeV}$), then it is difficult to generate both DM and BAU~\cite{Kohri:2009ka}. 
On the other hand, if the reheating temperature is larger than the EW scale, then a handful of mechanisms are available which can give rise to required BAU, while leaving a large temperature window for creating $\Omega_{\rm DM}$ from the DM species, which is set by freeze-out, a rather independent mechanism with respect to the dynamics of generating BAU. Indeed in most of the cosmological model, the energy density of baryons and of DM are independently determined. The fact that the energy density of DM is about a factor of 5 with respect to the baryonic one could be a hint that both sectors share a common origin and the present relic density of WIMP is also generated by an asymmetry. Over the years a large number of possibilities for asymmetric DM have been proposed~\cite{Nussinov:1985xr,Barr:1990ca,Dodelson:1991iv,Kaplan:1991ah,Kuzmin:1996he,Fujii:2002aj,Oaknin:2003uv,Hooper:2004dc,Kitano:2004sv,Cosme:2005sb,Farrar:2005zd,Roszkowski:2006kw,Kaplan:2009ag,Kohri:2009yn,An:2009vq,Frandsen:2010yj,Feldstein:2010xe,An:2010kc,Cohen:2010kn,Shelton:2010ta,Davoudiasl:2010am,Gu:2010ft,Blennow:2010qp,McDonald:2010rn,Hall:2010jx,Dutta:2010va,Haba:2011uz,Falkowski:2011xh,Chun:2011cc,Kang:2011wb,Heckman:2011sw,Frandsen:2011kt,Buckley:2011kk,Iminniyaz:2011yp,MarchRussell:2011fi,DelNobile:2011je}. The most stringent constraint on asymmetric DM candidate comes from neutron stars and white dwarfs in globular cluster, which exclude asymmetric scalar DM below $16$ GeV~\cite{McDermott:2011jp,Kouvaris:2011fi}. In this paper we consider a relatively heavy asymmetric scalar doublet DM (SDDM) whose stability is provided by a remnant $Z_2$ flavour symmetry inspired by the Inert Doublet Model~\cite{Deshpande:1977rw,Ma:2006km,Barbieri:2006dq,LopezHonorez:2006gr,Hambye:2007vf}. 
Indeed the asymmetry in this model is rather natural: in the limit in which the number violating coupling of DM to Higgs goes to zero the $Z_2$ symmetry that protects the DM is elevated to a global $U(1)_{\rm PQ}$ Peccei-Quinn symmetry. We show that the observed relic abundance of SDDM and BAU originate naturally in a type II seesaw scenario~\cite{Magg:1980ut,Cheng:1980qt,Schechter:1980gr,Gelmini:1980re,Lazarides:1980nt,Mohapatra:1980yp}, as pictorially depicted in figure~\ref{fig:asymm}. To accomplish the unification of asymmetries we extended the SM to include a $SU(2)_L$ scalar triplet $\Delta$ and an inert scalar doublet $\chi$. The partial decays $\Delta \to LL$, where $L$ is the $SU(2)_L$ lepton doublet, and $\Delta \to \chi\chi$ then induce the asymmetry simultaneously in both sectors. The lepton asymmetry is then transferred to a baryon asymmetry through the sphaleron transitions. In the low energy effective theory the induced vacuum expectation value (vev) of the same Higgs triplet gives rise to sub-eV Majorana masses, as required by oscillation experiments~\cite{Fukuda:2001nk,Ahmad:2002ka,Eguchi:2003gg}, to the three active neutrinos through the lepton number violating interaction $\Delta L L + \Delta^\dagger H H$, where $H$ is the SM Higgs. Thus a triple unification of neutrino mass, asymmetric DM and baryon asymmetry of the Universe is achieved in a type-II seesaw scenario. We show that in case of a SDDM $\chi_0$, the neutral component of the inert scalar doublet $\chi$, the asymmetry in the DM sector gets washed out below EW phase transition by fast oscillations between $\chi_0$ and its complex conjugate field $\overline{\chi_0}$. This sets a limit on the mass scale of $\chi_0$ of $M_{\chi_0} \gtrsim 2 {\rm \ TeV}$, so that the DM freezes out before oscillations begin to occur. 
Such heavy asymmetric SDDM are quite natural to explain positron anomalies at PAMELA and FermiLAT, while suppressing non-observation of antiproton fluxes~\cite{Nezri:2009jd}. The small number violating quartic coupling $\lambda_5$ of $\chi$ (see section 2.2) to the SM Higgs naturally provides $\mathcal{O}(\rm{keV})$ mass splitting between DM particle and its excited state leading to inelastic interaction of detector nuclei and DM. Indeed by definition during inelastic scattering a DM particle that scatters off a nucleus produces a heavier state. It has been introduced by~\cite{TuckerSmith:2001hy} for reconciling the annual modulation at DAMA~\cite{Bernabei:2010mq} experiment and the null results at other experiments, {\it e.g.}~\cite{MarchRussell:2004uf,MarchRussell:2008dy,Arrenberg:2008wy,Cui:2009xq,Alves:2009nf,Finkbeiner:2009mi,Finkbeiner:2009ug,Farina:2011bh,Schwetz:2011xm}. Here we re-investigate~\cite{Arina:2009um} the compatibility of the SDDM, explaining the DAMA signal with the most recent exclusion bounds of CDMS-II~\cite{Ahmed:2010hw}, CRESST-II~\cite{Angloher:2008jj} and Xenon100~\cite{Aprile:2011ts}. The SDDM appears to be strongly constrained by the new Xenon100 data. In analogy to SDDM, we discuss a similar model where the DM candidate is given by a vector like fermionic doublet, odd under a $Z_2$ flavour symmetry, with mass ${\cal O}(100) {\rm \ GeV}$, and hence to be called fermion doublet DM (FDDM). It will arise that the asymmetric inelastic FDDM is appropriate to explain the high precision annual modulation at DAMA while satisfying the latest constraint from Xenon100 experiment. In that case the small mass splitting arises through a small Majorana mass of the dark fermion doublet given by the triplet $\Delta$. The outline of the paper is as follows. 
The next section presents the particle physics content of the scalar triplet model which achieve a triple unification of neutrino mass, asymmetric inert doublet (scalar and fermion) DM and the observed BAU. After briefly commenting about the neutrino mass we describe the most general scalar potential for triplet scalar, SDDM and SM Higgs. We also address the issue of generating asymmetries in case of a vector like FDDM model. In section~\ref{sec:constraints} we broadly discuss the constraints on asymmetric doublet scalar and fermion dark matter. In section~\ref{sec:lepto} the asymmetries in the baryonic and DM sector are computed and possible wash-out mechanisms are discussed. The Boltzmann equations are solved numerically using Monte-Carlo-Markov-Chain (MCMC) techniques with CP asymmetries and branching fractions as free parameters of the theory. Section~\ref{sec:inel} presents the constraints for inelastic scattering on the model parameter space from the current direct search experiments, using bayesian inference and marginalising over the velocity distribution of the DM particles. We then come to the concluding remarks in section~\ref{sec:concl}. The technical details about bayesian analysis and MCMC are left to~\ref{app1}. In~\ref{app2} we show the results of the analysis of inelastic scattering in the case of the standard maxwellian halo and fixed astrophysical parameters. \section{Scalar Triplet Model providing darko-lepto-genesis with Non-zero Neutrino Masses}\label{sec:model} It is known that the bilinear L-violating coupling ($\Delta L= 2$) of scalar triplet to lepton and Higgs leads to neutrino mass via type II seesaw. Moreover, the out-of-equilibrium decay of triplets through the same coupling can give rise to lepton asymmetry in the early Universe~\cite{Ma:1998dx,Hambye:2000ui}. 
Here the additional decay of scalar triplets to a pair of inert scalar doublets ($\chi$) or a pair of vector like inert fermion doublets ($\psi$) simultaneously explain the asymmetries in visible and dark sector. In our convention the scalar triplet is defined as $\Delta = (\Delta^{++}, \Delta^{+},\Delta^0)$, with hypercharge $Y=1$. \subsection{Triplet Seesaw and Non-zero Neutrino Masses} Since the lepton number is a conserved quantum number within the SM, the masses of neutrinos are exactly zero upto all orders in perturbation theory. On the other hand, oscillation experiments confirm that the neutrinos are massive, however small, and hence they mix among themselves. This non-trivial result can be minimally explained by incorporating a heavy triplet scalar $\Delta$ to the SM of particle physics. The lepton number violating ($\Delta L= 2$) interaction of $\Delta$ with SM fields is given by the Lagrangian: \begin{equation} \mathcal{L} \supset M_\Delta^2 \Delta^\dagger \Delta + \frac{1}{\sqrt{2}} \left[ \mu_H \Delta^\dagger H H + f_{\alpha \beta} \Delta L_{\alpha} L_{\beta} + {\rm h.c.} \right]\,, \label{Lag-1} \end{equation} where $H$ and $L$ are the SM Higgs and lepton doublets respectively. After the EW phase transition $\Delta $ acquires a small induced vev, given by \begin{equation} \langle \Delta \rangle = -\mu_H \frac{v^2}{\sqrt{2} M_\Delta^2}, \label{Delta-vev} \end{equation} where $v=\langle H \rangle = 246$ GeV. The vev of $\Delta$ is required to satisfy \begin{equation} \rho \equiv \frac{M_W^2}{M_Z^2 \cos^2 \theta} = \frac{1 + 2 x^2}{1+4 x^2}\approx 1\,, \end{equation} where $x=\langle \Delta \rangle/v$. The above constraint implies that $\langle \Delta \rangle < {\cal O} (1) {\rm \ GeV}$. 
The trilinear coupling $\Delta L L$ then gives rise to the Majorana mass matrix for the three flavours of light neutrinos as: \begin{equation} \left( M_\nu \right)_{\alpha \beta} = \sqrt{2} f_{\alpha \beta} \langle \Delta \rangle =f_{\alpha \beta} \left( \frac{-\mu_H v^2}{M_\Delta^2} \right) \,. \end{equation} Hence for $\langle \Delta \rangle < {\cal O} (1) {\rm \ GeV}$ a wide range of allowed values of $f_{\alpha \beta}$ gives rise to the required neutrino masses. For $f_{\alpha \beta} \approx {\cal O}(1)$, the required value of $\langle \Delta \rangle$ satisfying neutrino masses can be obtained by choosing $\mu_H \sim M_\Delta \sim 10^{12}$ GeV. This implies that the scale of lepton number violation is very high. However, in the presence of an extra scalar triplet the lepton number violating scale can be brought down to TeV scales without fine-tuning, so that its dilepton signatures can be studied at the LHC~\cite{Sahu:2007uh,McDonald:2007ka,Majee:2010ar}. \subsection{Inelastic SDDM in Triplet Seesaw Model}\label{sec:SDDM} We now extend the Lagrangian (\ref{Lag-1}) by including an inert scalar doublet $\chi \equiv (\chi^+ \chi^0)^T$ and impose a $Z_2$ symmetry under which $\chi$ is odd while all other fields are even. As a result $\chi$ does not couple to SM fermions and hence serves as a candidate for DM. 
The interactions between $\Delta$, $\chi$ and $H$ can be given by the scalar potential: \begin{eqnarray} V(\Delta, H, \chi) &=& M_\Delta^2 \Delta^\dagger \Delta + \lambda_\Delta (\Delta^\dagger \Delta)^2 + M_H^2 H^\dagger H + \lambda_H (H^\dagger H)^2 \nonumber\\ &+& M_\chi^2 \chi^\dagger \chi + \lambda_\chi (\chi^\dagger\chi)^2 + \left[ \mu_H \Delta^\dagger H H + \mu_\chi \Delta^\dagger \chi \chi + {\rm h.c.}\right]\nonumber\\ &+& \lambda_3 |H|^2 |\chi|^2 + \lambda_4 |H^\dagger \chi|^2 + \frac{\lambda_5}{2} \left[ (H^\dagger \chi)^2 + {\rm h.c.} \right]\,, \label{scalar-potential} \end{eqnarray} where we have neglected the quartic terms involving $\Delta-H-\chi$ as those are not relevant for our discussion since the vev of $\Delta$ is small. The vacuum stability of the potential requires $\lambda_\Delta, \lambda_H, \lambda_\chi > 0$ and $\lambda_L \equiv \lambda_3 + \lambda_4 -|\lambda_5| > - 2\sqrt{\lambda_\chi \lambda_H}$. We further assume that $M_\chi^2 > 0$, so that $\chi$ does not develop any vev. This is required in order to distinguish the visible matter from DM given by the neutral component of the doublet $\chi$. Hence the true vacuum of the potential is given by: \begin{equation} \langle H \rangle = v;~~~ \langle \chi \rangle = 0~~~ {\rm and}\ \ \ \langle \Delta \rangle = u\,. \end{equation} Since $\Delta$ is heavy, its vev is small as demonstrated by Eq.~\ref{Delta-vev} and hence does not play any role in the low energy dynamics. Therefore, in what follows we neglect the dynamics of $\Delta $ in low energy phenomena. The perturbative expansion of the fields around the minimum is: \begin{equation} H = \pmatrix{0\cr\\ \frac{v + h }{\sqrt{2}}} ~~~{\rm and}~~~ \chi= \pmatrix{\chi^+\cr\\ \frac{S + i A} {\sqrt{2}}}\,. 
\end{equation} Thus the low energy spectrum of the theory constitutes two charged scalars $\chi^\pm$, two real scalars $h,S$ and a pseudo scalar $A$, whose masses are given by: \begin{eqnarray} M_{\chi^\pm}^2 &=& M_\chi^2 + \lambda_3 \frac{v^2}{2}\,,\nonumber\\ M_h^2 &=& 2 \lambda_H v^2 \,,\nonumber\\ M_S^2 &=& M_\chi^2 + (\lambda_3 + \lambda_4 + \lambda_5) \frac{v^2}{2}\,,\nonumber\\ M_A^2 &=& M_\chi^2 + (\lambda_3 + \lambda_4 - \lambda_5) \frac{v^2}{2}\,. \end{eqnarray} Depending on the sign of $\lambda_5$, either $S$ or $A$ constitutes the DM. Let us assume that $\lambda_5$ is negative and hence $S$ is the lightest scalar particle. The next to lightest scalar particle is then $A$. The mass splitting between them is given by \begin{equation} \Delta M^2 \equiv M_S^2 -M_A^2 = \lambda_5 v^2\,. \end{equation} From which we can deduce the coupling \begin{equation} \lambda_5 = \frac{2 M_S \delta}{v^2}\,, \end{equation} where $\delta= M_S - M_A$. This plays a key role in the direct searches of DM as we will discuss later. Notice that in the limit $\lambda_5 \to 0$ in the scalar potential (\ref{scalar-potential}), there is no mass splitting between $S$ and $A$ and the two degrees of freedom can be re-expressed as $\chi_0$ and its complex conjugate $\bar{\chi_0}$. In this limit we discuss the asymmetry between $\chi_0$ and $\bar{\chi_0}$ via the decay of the triplet $\Delta$. We then derive upper bound on DM number violating processes, namely $\chi \chi \to H^\dagger H^\dagger $ involving the coupling $\lambda_5$. The smallness of $\lambda_5$ can be attributed to the breaking of a global $U(1)_L$ symmetry under which $\chi$ carries a lepton number +1. Indeed in the absence of $\mu_H \Delta^\dagger H H$ and $\lambda_5 (H^\dagger \chi)^2+ {\rm h.c.}$ terms in the Lagrangian, there is a global $U(1)_L$ symmetry. 
The other parameters $\mu_H$ and $\mu_\chi$, which involves in the DM number violating processes $\chi \chi \to \Delta \to H H$ and $\chi \chi \to H \to \bar{f} f$, $f$ being the SM fermion, are not necessarily to be small as these processes are suppressed by the large mass scale of $\Delta$. \subsection{Inelastic FDDM in Triplet Seesaw Model}\label{sec:FDDM} Let us replace the inert scalar doublet $\chi $ by a vector like fermion doublet $\psi \equiv (\psi_{\rm DM}, \psi_-)$ of hypercharge $Y=-1/2$. With the same $Z_2$ symmetry, under which $\psi$ is odd, the neutral component of $\psi$ {\it i.e.} $\psi_{\rm DM}$ can be a candidate of DM. The relevant Lagrangian including the triplet scalar $\Delta$ is: \begin{equation} \fl -\mathcal{L} \supset M_\Delta^2 \Delta^\dagger \Delta + M_D \overline{\psi} \psi + \frac{1}{\sqrt{2}} \left[ \mu_H \Delta^\dagger H H + f_{\alpha \beta} \Delta L_{\alpha} L_{\beta} + g \Delta \psi \psi + {\rm h.c.} \right]\,, \label{Lag-DM} \end{equation} where $M_D \sim {\cal O}(100) {\rm \ GeV} $ is the Dirac mass of $\psi$. The bilinear DM coupling $\Delta \psi\psi$ can be re-expressed as: \begin{eqnarray} \fl \frac{1}{\sqrt{2}} g\Delta \psi \psi & \equiv & \frac{1}{\sqrt{2}} g \overline{\psi^c} i \tau_2 \Delta \psi \nonumber\\ & = & -\frac{1}{2} g \left[ \sqrt{2} (\overline{\psi_-^c} \psi_- \Delta^{++}) + (\overline{\psi_-^c} \psi_{\rm DM} + \overline{\psi_{\rm DM}^c}\psi_- )\Delta^+ - \sqrt{2} ( \overline{ \psi_{\rm DM}^c} \psi_{\rm DM} \Delta^0) \right]\,, \nonumber \\ \end{eqnarray} where we have used the matrix form of the triplet scalar: \begin{equation} \Delta = \pmatrix{ \frac{\Delta^+}{\sqrt{2}} & \Delta^{++}\cr \Delta^0 & -\frac{\Delta^+}{\sqrt{2}} }\,. \end{equation} After EW symmetry breaking the neutral component of $\Delta$ acquires an induced vev and hence give rise a small Majorana mass to $\psi$, $ m = \sqrt{2} g \langle \Delta^0 \rangle$. 
Therefore the Dirac spinor $\psi_{\rm DM}$ can be written as the sum of two Majorana spinors $(\psi_{\rm DM})_L$ and $(\psi_{\rm DM})_R$. The Lagrangian for the DM mass becomes: \begin{eqnarray} -\mathcal{L}_{\rm DM mass} &=& M_D \left[ \overline{(\psi_{\rm DM})_L} (\psi_{\rm DM})_R + \overline{ (\psi_{\rm DM})_R} (\psi_{\rm DM})_L \right] \nonumber\\ && + m \left[ \overline{ (\psi_{\rm DM})_L^c} (\psi_{\rm DM})_L + \overline{ (\psi_{\rm DM})_R^c} (\psi_{\rm DM})_R \right] \,. \end{eqnarray} This implies that there is a $2\times 2$ mass matrix for the DM in the basis $\{(\psi_{\rm DM})_L, (\psi_{\rm DM})_R\}$. By diagonalising it two mass eigenstates $(\psi_{\rm DM})_1$ and $(\psi_{\rm DM})_2$ arise, with masses $M_{\psi_1}=M_D -m$ and $M_{\psi_2}=M_D + m$. Thus the mass difference between the two states is $\delta = 2 m \sim {\cal O} (100) {\rm \ keV}$, as required by the direct search experiments. We will come back to this issue while discussing inelastic scattering of DM with nucleons. Note that in this case the inelastic scattering of DM with nucleons ( {\it i.e.} $(\psi_{\rm DM})_1 N \to (\psi_{\rm DM})_2 N $) via SM $Z$-exchange dominates over elastic scattering, as in the case of the scalar candidate. Now we will briefly comment on the dark matter asymmetry. Similar to the decay of $\Delta \to \chi \chi $, the decay of $\Delta \to \psi \psi $ will produce an asymmetry in the dark sector. Since $\psi$ is odd under a $Z_2$ flavour symmetry, it will not couple to any other SM fields and hence the asymmetry will remain in $\psi_{\rm DM}$ forever; namely, in this case there are no strong wash-out processes. \subsection{Sub-eV Neutrino Mass versus keV Majorana mass of FDDM} Notice that in the case of FDDM, the induced vev of $\Delta$ introduces two mass scales. 
One is the Majorana mass of neutrinos, {\it i.e.} $M_\nu = \sqrt{2} f \langle \Delta^0 \rangle \sim {\cal O}(1) {\rm \ eV}$ and other is the Majorana mass of DM, {\it i.e.} $m = \sqrt{2} g \langle \Delta^0 \rangle \sim {\cal O}(100) {\rm \ keV}$. This implies a hierarchy between the two couplings $f$ (third term in Eq.~\ref{Lag-DM}) and $g$ (fourth term in Eq.~\ref{Lag-DM}) of the order of ${\cal O}(10^5)$ in order to explain the triple unification of neutrino mass, asymmetric DM and BAU. \section{Constraints on Asymmetric Inert Doublet (Scalar and Fermion) DM}\label{sec:constraints} \subsection{Constraints on SDDM from Oscillation} In case of SDDM the two states $\chi_0$ and its complex conjugate $\bar{\chi}_0$ can be written in terms of the mass eigenstates $S$ and $A$: \begin{eqnarray} | \chi_0 \rangle &=& \frac{1}{\sqrt{2}} (S + i A)\,, \nonumber\\ | \bar{\chi}_0 \rangle &=& \frac{1}{\sqrt{2}} (S - i A)\,. \label{flavor-states} \end{eqnarray} The state $| \chi_0 \rangle $ at any space-time point $(x,t)$ is given by \begin{equation} |\phi(x,t) \rangle = \frac{1}{\sqrt{2}} \left[ e^{-i(E_S t -k_S x)} | S \rangle + i e^{+i(E_A t- k_A x)} | A \rangle \right]\,, \label{wavefunction} \end{equation} where $E_S=\sqrt{k_S^2 + M_S^2 }$ and $E_A=\sqrt{k_A^2 + M_A^2 }$ are the energy of $S$ and $A$ respectively. The probability of $| \chi_0 \rangle $ oscillating into $| \bar{\chi}_0 \rangle$ is then given by \begin{equation} P_{| \chi_0 \rangle \to | \bar{\chi}_0 \rangle } = |\langle \bar{\chi}_0| \phi(x,t) \rangle |^2 \,. \end{equation} Using Eqs.~\ref{flavor-states} and~\ref{wavefunction} the probability of oscillation takes the form: \begin{equation} P_{|\chi_0 \rangle \to | \bar{\chi}_0 \rangle } = \frac{1}{4} \left[ 2 - e^{-i\left[(E_S-E_A)t - (k_A-k_S)x \right]} - e^{+i\left[(E_S-E_A)t - (k_A-k_S)x \right]} \right]\,. 
\label{probability} \end{equation} Above the EW phase transition there is no mass splitting between the two mass eigenstates $S$ and $A$, therefore $M_S=M_A$, $E_S=E_A$ and $k_S=k_A$. As a result from Eq.~\ref{probability} the probability of oscillation is null: \begin{equation} P_{|\chi_0 \rangle \to | \bar{\chi}_0 \rangle } = 0\,. \end{equation} Below the EW phase transition the DM number violating term $\frac{\lambda_5}{2}\left( (H^\dagger \chi)^2 + {\rm h.c.} \right)$ produce a mass splitting between the two mass eigenstates $S$ and $A$. From Eq.~\ref{probability} the probability of oscillation becomes: \begin{equation} P_{|\chi_0 \rangle \to | \bar{\chi}_0 \rangle } \simeq \frac{1}{2} \left[ 1- \cos \left(\frac{\Delta M^2 (t-t_{\rm EW})}{2 E} \right) \right]\,, \label{probability_EW} \end{equation} where we have assumed $E_S \sim E_A \sim E$, which is a good approximation for a small mass splitting. In the following we will consider a mass splitting of ${\cal O}({\rm keV})$, which implies $\lambda_5 \sim 10^{-7}$. We also normalise the time of evolution from the time of EW phase transition, so that at $t=t_{\rm EW}$, $P_{|\chi_0 \rangle \to | \bar{\chi}_0 \rangle }=0$. Below EW phase transition the time of oscillation from $\chi_0$ to $\bar{\chi}_0$ can be estimated as \begin{equation} t-t_{\rm EW} = \frac{2 E \pi}{\Delta M^2}\,. \end{equation} In the relativistic limit the energy of the DM particle $E \sim T $, where $T$ is the temperature of the thermal bath. Hence the oscillation time can be given as: \begin{equation} t-t_{\rm EW} \sim 4 \times 10^{-10} {\rm s} \left( \frac{T}{100 {\rm GeV}} \right) \left( \frac{{\rm keV}^2} {\Delta M^2} \right) \,. \end{equation} On the other hand, in the non-relativistic limit the energy of the DM particle $E \sim M_S$. Thus for $M_S \sim 100\ {\rm GeV}$, the time of oscillation is again similar to relativistic case. This implies that $\chi_0$ oscillates rapidly to $\bar{\chi}_0$. 
In this case if $\chi_0$ is in thermal equilibrium, then during each oscillation there is a leakage of asymmetry through the annihilation channel $\chi_0 \bar{\chi}_0 \to {\rm SM\ particles}$. Alternatively, to keep the generated asymmetry intact, $\chi_0$ should freeze out before it oscillates to $\bar{\chi}_0$. In other words, the mass of $\chi_0$ should be given by \begin{equation} M_{\chi_0} \gae x_f T_{\rm EW}\,, \end{equation} where $x_f \sim 20 $, which determines the epoch of freeze-out. From the above equation we see that to get an asymmetric SDDM one should have $M_{\chi_0} \gae 2\ {\rm TeV}$. \subsection{Constraints from Collider} Since the DM (scalar or fermion) is a doublet under the SM gauge group, it couples to the $Z$ boson. As a result it can change the invisible decay width of the latter unless the mass of the DM is greater than half of the $Z$-boson mass. This gives a lower bound on the mass scale of either SDDM or FDDM to be $\gae 45 {\rm \ GeV}$~\cite{Lundstrom:2008ai}. \section{Developing asymmetries in the lepton and DM sectors}\label{sec:lepto} If the triplet $\Delta$ is heavy enough as required by the seesaw, then it can go out-of-equilibrium even if the gauge couplings are ${\cal O}(1)$~\cite{Ma:1998dx,Hambye:2000ui,Hambye:2003ka,Sahu:2006pf}. In such a case the out-of-equilibrium decays of $\Delta \to LL$ and $\Delta \to \chi\chi \ (\psi \psi)$ produce asymmetries in visible and dark sectors respectively. The CP asymmetries for the two sectors arise via the interference of tree level decay and self-energy correction diagrams as shown in figures~\ref{fig-2} and~\ref{fig-3} respectively, for the scalar DM case, but are totally analogous for the fermionic doublet. \begin{figure}[t] \centering \includegraphics[width=0.5\columnwidth]{./figures/cpasy_vis.png} \caption{Tree level and self energy correction diagrams for the production of CP asymmetry in leptogenesis. 
\label{fig-2}} \end{figure} \begin{figure}[t] \centering \includegraphics[width=0.5\columnwidth]{./figures/cpasy_dm.png} \caption{Tree level and self energy correction diagrams for the production of CP asymmetry in generating asymmetric DM. \label{fig-3}} \end{figure} Considering the inert scalar doublet as a reference for the scalar potential, from Figs.~\ref{fig-2} and~\ref{fig-3} we see that the CP asymmetry requires at least two triplet scalars. Hence in presence of their interactions, the diagonal mass $M_\Delta^2$ in Eq.~\ref{scalar-potential} is replaced by: \begin{equation} \frac{1}{2} \Delta_a^\dagger \left( {\mathcal M}_+^2 \right)_{ab} \Delta_b + \frac{1}{2} (\Delta^*_a)^\dagger \left( {\mathcal M}_-^2 \right)_{ab} \Delta_b^*\,, \label{mass-matrix} \end{equation} and the trilinear couplings $\mu_H \Delta^\dagger H H + \mu_\chi \Delta^\dagger \chi \chi + {\rm h.c.}$ in the scalar potential (\ref{scalar-potential}) become: \begin{equation} \sum_{a=1,2}\mu_{aH} \Delta^\dagger H H + \mu_{a\chi} \Delta^\dagger \chi \chi + {\rm h.c.}\,. \end{equation} In Eq.~\ref{mass-matrix}, the mass matrix is given by: \begin{equation} {\mathcal M}_\pm^2 = \pmatrix{M_1^2-i C_{11} & -i C_{12}^{\pm} \cr\\ -i C_{21}^\pm & M_2^2- i C_{22} }\,, \label{mass-matrix-1} \end{equation} where \begin{equation} C_{ab}^+ = \Gamma_{ab} M_b = \frac{1}{8\pi}\left(\mu_{aH}\mu_{bH}^* + \mu_{a\chi}\mu_{b\chi}^*+ M_a M_b \sum_{\alpha\beta}f_{a\alpha\beta}^* f_{b\alpha\beta} \right)\,, \end{equation} with $C_{ab}^- = \Gamma_{ab}^* M_b$, and $C_{aa} = \Gamma_{aa} M_a$. Solving the mass matrix~\ref{mass-matrix-1} one gets two mass eigenstates $\xi^+_{1,2}=A_{1,2}^+ \Delta_1 + B_{1,2}^+ \Delta_2$ with masses $M_1$ and $M_2$. The complex conjugate of $\xi^+_{1,2}$ are given by $\xi^-_{1,2}=A_{1,2}^- \Delta_1 + B_{1,2}^- \Delta_2$. Note that $\xi^+$ and $\xi^-$ states are not CP eigenstates and hence their decay can give rise to CP asymmetry. 
We assume that there is no asymmetry, either in the visible sector or in the dark sector, at a temperature above the mass scale of the triplets. The asymmetries are generated in a thermal bath by the decay of these triplets. If we further assume that the mass of $\xi^\pm_{1}$ is much less than the mass of $\xi^\pm_{2}$ then the final asymmetries in visible and dark sectors will be given by the decay of $\xi^{\pm}_{1}$ as: \begin{eqnarray} \epsilon_L &=& 2 \left[ {\rm Br}(\xi_1^{-}\to \ell \ell) - {\rm Br} (\xi_1^{+} \to \ell^c \ell^c) \right] \equiv \epsilon_{\rm vis}\,, \nonumber\\ \epsilon_{\chi} &=& 2 \left[ {\rm Br}(\xi_1^{-}\to \chi_0 \chi_0) - {\rm Br} (\xi_1^{+} \to \chi_0^* \chi_0^*) \right] \equiv \epsilon_{\rm dark} \,, \end{eqnarray} where the front factor 2 takes into account of two similar particles are produced per decay. From Figs.~\ref{fig-2} and~\ref{fig-3}, the asymmetries are estimated to be: \begin{equation} \epsilon_{L} = \frac{{\rm Im} \left( \mu_{1\chi} \mu_{2\chi}^* \left[1 + \frac{\mu_{1H} \mu_{2H}^*} {\mu_{1\chi} \mu_{2\chi}^* } \right] \sum_{\alpha \beta} f_{1\alpha\beta} f^*_{2\alpha\beta} \right) } {8\pi^2 (M_2^2- M_1^2)} \left[\frac{M_1}{\Gamma_{1}} \right]\,, \label{cp_vis} \end{equation} and \begin{equation} \epsilon_{\chi} = \frac{{\rm Im} \left( \mu_{1\chi} \mu_{2\chi}^* \left[ \frac{\mu_{1H} \mu_{2H}^*} {M_1^2} + \sum_{\alpha \beta} f_{1\alpha\beta} f^*_{2\alpha\beta} \right] \right) } {8\pi^2 (M_{2}^2- M_{1}^2)} \left[\frac{M_1}{\Gamma_{1}} \right]\,, \label{cp_dark} \end{equation} where $\Gamma_1 \equiv \Gamma_{11}$. In a thermal bath these asymmetries evolve as the Universe expands and settle to a final value as soon the relevant processes go out of equilibrium, {\it i.e.} \begin{equation} \Gamma_i \equiv n_i \langle \sigma_i|v| \rangle \ll H(T)\,, \end{equation} where $H(T)$ is the Hubble scale of expansion. 
As a result the yields in both sectors can be written as: \begin{eqnarray} Y_L \equiv \frac{n_L}{s} = \epsilon_L X_\xi \eta_L\,, \nonumber\\ Y_{\chi} \equiv \frac{n_\chi}{s} = \epsilon_\chi X_\xi \eta_\chi\,, \label{asymmetry_L_DM} \end{eqnarray} where $X_\xi = n_{\xi_1^-}/s \equiv n_{\xi_1^+}/s$, $s=2(\pi^2/45) g_* T^3$ is the entropy density and $\eta_{L},\eta_{\chi}$ are the efficiency factors, which take into account the depletion of asymmetries due to the number violating processes involving $\chi$, $L$ and $H$ (this holds also for the fermionic inert doublet, hence we can replace the $\chi \to {\rm DM}$ label). At a temperature above the EW phase transition a part of the lepton asymmetry gets converted to the baryon asymmetry via the $SU(2)_L$ sphaleron processes. As a result the baryon asymmetry~\cite{Harvey:1990qw} is: \begin{equation} Y_B = -\frac{8 n + 4 m}{14 n + 9 m} Y_L = - \mathcal{S}_{\rm DM} Y_L\,, \label{B-asy} \end{equation} where $n$ is the number of generations and $m$ is the number of scalar doublets, leading to $\mathcal{S}_{\rm DM}=0.53,0.55$ for scalar DM and fermionic DM respectively. As introduced in section~\ref{sec:SDDM}, in the case of the scalar doublet DM, the asymmetry may be strongly washed out, if kinematically allowed, by the DM number violating processes: $\chi \chi \to \Delta \to H H$, $\chi\chi \to H^\dagger H^\dagger$ (contact annihilation through the $\lambda_5$ coupling) and $\chi \chi \to H \to \bar{f} f$. The reduced cross-section for the former process is given by: \begin{equation} \hat{\sigma} (\chi \chi \to \Delta \to H H) = \frac{1}{8 \pi} \frac{|\mu_\chi|^2 |\mu_H|^2} {(\hat{s}-M_1^2)^2}\,, \end{equation} where $\hat{s}$ is the centre of mass energy for the process: $\chi\chi \to \Delta \to HH$. Below the mass scale of the triplet this process is strongly suppressed. 
On the other hand, in the case of the contact annihilation of $\chi$'s the reduced cross-section is given by \begin{equation} \hat{\sigma}_\chi = \frac{\lambda_5^2}{32 \pi}\,. \label{reduced-cross-section} \end{equation} As a result the reaction rate is given by $\Gamma_\chi = (\gamma_\chi/n_\chi^{\rm eq})$, where the reaction density is \begin{equation} \gamma_\chi = \frac{T}{64 \pi^4} \int_{\hat{s}_{\rm min}}^{\infty} d\hat{s} \sqrt{\hat{s}} K_1\left(\frac{\sqrt{\hat{s}}}{T} \right)\hat{\sigma}_\chi\,, \label{scattering-density} \end{equation} and the equilibrium number density of $\chi$ is \begin{equation} n_\chi^{\rm eq} = \frac{g_{\rm dof} M_\chi^2 T}{2 \pi^2} K_2\left(\frac{M_\chi}{T} \right)\,, \label{eq_density} \end{equation} where $g_{\rm dof}$ is the number of internal degrees of freedom and $\hat{s}$ is the usual Mandelstam variable for the centre of mass energy. In Eqs.~\ref{scattering-density} and~\ref{eq_density}, $K_1$ and $K_2$ are modified Bessel functions. \begin{figure}[t] \centering \includegraphics[width=0.9\columnwidth]{./figures/gamma_mod.png} \caption{The scattering rate of the process $\chi\chi \to H^\dagger H^\dagger$ for different values of $\lambda_5$ is compared with the Hubble expansion rate. For illustration purposes we have used $M_\chi= 2 {\rm \ TeV}$ and $M_1=10^{10}{\rm \ GeV}$.} \label{rate_Hubble} \end{figure} In figure~\ref{rate_Hubble} we compare the rate of the process $\chi\chi \to H^\dagger H^\dagger$ with the Hubble rate by taking three values of $\lambda_5$. We see that for $\lambda_5 \lesssim 10^{-5}$ (blue dotted line), the scattering rate remains out-of-equilibrium throughout the epoch. For larger values of $\lambda_5$ the scattering process comes to equilibrium at a late epoch. At around $z\equiv M_1/T \approx 10^7$, which implies $M_\chi/T = (M_\chi/M_1) z \approx 1$, the scattering rate of the process sharply drops, as expected, and does not depend on the value of $\lambda_5$. 
This argument also holds in case of the process $ \chi \chi \to H \to \bar{f} f$. However, the rate of the scattering is further suppressed by the mass scale of Higgs. Returning to the general case of doublet DM, both scalar and fermion, from Eqs.~\ref{asymmetry_L_DM} and~\ref{B-asy} the DM to baryon ratio is given by: \begin{equation}\label{eq:IMP} \frac{\Omega_{\rm DM}}{\Omega_B} = \frac{1}{\mathcal{S}_{\rm DM} }\frac{m_{\rm DM}}{m_p} \frac{\epsilon_{\rm DM}}{\epsilon_L} \frac{\eta_{\rm DM}}{\eta_L}\,, \end{equation} where $m_p\sim 1{\rm \ GeV} $ is the proton mass. From this equation it is clear that the dependence of $\eta_{\rm DM}/\eta_L$ on the mass of DM goes as $1/m_{\rm DM}$. Hence for a $\mathcal{O}(100)$ GeV scale DM, the required efficiency factor for DM is two orders of magnitude less than the case of lepton provided that the CP asymmetries are equal on both sectors (see the end of section~\ref{sec:numsol}). For example, from Eqs.~\ref{cp_vis} and~\ref{cp_dark} we notice that the CP asymmetries are identically equal if: \begin{equation} \frac{\mu_{1\chi} \mu_{2\chi}^*}{M_1^2} = \sum_{\alpha,\beta} f_{1\alpha\beta} f^*_{2\alpha\beta}\,. \end{equation} In what follows, section~\ref{sec:numsol}, we solve numerically the relevant Boltzmann equations for quasi equilibrium evolution of triplet scalars, presented in section~\ref{sec:boleq}, to show that the parameter space of the theory fulfills the criteria $\Omega_{\rm DM} \sim 5\ \Omega_B$ and the observed BAU, while allowing a broad range of asymmetric DM masses. \subsection{Boltzmann equations for quasi-equilibrium evolution of triplet scalars}\label{sec:boleq} If the triplet ($\xi_1^\pm$) decay occurs in a quasi-equilibrium state then the detailed of $\eta_\chi$ and $\eta_L$ depends on the dynamics of the processes occuring in the thermal bath and can be obtained by solving the relevant Boltzmann equations~\cite{Hambye:2005tk,Chun:2006sp}. 
In our case the additional decay channel of the scalar triplet into DM particles is included. At first the number density of $\xi_1^\pm$ particles changes due to their decay ($\xi_1^\pm \to LL, HH, \chi\chi $ or $\psi\psi$) and gauge annihilation ($\xi_1^- \xi_1^+ \to \bar{L} L, H^\dagger H, \chi^\dagger \chi (\bar{\psi}\psi), W^\mu W_\mu, B^\mu B_\mu$), where $W^\mu$ and $B^\mu$ are the $SU(2)_L$ and $U(1)_Y$ gauge bosons respectively. If we assume that the masses of the components of the triplet are same before EW symmetry breaking then it is fairly general to use the dimensionless variables $z=M_1/T$ and $X_\xi = n_{\xi_1^-}/s \equiv n_{\xi_1^+}/s$. The Boltzmann equation for the evolution of $\xi_1^\pm$ density is then given by: \begin{equation} \frac{dX_{\xi_1}}{dz}=-\frac{\Gamma_D}{zH(z)}\left( X_{\xi_1} -X_{\xi_1}^{\rm eq} \right) - \frac{\Gamma_A}{z H(z)} \left( \frac{X_{\xi_1}^2-{X_{\xi_1}^{\rm eq}}^2}{X_{\xi_1}^{\rm eq}} \right)\,, \label{boltzman-1} \end{equation} where \begin{equation} \Gamma_D=\Gamma_1 \frac{K_1(z)}{K_2(z)}\,,\,\,\Gamma_A = \frac{\gamma_A}{n_{\xi_1}^{\rm eq}} ~~~{\rm and}~~~H(z)= \frac{ H(T=M_1)} {z^2}\,, \end{equation} and $\gamma_A$'s are the scattering densities, described in Eqs.~\ref{eq:scats}. The temperature independent decay rate of $\xi_1$ can be written as a function of the neutrino mass: \begin{equation} \Gamma_1=\frac{1}{8\pi} \frac{|m_\nu| M_1^2}{\langle H \rangle^2 \sqrt{B_L B_H}}\,, \end{equation} where $B_L$ and $B_H$ are the branching fractions in the decay channels: $\xi_1 \to L L$ and $\xi_1 \to H H$. Note that we have re-expressed the total decay rate $\Gamma_1(f_{\rm DM},f_H,f_L,M_1)$, where $f_{\rm DM} \equiv \mu_\chi/M_1$ for SDDM and $g$ for FDDM and $f_H\equiv \mu_H/M_1$, as $\Gamma_1(m_\nu, B_L, B_H, M_1)$. In the following we set $m_\nu=0.05$ eV and therefore the total decay rate depends only on three variables, namely $B_L$, $B_H$ and $M_1$. 
This makes a crucial decision in setting up the final asymmetry as we will show in section~\ref{sec:numsol}. For the gauge annihilation processes, the scattering densities are given by: \begin{eqnarray}\label{eq:scats} \fl \gamma ( \xi_1^+\xi_1^-\to \bar{f}f) &=& \frac{M_1^4 (6 g_2^4 + 5 g_Y^4)}{128 \pi^5 z} \int_{x_{\rm min}}^{\infty} dx \sqrt{x} K_1(z \sqrt{x}) r^3 \,,\nonumber\\ \fl \gamma ( \xi_1^+\xi_1^-\to H^\dagger H) &=& \frac{M_1^4 (g_2^4 + g_Y^4/2)}{512\pi^5 z} \int_{x_{\rm min}}^{\infty} dx \sqrt{x} K_1(z \sqrt{x})r^3 \,,\nonumber\\ \fl \gamma ( \xi_1^+\xi_1^-\to \chi^\dagger \chi) &=& \frac{M_1^4 (g_2^4 + g_Y^4/2)}{512\pi^5 z} \int_{x_{\rm min}}^{\infty} dx \sqrt{x} K_1(z \sqrt{x})r^3 \,,\nonumber\\ \fl \gamma ( \xi_1^+\xi_1^-\to W^a W^b) &=& \frac{ M_1^4 g_2^4}{64 \pi^5 z} \int_{x_{\rm min}}^{\infty} dx \sqrt{x} K_1(z \sqrt{x}) \left[r (5+34/x)-\frac{24}{x^2}(x-1)\ln \left(\frac{1+r}{1-r} \right)\right]\,,\nonumber\\ \fl \gamma ( \xi_1^+\xi_1^-\to BB) &=& \frac{3 M_1^4 g_Y^4}{128 \pi^5 z} \int_{x_{\rm min}}^{\infty} dx \sqrt{x} K_1(z \sqrt{x})\nonumber\\ && \times \left[r(1+4/x)-\frac{4}{x^2}(x-2) \ln \left( \frac{1+r}{1-r} \right) \right]\,, \end{eqnarray} where $r= \sqrt{1-4/x}$ and $x=\hat{s}/M_1^2$. In case of FDDM, the process corresponding to $\xi_1^+\xi_1^-\to \chi^\dagger \chi$ is given by: \begin{equation} \gamma ( \xi_1^+\xi_1^-\to \bar{\psi}\psi) = \frac{M_1^4 (6 g_2^4 + 5 g_Y^4)}{128 \pi^5 z} \int_{x_{\rm min}}^{\infty} dx \sqrt{x} K_1(z \sqrt{x}) r^3 \,. \end{equation} Since $\xi_1^\pm$ are charged particles there is an evolution of the asymmetry: $Y_{\xi_1} = ( n_{\xi_1^-}-n_{\xi_1^+})/s$ due to the decay and inverse decay of $\xi_1^\pm$ particles. 
The evolution of $Y_{\xi_1}$ is described by the Boltzmann equation: \begin{equation} \frac{d Y_{\xi_1}}{dz} = -\frac{\Gamma_D}{zH(z)} Y_{\xi_1} + \sum_j \frac{\Gamma^j_{ID}}{zH(z)} 2 B_j Y_j\,, \label{boltzman-2} \end{equation} where $Y_j=(n_j-n_{\bar j})/s$, with $j=L, H, \chi \ (\psi)$ and \begin{equation} \Gamma^j_{ID} = \Gamma_D \frac{X_{\xi_1}^{\rm eq}}{X_j^{\rm eq}}~~~{\rm and}~~~ B_j=\frac{\Gamma_j} {\Gamma_1}\,, \label{eq:pippo} \end{equation} where $X_j=n_j/s$. The evolution of the asymmetries $Y_j$ is given by the Boltzmann equation: \begin{eqnarray} \frac{d Y_j}{dz} &=&\ 2\ \Big\{ \frac{\Gamma_D}{zH(z)} \left[ \epsilon_j (X_{\xi_1} - X_{\xi_1}^{\rm eq}) \right] + B_j \left( \frac{\Gamma_D}{zH(z)} Y_{\xi_1} - \frac{\Gamma^j_{ID}}{zH(z)} 2 Y_j\right)\nonumber\\ && -\sum_k \frac{\Gamma^k_S}{z H(z)} \frac{X_{\xi_1}^{\rm eq}}{X_k^{\rm eq}} 2 Y_k\Big\}\,. \label{boltzman-3} \end{eqnarray} where $\Gamma_S=\gamma_S/n_{\xi_1^-}^{\rm eq}$ is the scattering rate involving the number violating processes, such as $\chi \chi \to \xi \to HH$, $LL \to \xi \to HH$. The front factor in Eq.~\ref{boltzman-3} takes into account of the two similar particles produced in each decay. Solving the Boltzmann Eqs.~\ref{boltzman-1}, ~\ref{boltzman-2} and~\ref{boltzman-3} we can get the lepton ($Y_L$) and dark matter ($Y_{\rm DM}$) asymmetries. Note that because of the conservation of hypercharge the Boltzman equations~\ref{boltzman-1}, ~\ref{boltzman-2} and~\ref{boltzman-3} satisfy the relation: $2 Y_\xi + \sum_j Y_j =0$. This implies: \begin{equation} Y_\xi =-\frac{1}{2} \sum_j Y_j\,. \end{equation} We will follow a phenomenological approach and calculate the ratio of efficiency factors $\eta_{\rm DM}$ and $\eta_L$ (and hence also the individual efficiency) solving the set of coupled equations~\ref{boltzman-1}, ~\ref{boltzman-2} and~\ref{boltzman-3}. 
As usual the efficiency factor for the species $i=L,H,{\rm DM}$ is defined as: \begin{equation} \eta_i = \frac{Y_i}{\epsilon_i \ X_\xi\Big|_{T >> M_1}}\,. \end{equation} The free parameters of the model are the CP asymmetries $\epsilon_i$ for all the species, the dark matter mass $m_{\rm DM}$ and the triplet mass $M_1$. However in the remaining of the paper we will focus on heavy triplet, $M_{1} \sim 10^{10}$ GeV, which has been shown to lead to successful leptogenesis~\cite{Hambye:2005tk,Chun:2006sp} for a wide range of CP asymmetries and branching ratios. In addition the following constraints apply: \begin{equation}\label{eq:const} \sum_j \epsilon_j =0\,, \ \ \ \sum_j B_j =1 \ \ {\rm and}\ | \epsilon_j| \le 2 \ B_j\,. \end{equation} The first and third conditions ensure that all amplitudes are physical and the total amount of CP violation can not exceed 100\% in each channel, while the second condition simply demands unitarity of the model. The number of free parameters therefore drops to 5, which we choose to be: $\epsilon_L, \epsilon_{\rm DM}, B_L, B_{\rm DM}$ and $m_{\rm DM}$. The numerical procedure and the results are described in the next section. \subsection{Numerical solutions of the Boltzmann equations}\label{sec:numsol} In principle for unlimited computational power one could use a griding method to explore the whole parameter space and to localise the hypervolume that satisfies Eq.~\ref{eq:IMP}. For the problem under scrutiny we have a 5-dimensional space: MCMC technique is well suited in this case since it numerically scales linearly with the number of dimension instead of exponentially. We follow the approach presented in~\cite{Clesse:2009ur}~\footnote{MCMC technique has been applied for scanning hybrid inflationary parameter space~\cite{Clesse:2009ur} and for investigating the multi-dimensional parameter space of supersymmetric theories, {\it e.g.}~\cite{deAustri:2006pe}.}. 
\begin{figure}[t] \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=0.9\columnwidth]{./figures/likeratio.pdf} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=0.9\columnwidth]{./figures/likeratio1.pdf} \end{minipage} \caption{{\it Left:} Likelihood function $\mathcal{L}_{\rm ratio}$ given by the ratio distribution, as a function of $r\equiv \Omega_{\rm DM}/\Omega_b$. {\it Right:} Logarithm of the ratio likelihood (red line) and logarithm of a gaussian distribution (dashed gray curve). Both curve have the same variance.} \label{fig:lkhratio} \end{figure} Defining a probability measure over the full parameter space allows to use Bayesian inference to assess the posterior probability distribution of all the parameters to get asymmetry in the dark sector as well as in the lepton sector, which gives rise to observed BAU. The details about bayesian statistical methods and the implementation of the MCMC are given in~\ref{app1}, while for detailed reviews we refer to {\it e.g.}~\cite{loredo,Trotta:2008qt}. The important point that has to be underlined is that through Bayes' theorem: \begin{equation} \label{eq:bayes} \mathcal{P} (\theta | X) {\rm d}\theta \propto\ \mathcal{L}(X | \theta) \cdot \pi(\theta) {\rm d}\theta \,, \end{equation} the posterior probability density function (pdf) $\mathcal{P}(\theta|X)$ for the model parameter $\theta$, given the data $X$, is proportional to the likelihood of the experiment times the prior belief in our model $\pi(\theta)$ and is sampled directly by the MCMC elements. \begin{table}[t!] \caption{MCMC parameters and priors for the CP asymmetries, branching ratios and $m_{\rm DM}$. 
All priors are uniform over the indicated range.\label{tab:prior2}} \begin{center} \lineup \begin{tabular}{ll} \br MCMC parameter & Prior \\ \mr $\log(m_{\rm DM}/{\rm GeV})$ & $0 \to 5$\\ $\log(\epsilon_L)$ & $-9 \to 0$ \\ $\log(\epsilon_{\rm DM})$ & $-9 \to 0$\\ $\log(B_L)$ & $-5 \to 0$\\ $\log(B_{\rm DM})$ & $-5 \to 0$ \\ \br \end{tabular} \end{center} \end{table} The likelihood function $\mathcal{L}(X|\theta)$ denotes the probability of the data $X$ given some theoretical prediction $\theta$ and plays a central role in Bayesian inference. Both the abundances of dark matter $\Omega_{\rm DM}$ and baryonic matter $\Omega_{b}$ are variables normally distributed around their mean values given by WMAP measurements~\cite{Komatsu:2010fb}: $\Omega_{\rm DM} \equiv \bar{\Omega}_{\rm DM}\pm\sigma_{\rm DM} = 0.227 \pm 0.014$ and $\Omega_b \equiv \bar{\Omega}_b \pm \sigma_b = 0.0456 \pm 0.0016$. Since we are interested in the ratio of the dark to baryonic matter, Eq.~\ref{eq:IMP}, we define the likelihood as the probability distribution of the model parameter to satisfy that ratio. The likelihood is therefore well described by the so-called ratio distribution~\cite{HINKLEY01121969}, which is constructed as the distribution of the ratio of variables normally distributed with non zero mean. 
Calling $r =\Omega_{\rm DM}/\Omega_b \equiv f(m_{\rm DM},\epsilon_i,B_i)$, the likelihood reads: \begin{equation}\label{eq:loglkBE} \fl {\cal L}_{\rm ratio} = \frac{1}{\sqrt{2 \pi}\sigma_b \sigma_{\rm DM}} \frac{b(r) c(r)}{a(r)^3} \Phi\Big(\frac{b(r)}{a(r)}\Big) + \frac{\exp\Big(-1/2 (\bar{\Omega}_{\rm DM}^2/\sigma_{\rm DM}^2 +\bar{\Omega}_b^2/\sigma_b^2)\Big)}{a(r)^2 \pi \sigma_b \sigma_{\rm DM}}\,, \end{equation} with: \begin{eqnarray} a(r) & = & \sqrt{\frac{r^2}{\sigma_{\rm DM}^2}+\frac{1}{\sigma_b^2}}\,,\nonumber\\ b(r) & = & \frac{\bar{\Omega}_{\rm DM}}{\sigma_{\rm DM}^2} r +\frac{\bar{\Omega}_{b}}{\sigma_b^2}\,,\nonumber\\ c(r) & = & \exp\Big(\frac{1}{2}\frac{b(r)^2}{a(r)^2} - \frac{1}{2}(\frac{\bar{\Omega}_{\rm DM}^2}{\sigma_{\rm DM}^2}+\frac{\bar{\Omega}_b^2}{\sigma_b^2})\Big)\,,\nonumber\\ \Phi(u) & = & {\rm Erf}\Big(\frac{u}{\sqrt{2}}\Big)\,. \end{eqnarray} where Erf is the error function. The shape of the likelihood function is depicted in figure~\ref{fig:lkhratio}, where it can be seen that the peak is at around $r \sim 5$, as expected. In the right plot we show that the ratio distribution is slightly skewed with respect to a gaussian distribution (gray dashed line) with the same variance.
All of other parameters in each plane have been marginalized over.} \label{fig:1DmDM} \end{figure} In addition the baryon asymmetry should satisfy the constraints from WMAP: \begin{equation} \label{eq:barasym} \eta_b = \Big(\frac{n_b}{n_\gamma}\Big)\Big|_0 = \bar{\eta}_b \pm \sigma_{\eta b} = (6.15 \pm 0.25) \times 10^{-10} \,, \end{equation} where $\eta_b=7.02 \times \mathcal{S}_{\rm DM} Y_L$, and the density of photon and baryons are computed at present time. The baryon asymmetry is described by a gaussian distribution: \begin{equation} \label{eq:lkhb} \mathcal{L}_L \propto \exp\Big(-\frac{(\eta_b -\bar{\eta}_b)^2}{2 \sigma^2_{\eta b}}\Big)\,. \end{equation} Summarizing, the logarithm of the total likelihood is given by the sum of Eqs.~\ref{eq:loglkBE} and~\ref{eq:lkhb}: \begin{equation} \ln\mathcal{L}_{\rm asym} = \ln\mathcal{L}_{\rm ratio} + \ln\mathcal{L}_L\,. \end{equation} \begin{figure}[t!] \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.2\columnwidth]{./figures/BDMreff_2Dpost.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.2\columnwidth]{./figures/BLreff_2Dpost.png} \end{minipage} \caption{ {\it Left:} 2D posterior pdf in the \{$B_{\rm DM},\eta_{\rm DM}/\eta_{L}$\}-plane. {\it Right:} Same as left in the \{$B_{\rm L},\eta_{\rm DM}/\eta_L$\}-plane. The credible regions are given at $68\%$ and $95\%$ C.L.. All of other parameters in each plane have been marginalized over. \label{fig:set2}} \end{figure} MCMC techniques require a prior assumption on the probability distribution of the parameters, namely on $\pi(\theta)$. In the absence of theoretical constraints on the model parameter there are a priori no constraints on these quantities. We therefore focus on the regions singled out by successful triplet leptogenesis in~\cite{Hambye:2005tk,Chun:2006sp}, that is values of the CP asymmetries ranging from $10^{-9} - 1$ and branching ratios from 1 to $10^{-5}$. 
In order not to support a particular scale we choose flat prior on the log distribution for each parameters, as described in table~\ref{tab:prior2}. The dark matter mass is let free to vary between 1 GeV up to 10 TeV (even though masses below 50 GeV are excluded by LEPII). As it is known, if the data are not informative enough a dependence on the prior choice is left in the posterior pdf. We present in the following the results of the bayesian inference for the case of the asymmetric inert scalar doublet dark matter (SDDM). In the left panel of figure~\ref{fig:1DmDM} we show the 1D posterior pdf for $m_{\rm DM}$, while all other parameters are marginalized over. We see that all the mass range from 1 GeV up to $\sim 4$ TeV can lead to successful leptogenesis, namely $Y_L \sim 10^{-10}$ and an asymmetric dark matter satisfying the ratio Eq.~\ref{eq:IMP}. The vertical blue line denotes the bound from the $Z$ decay width and masses below 50 GeV are excluded, while the region on the left of the red vertical line is excluded by $\chi_0-\bar{\chi}_0$ oscillations. As one can see from the posterior pdf the most favoured region is at $m_{\rm DM} \sim 10$ GeV, while there are candidates at 100 GeV with smaller statistical significance. With even less probability but still viable are candidates at TeV, which is the range of interest for SDDM. On the right panel of figure~\ref{fig:1DmDM} the 68\% and 95\% credible region are shown in the \{$m_{\rm DM},\eta_{\rm DM}/\eta_{L}$\}-plane. From there we see that for DM mass up to around 500 GeV, the preferred values of the ratio $\eta_{\rm DM}/\eta_L$ remains constant to be around 10-50 as these are easily compensated by the small CP asymmetry ratio $\epsilon_{\rm DM}/\epsilon_L$. However, for DM masses above ${\cal O}$(TeV), $\epsilon_{\rm DM}/\epsilon_L$ is not sufficiently small to compensate with large $\eta_{\rm DM}/\eta_L$ and therefore, the preferred values of $\eta_{\rm DM}/\eta_L$ remains to be around unity. 
Alternatively for a given DM mass, smaller values of $\eta_{\rm DM}/\eta_L$ are allowed at 95\% C.L. for $\epsilon_{\rm DM}/\epsilon_L > 1$. \begin{figure}[t!] \begin{minipage}[t]{0.33\textwidth} \centering \includegraphics[width=1.2\columnwidth]{./figures/effLeffDM_2Dpost.png} \end{minipage} \begin{minipage}[t]{0.33\textwidth} \centering \includegraphics[width=1.2\columnwidth]{./figures/BDMetaDM_2Dpost.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.33\textwidth} \centering \includegraphics[width=1.2\columnwidth]{./figures/BLetaL_2Dpost.png} \end{minipage} \caption{{\it Left:} 2D posterior pdf in the \{$\eta_{\rm DM},\eta_{L}$\}-plane. {\it Central:} 2D posterior pdf in the \{$B_{\rm DM},\eta_{\rm DM}$\}-plane. {\it Right:} same as left in the \{$B_{L},\eta_{L}$\}-plane. The credible regions are given at $68\%$ and $90\%$ C.L.. All of other parameters in each plane have been marginalized over. \label{fig:set3}} \end{figure} The above preferred values of the parameter space can be understood from the Boltzmann equation \ref{boltzman-3} in which the input parameters are the CP asymmetries and the branching ratios. Having chosen a mass of the scalar triplet of $10^{10}$ GeV the dominant processes that regulate the boltzmann equations are the decay and inverse decay, and fundamental quantities are the branching ratios. In figure~\ref{fig:set2} we show the correlation of $\eta_{\rm DM}/\eta_L$ versus $B_{\rm DM}$ and $B_L$ respectively in the left and right panels, within the 68\% and 95 \% credible regions. We see that large efficiency ratio $\eta_{\rm DM}/\eta_L$ is preferred when $B_L \to 1$ and small $B_{\rm DM} \to 10^{-5}$. This is because larger the value of $B_L$ (which implies smaller is the $B_{\rm DM}$ as $\sum_i B_i=1$ with i=$L,H$,DM) the larger is the washout due to inverse decay and hence leads to small $\eta_L$. On the contrary smaller is the $B_{\rm DM}$ the washout effect is small due to inverse decay and hence large $\eta_{\rm DM}$. 
Note that in either case the production of asymmetry is proportional to $\Gamma_1 \propto 1/\sqrt{B_L B_H}$. Therefore when $B_L$ approaches towards $10^{-5}$ the asymmetry ($Y_L$) as well as the efficiency ($\eta_L$) get increased. On the other hand when $B_{\rm DM}$ approaches towards $1$, which implies small $B_L$, the asymmetry $Y_{\rm DM}$ gets increased but efficiency gets decreased. These behaviours of $\eta_{\rm DM}$ and $\eta_L$ can be confirmed from figure~\ref{fig:set3} where we have shown the 2D credible regions at 68\% and 95\% C.L.. The extreme left one, which constitutes the summary of middle and right ones, reveals that a successful asymmetric dark matter and lepton asymmetry can be generated with small $\eta_L$ and large $\eta_{\rm DM}$. In other words, large $B_L$ and small $B_{\rm DM}$ are required in favour of the observed BAU and asymmetric dark matter. For sake of reference we report the preferred values of the input CP asymmetries. The preferred values range between $10^{-9}-10^{-2}$ for $\epsilon_{\rm DM}$, respectively to $B_{\rm DM}=10^{-5}-0.5$. A more tighter range is selected in the case of the lepton CP asymmetry: $10^{-8}-10^{-5}$ again for $B_L$ ranging from its extremal values. We remark in addition that for large $B_{\rm DM}$ and small $B_L$ and masses around 50 GeV, the CP asymmetry in the DM sector can be be larger by an order of magnitude with respect to $\epsilon_L$ to compensate the small value of $\eta_{\rm DM}/\eta_L$. Regarding the inert fermionic doublet DM (FDDM) candidate, the discussion is very similar. We have verified that there are no substantial differences in the selected 1D and 2D credible regions as the Boltzmann equations are same in both cases. A small difference comes from the internal degrees of freedom which makes the equilibrium value of a FDDM different from a SDDM. Therefore, in case of FDDM, the allowed mass range goes up to a few TeV starting from 50 GeV as shown in figure~\ref{fig:1DmDM}. 
\begin{figure}[t] \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.\columnwidth]{./figures/point1.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.\columnwidth]{./figures/point3.png} \end{minipage} \caption{{\it Left:} Absolute value for the Yield of leptons (cyan solid), DM (dotted magenta), Higgs (dashed black), $\xi$ asymmetry (solid red) plus scalar triplet abundancy (black solid), for a successful point with $m_{\rm DM}=86$ GeV, $B_L=0.09$, $B_{\rm DM}=4.1 \times10^{-4}$, $\epsilon_{L}=2.6 \times 10^{-6}$, $\epsilon_{\rm DM}=1.1\times 10^{-8}$ which leads to $r\equiv \Omega_{\rm DM}/\Omega_b=4.75$, $Y_L=1.7 \times 10^{-10}$ and $\eta_{\rm DM}/\eta_L=6.53$. {\it Right:} Same as left for $m_{\rm DM}=2$ TeV, $B_L=9.5 \times 10^{-3}$, $B_{\rm DM}=2.6 \times 10^{-5}$, $\epsilon_L=7 \times 10^{-7}$, $\epsilon_{\rm DM}=1.2 \times 10^{-9}$, $\Omega_{\rm DM}/\Omega_b=5.4$ and $Y_L=1.6\times 10^{-10}$ and $\eta_{\rm DM}/\eta_L=0.86$. The $|Y_i|$ are rescaled in terms of CP asymmetries.} \label{fig:points} \end{figure} In figure~\ref{fig:points} we show the behavior of the Yields for leptons, Higgs, DM, scalar triplet and $X_\xi$ for two particular points. The first point in the parameter space is shown in the left panel, which leads to a successful model for FDDM with a mass of $\sim 86$ GeV, $r \sim 4.8$ and $Y_L=1.7 \times10^{-10}$. The second point in the parameter space is depicted in the right panel and accounts for a SDDM with $m_{\rm DM}\sim 2$ TeV, $r \sim 5.3$ and successful baryon asymmetry, $Y_L= 1.6\times 10^{-10}$. The details about the parameters are given in the caption. These two points are representative of the behavior discussed above. In particular, for the left panel the branching ratios are $B_L=0.09$ and $B_{\rm DM}=4.1\times 10^{-4}$, which implies small $\eta_L$ and large $\eta_{\rm DM}$. 
Therefore, the ratio of $\eta_{\rm DM}/\eta_L$ is maximum and can be confirmed from figure~\ref{fig:set2}. For the figure in the right panel the branching ratios are both small, $B_L=9 \times 10^{-3}$ and $B_{\rm DM}=3 \times 10^{-5}$, which implies $\eta_{\rm DM}$ and $\eta_L$ are comparable. As a result the ratio $\eta_{\rm DM}/\eta_L\sim 0.9$ and the large DM mass is compensated by the very small CP asymmetry ratio. This behavior can be confirmed from figure~\ref{fig:set2}. As a consistency check, we have investigated the behavior of the efficiency factors in the case of equal CP asymmetries in the DM and lepton channels. There are two interesting results that come out from the MCMC run, shown in figure~\ref{fig:eqcp}. In the left panel the 1D posterior pdf for the DM mass is depicted. We note that equal asymmetries lead to an upper bound on the asymmetric dark matter mass of $\mathcal{O}(50)$ GeV and can be applied to the case of FDDM. This is somewhat expected because the ratio of CP asymmetries can not compensate the increasing DM mass and therefore around 100 GeV there are no more candidates which can fulfill the DM to baryon requirement. This can be seen from the central panel where the 2D credible regions in the \{$m_{\rm DM},\eta_{\rm DM}/\eta_L$\}-plane are shown: the ratio of efficiencies drops very rapidly as soon as the DM mass increases. As a consequence we found the known result of a light DM mass, around 10 GeV, being favoured. In the right panel the 2D credible regions are shown in the $B_L-B_{\rm DM}$-plane. We see that the preferred branching ratios are similar in magnitude as expected and therefore the efficiencies are comparable. In other words the preferred ratio $\eta_{\rm DM}/\eta_L \lesssim 0.1$ for $m_{\rm DM} \lesssim 50$ GeV.
\begin{figure}[t] \begin{minipage}[t]{0.33\textwidth} \centering \includegraphics[width=1.2\columnwidth]{./figures/fermEqCP_1Dm.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.33\textwidth} \centering \includegraphics[width=1.2\columnwidth]{./figures/fermEqCP_2Dmeff.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.33\textwidth} \centering \includegraphics[width=1.2\columnwidth]{./figures/fermEqCP_2DBLBDM.png} \end{minipage} \caption{{\it Left:} 1D posterior pdf (black solid line) for the DM mass $m_{\rm DM}$. The vertical dot-dashed blue line denotes the bound from LEPII. {\it Middle:} 2D credible regions at 68\% and 95\% C.L. in the \{$m_{\rm DM},\eta_{\rm DM}/\eta_L$\}-plane. {\it Right:} Same as middle in the \{$B_{\rm DM},B_{L}$\}-plane. All of other parameters in each plane have been marginalized over.} \label{fig:eqcp} \end{figure} \section{Inelastic scattering and Asymmetric DM}\label{sec:inel} In this section we briefly recall the definition of event rate in a detector and discuss the features of inelastic scattering. For each experiments we describe the likelihood functions used in the data analysis and the choice of priors. In the case of direct detection signals, the only free parameters of the model are $m_{\rm DM}$ and the mass splitting $\delta$. For details on the experimental set up and bayesian inference we refers to~\ref{app1} and~\cite{Arina:2011si}. \subsection{Experiment description, likelihoods and priors}\label{sec:intheo} The direct detection experiments aim to detect or set limits on nuclear recoils arising from the scattering of DM particles off target nuclei. The energy $E_R$ transferred during the collision between the incident particle with mass $m_{\rm DM}$ and the nucleus with mass $M_{\cal N}$ is of the order of the keV for a mean DM velocity of $v/c\sim 10^{-3}$ in the Galactic halo. 
The differential spectrum for such recoils, measured in events per day/kg/keV, is given by \begin{equation} \label{eq:diffrate} \frac{\rmd R}{\rmd E_{R}} = \frac{\rho_{\odot}}{m_{\rm DM}} \frac{\rmd\sigma}{\rmd E_{R}} \; \eta (E_R,t)\,, \end{equation} where $\rmd \sigma/\rmd E_{R}$ encodes all the particle and nuclear physics factors, $\rho_{\odot} \equiv \rho_{\rm DM}(R_{\odot})$ is the DM density at the Sun position and $\eta (E_R,t)$ is the mean inverse velocity of the incoming particles that can deposit a given recoil energy $E_R$: \begin{equation} \label{eq:eta} \eta (E_R,t) = \int_{v_{\rm min}} {\rm d}^3 \vec{v}\ \frac{ {\rm f}(\vec{{v}}(t))}{{v}} \,, \end{equation} where the velocity $\vec{v}$ is taken with respect to the Earth frame. The quantity $v_{\rm min}$ is the minimum velocity needed to lead to a recoil inside the detector: \begin{equation} v_{\rm min} = c \sqrt{\frac{1}{2 M_{\mathcal N} E_R}} \Big(\frac{M_{\mathcal N} E_R}{\mu_n}+\delta\Big) \,, \label{eq:vmin} \end{equation} where $\mu_n$ is the WIMP-nucleus reduced mass and $M_{\mathcal N}$ is the nucleus mass. $\delta$ denotes the mass splitting between the DM particle and the excited state, therefore proportional to $\lambda_5$ in case of SDDM and Majorana mass in case of FDDM. The value required by preserving the asymmetry, $\lambda_5 \sim 10^{-7}$ leads precisely to $\delta \sim 100$ keV, the right order of magnitude for inelastic scattering in case of SDDM. On the other hand, in case of FDDM, $\delta \sim m$, the Majorana mass of FDDM. The total event rate per unit detector mass is obtained by integrating Eq.~(\ref{eq:diffrate}) over a given energy bin $[E_1,E_2]$, \begin{equation} \label{eq:totrate} R(t) = \int_{E_1}^{E_2} {d}E_R\ \epsilon(E_R)\ \frac{\rmd R}{\rmd E_R}\, , \end{equation} where $\epsilon$ is the energy dependent efficiency of the detector.
The expected number of events observed in a detector is given by: \begin{equation} S = M_{\rm det} T R(t)\,, \label{eq:Ntot} \end{equation} where $M_{\rm det}$ is the detector mass and $T$ is the exposure time. For some detectors, like scintillators, the recoiling nucleus may lose energy by collisions with other nuclei, hence in the form of heat, or through collisions with electrons, which create scintillation light. The observed energy released in scintillation light (typically expressed in keVee) is related to the nuclear recoil energy through the quenching factor $q$, $E_{\rm scint} = q E_R$. The particle physics cross-section for coherent inelastic scattering $S \ \mathcal{N} \to A\ \mathcal{N}$, mediated by $Z$ exchange in the $t$-channel, is parameterised as: \begin{eqnarray} \label{eq:pppart} \frac{\rmd \sigma}{\rmd E_{R}} & = & \frac{M_{\cal N}}{2 \mu^2_n}\ \frac{G_F^2}{2 \pi f_n^2} \ \Big( (A-Z)f_n + Z f_p\Big)^2 F^2(E_R)\nonumber\\ & = & \frac{M_{\cal N}}{2 \mu^2_n}\ \frac{G_F^2}{2 \pi} \ \Big( A + Z\ (4 \sin^2\theta_{\rm W}-1)\Big)^2 F^2(E_R) \, , \end{eqnarray} where $Z$ and $A$ are respectively the number of protons and the mass number of the element, $G_F$ is the Fermi constant and $\sin^2\theta_{\rm W}$ is the Weinberg angle. For $SU(2)_L$ doublets with hypercharge 1/2 the couplings to proton and neutron are different, i.e. $f_n \neq f_p$ in contrast to the standard elastic scattering. We note that the cross-section is no longer a free parameter. The nuclei form factor $F^2(E_R)$ characterizes the loss of coherence for non zero momentum transfer and is described as the Helm factor~\cite{Helm:1956zz,Lewin:1995rx}. Regarding $\eta(E_R,t)$ in equation~\ref{eq:eta}, we consider the velocity distributions generated by a cored isothermal and the NFW~\cite{Navarro:1996gj} density profile marginalized over the astrophysical variables ($v_0,v_{\rm esc}$ and $\rho_\odot$), while the standard maxwellian halo is presented in~\ref{app2} for sake of reference.
Given a DM density profile and assuming equilibrium between gravitational attractive force and pressure, a corresponding velocity distribution arises from the Eddington formula~\cite{Binneybook}. We consider these two DM density profiles because they lead to velocity distributions that differ mainly in the selected mean and escape velocity values~\cite{Arina:2011si}, which are fundamental quantities in inelastic scattering. Indeed the splitting factor $\delta$ in Eq.~\ref{eq:vmin} means that only the very high velocity particles will have enough energy to produce a recoil in the detector. This can be seen by re-expressing it in terms of target and DM masses \begin{equation} \Big(\frac{v}{c}\Big)^2 > \frac{2 \delta (M_\mathcal{N} + m_{\rm DM})}{m_{\rm DM} M_{\mathcal{N}}}\,. \end{equation} In addition experiments with heavy nuclei will have a large sensitivity to the high tail of the velocity distribution. We therefore discuss the DAMA experiment because of the Iodine, CRESST-II on W and Xenon100. The Germanium is more sensitive to particles with masses of the order of 50-70 GeV but we consider the dedicated analysis for inelastic scattering~\cite{Ahmed:2010hw}. We do not consider the CoGeNT~\cite{Aalseth:2011wp} experiment since only very light DM can account for its excess. We do not consider Zeplin-III~\cite{Lebedenko:2008gb} since it has an analogous sensitivity to CRESST-II.
We therefore use a background value $B=3$ with $\sigma_B = 10\%$ of $B$. The likelihood is described by the poisson probability of seeing three events for a given theoretical prediction $S$ and a given background $B$: \begin{equation} \label{eq:cresst} \ln{\cal L}_{\rm Cresst} (3|S,B)= - (S+B) + 3 \ln \left(S+B \right)\,. \end{equation} The effective likelihood we used in the analysis is marginalized numerically over the background: \begin{equation} \label{eq:effcresst} \ln {\mathcal L}^{\rm eff}_{\rm Cresst} = \int_0^{\infty} \rmd B \ \ln{\mathcal L}_{\rm Cresst} (3 | S,B)\ p(B)\,, \end{equation} where $p(B)$ is the probability function of the background, modeled as a gaussian distribution. The invariant $90_S\%$ confidence level, based on the $S$-signal, corresponds to $\Delta\chi^2 \sim 3.34$. \paragraph{CDMS on Germanium} The CDMS collaboration has published a dedicated analysis for inelastic DM~\cite{Ahmed:2010hw}, which we use for constructing the likelihood of the experiment. The total exposure is 969 kg days and the energy range is from 10 keV up to 150 keV. In $\Delta E_1=10-25$ keV 8 events have been found, with an expected background of $5.88_{-1.75}^{+2.33}$, while in the remaining energy range, $\Delta E_2=25-150$ keV, 3 events survive all the cuts and have an expected background of $0.93_{-0.36}^{+0.58}$. The background accounts for all surface events and cosmogenic particles. For the detector efficiency we used the red dotted curve presented in figure 5 of~\cite{Ahmed:2010hw}. The total likelihood is the sum of the contributions from the two energy ranges $\Delta E_1$ and $\Delta E_2$.
Each partial likelihood follows the poisson distribution: \begin{equation} \label{eq:cdmsge} \ln{\cal L}_{\rm CDMS} (11|S,B)= \ln{\cal L}_{\Delta E_1} (8|S,B)+ \ln{\cal L}_{\Delta E_2} (3|S,B) \,, \end{equation} with \begin{eqnarray} \ln{\cal L}_{\Delta E_1} (8|S,B) & = & - (S+B)+ 8 \ln \left(S+B \right)\,,\nonumber\\ \ln{\cal L}_{\Delta E_2} (3|S,B) & = & - (S+B) + 3 \ln \left(S+B \right)\,.\ \end{eqnarray} We then marginalise numerically over the background: \begin{equation} \label{eq:bckm} {\cal L}^{\rm eff}_{\rm CDMS} = \int_0^{\infty} \rmd B \ {\cal L}_{\rm CDMS} (11 | S,B)\ p(B), \end{equation} to get the effective likelihood we use in computing the exclusion bound. The $90_S$\% confidence interval corresponds to $\Delta\chi^2 = 2.5$, obtained considering that in the whole energy range there are 11 events with an expected background of 6. \paragraph{DAMA} The DAMA likelihood for the modulated rate is described in~\cite{Arina:2011si} and follows a Gaussian distribution: \begin{equation} \ln\mathcal{L}_{\rm DAMA} = - \sum_{i=1}^{N_{\rm bin}} \frac{(s_i-\bar{s}^{\rm obs}_i)^2}{2 \sigma_i^2} \,, \end{equation} where $s_i$ and $\bar{s}^{\rm obs }_i$ are the theoretical and the mean observed modulation respectively in the $i$th energy bin, $\sigma_i$ is the associated uncertainty in the observed signal. We use in this analysis the 12-bin data from figure~9 of~\cite{Bernabei:2008yi}. The quenching factors $q_{\rm Na}$ and $q_{\rm I}$ are taken to be free parameters in our analysis, which we vary over their respective allowed range~\cite{Bernabei:1996vj,Chagani:2008in}, as reported in table~\ref{tab:prior1}. In addition we require that the unmodulated predicted signal does not overcome the total unmodulated rate in figure~1 of~\cite{Bernabei:2008yi}, namely in each energy bin the predicted total rate should be at most equal to the measured rate. 
\paragraph{Xenon100} As far as it concerns Xenon100 (Xe100 hereafter) experiment, we use the last data release for inelastic scattering~\cite{Aprile:2011ts}. The likelihood is given by a Poisson distribution for three seen events times a gaussian which takes into account the uncertainties on the scintillation efficiency $L_{\rm eff}$. These latter however affect only the low mass DM region. In addition the uncertainties over the background are marginalized over. For details about this experiment we refer to~\cite{Arina:2011si}. \begin{table}[t!] \caption{MCMC parameters and priors for the model parameter space and experimental systematics (nuisance parameters). All priors are uniform over the indicated range.\label{tab:prior1}} \begin{center} \lineup \begin{tabular}{lll} \br Experiment& MCMC parameter & Prior \\ \mr All & $\log(m_{\rm DM}/{\rm GeV})$ & $0 \to 5$\\ All & $\delta/{\rm keV}$ & $0 \to 200$ \\ DAMA & $q_{\rm Na}$ & $0.2 \to 0.4$\\ DAMA & $q_{\rm I}$ & $0.06 \to 0.1$\\ Xenon100 & $L_{\rm eff}$& $-0.01 \to 0.18$ \\ \br \end{tabular} \end{center} \end{table} \paragraph{Choice of priors and pills of bayesian inference} As for the bayesian inference we follow closely the approach of~\cite{Arina:2011si}. We consider a full bayesian analysis without discussing profile likelihoods: indeed in case of informative data (as for DAMA experiment) the posterior pdf and the profile likelihood are equivalent, while in the case of exclusion bounds, in order to be insensitive on the choice of priors, we use the invariant bound for the $x_S\%$ credible region, as described in~\ref{app1}, which can have a bayesian interpretation in terms of probability for the $S$ signal. Having specified the likelihood functions for each experiments, the only missing element for Bayes theorem, Eq.~\ref{eq:bayes}, is the choice of priors. 
As in the previous section, the prior on the DM mass is chosen flat on a logarithmic scale on the same range (note that it is the only parameter in common with the asymmetry generation MCMC), while for $\delta$ we chose a flat prior, since the scale of this parameter is known by the requirement of inelastic scattering. The range is given in table~\ref{tab:prior1}, together with the priors for the systematic parameters in each experiment. \paragraph{Astrophysics} In addition to the candidate mass $m_{\rm DM}$, the mass splitting $\delta$ between the DM and its excited state and the nuisance parameters in the experimental set-ups, two further free parameters are used to characterise the DM velocity distribution: the virial mass of the DM halo, and its concentration. These additional parameters are, however, also constrained by astrophysical observations on the velocity of the local standard of rest, $v_0$, on the escape velocity for the DM halo, $v_{\rm esc}$ and on the DM density at the Sun's position $\rho_\odot$, which all enter in equations~\ref{eq:diffrate} and~\ref{eq:eta}. The Gaussian priors and the astrophysical likelihood are given in detail in~\cite{Arina:2011si}. Only in the case of the Maxwellian velocity distribution do we not vary the astrophysical observables in their allowed ranges but keep them fixed at their mean values, which are $\bar{v}_0 = 230 \ {\rm km s^{-1}}$~\cite{Reid:2009nj,Gillessen:2008qv}, $\bar{v}_{\rm esc} = 544\ {\rm km s^{-1}}$~\cite{Smith:2006ym,Dehnen:1997cq} and $\bar{\rho}_\odot = 0.4\ {\rm GeV cm^{-3}}$~\cite{Weber:2009pt,Salucci:2010qr}. \subsection{Results for scalar and fermionic candidates}\label{sec:Inres} In this section we present our inference analysis for the considered experiments. Before coming to the results for the scalar and fermionic candidate we would like to make a few general comments about inelastic scattering mediated by the $Z$ boson and the DAMA region. 
Figure~\ref{fig:DamaTrNFW} shows the preferred DAMA region for inelastic scattering in the plane $\{\delta,m_{\rm DM}\}$ for a NFW density profile. In the left plot only the modulated signal is considered, while on the right-hand plot we add the additional constraint on the total rate: it follows that the region with small $\delta \sim 20$ keV and large masses, $\mathcal{O}(10)$ TeV, is excluded. In addition the mass range around $50$ GeV and small mass splitting is disfavoured, while an island at lower masses and splittings survives. The same behavior is retrieved for the cored isothermal halo. In figure~\ref{fig:Damaq} the dependence on the quenching factors is depicted. As expected $q_{\rm Na}$ is unconstrained, as shown in the left plot by the flat 1D marginal posterior (cyan solid line) while for $q_{\rm I}$ one might claim a slight preference for values around 0.08 although it is statistically insignificant. The quenching factors for inelastic, Z mediated scattering turn out to be less constrained than in the elastic spin-independent case~\cite{Arina:2011si}. The right panel illustrates the correlation between \{$\delta,m_{\rm DM}$\} and the quenching factor on Iodine. There is a clear dependence on $q_{\rm I}$ for masses between 3 and 30 TeV and splittings in the range 50--100 keV: smaller values of the quenching factor favour smaller splittings and lighter masses. All the remaining region does not show a correlation between the model parameters and $q_{\rm I}$. The small island at masses of a few GeV is due to scattering on Sodium and therefore correlated to $q_{\rm Na}$. 
\begin{figure}[t] \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.1\columnwidth]{./figures/DAMAmIn_NFW_2Dposteriorpart.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.1\columnwidth]{./figures/DAMAmInTr_NFW_2Dposteriorpart.png} \end{minipage} \caption{{\it Left}: 2D marginal posterior for DAMA in the parameters space \{$\delta,m_{\rm DM}$\} and the NFW density profile. {\it Right}: Same as left with the additional constraint of the total unmodulated rate. \label{fig:DamaTrNFW}} \end{figure} \begin{figure}[t] \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.1\columnwidth]{./figures/DAMAmInTr_NFW_1Dq.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.1\columnwidth]{./figures/DAMAmInTr_NFW_3DqI.png} \end{minipage} \caption{Inference for DAMA assuming the NFW density profile for the DM halo. {\it Left}: 1D marginal posterior pdf for the quenching factor of Sodium and Iodine, as labeled. {\it Right}: 3D marginal posterior for \{$\delta,m_{\rm DM},q_{\rm I}$\}, where the $q_{\rm I}$ direction is represented by the colour code. \label{fig:Damaq}} \end{figure} The dependence on the astrophysical observables and NFW density profile for inelastic scattering is shown in figure~\ref{fig:DamaNFWq} with the 3D marginal posteriors for \{$\delta, m_{\rm DM}$\} and a third parameter direction $v_0$, $v_{\rm esc}$ and $\rho_\odot$. The DAMA signal favours the high tail of the velocity distribution from the central panel, where the larger values of $v_{\rm esc}$ are preferred. From the left and right panel we see that in the `croissant'-shaped region the internal parts are due to circular velocity below $\bar{v}_0$ and DM density close to $0.2\ {\rm GeV cm^{-3}}$. 
The increase of $v_0$ and $\rho_\odot$ favours instead the outer parts of the region, for example very large mass splitting $\sim 190$ keV and masses around the TeV scale. \begin{figure}[t] \begin{minipage}[t]{0.33\textwidth} \centering \includegraphics[width=1.1\columnwidth]{./figures/DAMAmInTr_NFW_3Dv0.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.33\textwidth} \centering \includegraphics[width=1.1\columnwidth]{./figures/DAMAmInTr_NFW_3Dvesc.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.33\textwidth} \centering \includegraphics[width=1.1\columnwidth]{./figures/DAMAmInTr_NFW_3Drho.png} \end{minipage} \caption{Inference for DAMA assuming the NFW density profile for the DM halo. {\it Left}: 3D marginal posterior for \{$\delta,m_{\rm DM},v_0$\}, where the $v_0$ direction is represented by the colour code. {\it Center, right}: Same as left, but for \{$\delta,m_{\rm DM},v_{\rm esc}$\} and \{$\delta,m_{\rm DM},\rho_{\odot}$\} respectively. \label{fig:DamaNFWq}} \end{figure} \begin{table}[t!] \small \caption{1D posterior modes and $90\%$ credible intervals for the circular velocity $v_0$, escape velocity $v_{\rm esc}$, and the local DM density $\rho_{\odot}$ for DM density profiles considered in this work. 
\label{tab:allparams}} \begin{center} \lineup \begin{tabular}{ l | lll } \br & $v_0$ (${\rm km \ s}^{-1}$) & $v_{\rm esc}$ (${\rm km \ s}^{-1}$) & $\rho_{\odot}$ ($ {\rm GeV \ cm}^{-3}$) \\ \br \bfseries{Cored Isothermal} & & &\\ DAMA & $211^{+25}_{-18}$ & $629_{-20}^{+22}$ &$0.31_{-0.03}^{+0.05}$ \\ CDMS & $211_{-19}^{+26}$ & $629_{-21}^{+22}$ & $0.31 \pm 0.04$ \\ Xe100 &$210_{-19}^{+27}$ &$628_{-19}^{+25}$ &$0.31_{-0.03}^{+0.05}$ \\ CRESST &$210_{-18}^{+27}$ &$628_{-20}^{+23}$ & $0.31_{-0.04}^{+0.05}$ \\ \mr {\bfseries NFW} & & & \\ DAMA & $221^{+40}_{-23}$ & $558_{-18}^{+20}$ & $0.38_{-0.10}^{+0.16}$ \\ CDMS & $220_{-21}^{+39}$ & $558_{-16}^{+19} $ & $0.38_{-0.10}^{+0.14}$ \\ Xe100 &$221_{-22}^{+39}$ &$557_{-21}^{+25}$ & $0.38_{-0.11}^{+0.14}$ \\ CRESST &$220_{-21}^{+42}$ &$558_{-17}^{+21}$ & $0.38_{-0.10}^{+0.16}$ \\ \br \end{tabular} \end{center} \end{table} An analogous behavior holds for velocity distributions arising from the cored isothermal halo. The main difference is that this latter prefers in particular the very high end of the observationally allowed escape velocities. In table~\ref{tab:allparams} the preferred values for the astrophysical observables are indicated for both the NFW dark matter profile and the cored isothermal one. We underline that the difference in the preferred values of $v_{\rm esc}$ will play a role in case of inelastic scattering, even if the statistical significance in the difference of the preferred $v_{\rm esc}$ values is small. Indeed in figure~\ref{fig:Sall} we show all the experimental constraints and the DAMA region in a single plot, on the left for NFW profile and on the right for isothermal cored DM density profile. Firstly we note that the NFW profile favours larger splitting for fitting DAMA with respect to the isothermal profile and secondly the low mass region is larger. 
The exclusion limits for CDMS (blue dashed), Xe100 (pink dot-dashed) and CRESST (black dotted) are $90_S\%$ confidence intervals and all the region on the left of the curve is excluded. As a general remark the NFW prefers smaller $v_{\rm esc}$, namely the tail of the velocity distribution is constituted by less high speed particles than the isothermal one: there is less room for the detectors to be sensitive to inelastic scattering and therefore the exclusion limits are less constraining. Regarding the NFW profile, up to masses of 80 GeV the most constraining upper bound is that of CDMS, which then gives way to Xe100, excluding all DM masses above 316 GeV. The trend for the isothermal profile is the same, except that the CDMS and Xe100 bounds intersect at 50 GeV and all masses above 251 GeV are excluded by Xe100. The transparent region below 50 GeV is excluded by the LEPII constraint on the $Z$ decay width, while the orange region above 56 TeV is excluded by requiring unitarity of the $S$ matrix~\cite{Griest:1989wd}. The CRESST upper bound is comparable to the Xe100 one up to masses of 300 GeV, because even though it has a much smaller total exposure the W is heavier than Xe. For larger masses the effect of the larger total exposure of Xe100 dominates. In terms of SDDM, an asymmetric candidate is completely excluded: the parameter space that survives the $\chi_0-\bar{\chi}_0$ oscillation bound is severely disfavoured by Xe100, for both DM density profiles. In case of the inert doublet model as standard thermal relic there is still room from 45 GeV up to 300 GeV for a NFW profile and 250 GeV for an isothermal halo. It has been shown that this mass range provides the correct WMAP relic abundance thanks to three body annihilation channels~\cite{Honorez:2010re,LopezHonorez:2010tb} and is in the reach of LHC~\cite{Dolle:2009ft,Miao:2010rg}. 
On the contrary, the asymmetric fermionic doublet is a good DM candidate up to 200 or 300 GeV depending on the DM density profile, in addition of satisfying the DM to baryon ratio. \begin{figure} \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.1\columnwidth]{./figures/AsymmScalar_NFWTr.png} \end{minipage} \hspace*{-0.2cm} \begin{minipage}[t]{0.5\textwidth} \centering \includegraphics[width=1.1\columnwidth]{./figures/AsymmScalar_ISOTr.png} \end{minipage} \caption{{\it Left}: 2D credible regions for the individual experimental bounds and regions assuming the NFW DM density profile and marginalizing over the astrophysical uncertainties, combined in a single plot. For DAMA (shaded) we show the 90\% and 99\% contours. The $90_S\%$ contours are given by respectively the pink dot-dash curve for Xe100, the dashed blue line for CDMS and the dark green one denotes CRESST. The region below $M_\chi =$ 2 TeV is excluded by $\chi_0-\bar{\chi}_0$ oscillation (horizontal red solid line), while the orange/dark grey region is excluded by unitarity bound and below the dashed gray line by the LEP constraints on the Z decay width. {\it Right}: same as left but for the isothermal cored DM density profile. \label{fig:Sall}} \end{figure} \section{Conclusions}\label{sec:concl} We proposed a simple extension of the SM model to explain the observed ratio $\Omega_{\rm DM}/\Omega_B \approx 5$ as given by WMAP. We extended the SM by including two heavy triplet scalars whose partial decay to SM leptons and inert (odd under a $Z_2$ symmetry) doublet scalars ($\chi$), or vector like fermions ($\psi$), could explain a common origin of asymmetric dark matter and visible matter via leptogenesis route. Moreover, the induced vev of the triplets also gave rise to neutrino masses, as required by the oscillation experiments, via the type-II seesaw mechanism. Thus a triple unification of asymmetric dark matter, leptogenesis and neutrino masses could be achieved. 
We studied the relevant annihilation and scattering processes that arise in the model. The asymmetry in case of inert scalar ($\chi$) doublet dark matter (SDDM) gets strongly depleted by the contact annihilation process $\chi \chi \to H H$ mediated via the $\lambda_5$ coupling. Therefore, the survival of the asymmetry in case of inert SDDM required $\lambda_5 < 10^{-5}$. Besides that we showed that $\lambda_5 \sim 10^{-7}$ is required for the annual modulation signal at DAMA while restoring the asymmetry. On the other hand, the inert fermion ($\psi$) doublet dark matter (FDDM) does not undergo any further depletion of asymmetry in comparison to leptons. A strong constraint arose on the mass scale of inert SDDM from the rapid oscillation between $\chi_0$ and its complex conjugate $\overline{\chi_0}$. Below the EW phase transition the fast oscillation between $\chi_0$ and $\overline{\chi_0}$ depletes the asymmetry strongly. Therefore, the survival of the asymmetry in case of SDDM led to its mass $M_\chi \gtrsim 2 {\rm \ TeV}$ so that it freezes out before it begins to oscillate. On the other hand, in case of inert FDDM, the survival of asymmetry does not depend on its mass apart from the LEP constraint that $M_\psi \gtrsim M_Z/2$. Hence a ${\mathcal O}(100) {\rm \ GeV}$ dark matter is allowed. We then numerically solved the relevant Boltzmann equations to estimate the efficiency factors of DM and lepton in either scenario, for a fixed scalar triplet mass of $M_1=10^{10}$ GeV. The model parameter space has been systematically investigated via MCMC techniques. We have singled out the preferred regions in the parameter space that lead to a successful leptogenesis and to an asymmetric DM, namely satisfying $\Omega_{\rm DM}/\Omega_b$ and $n_b/n_\gamma$ ratios. 
We showed that: \begin{enumerate} \item dark matter masses, irrespective of SDDM or FDDM, up to $\mathcal{O}({\rm \ TeV})$ can fulfill the requirement of $\Omega_{\rm DM}/\Omega_b$, \item for observed BAU and asymmetric dark matter large $B_L$ and small $B_{\rm DM}$ are preferred. In particular for $B_L\to 1$ and $B_{\rm DM} \to 10^{-5}$ the efficiency ratio $\eta_{\rm DM}/\eta_L$ approaches its maximum value. \end{enumerate} The survival of asymmetry in the dark sector leads to inelastic dark matter because the elastic scattering is subdominant in both (SDDM and FDDM) cases. In case of SDDM the small coupling $\lambda_5 \sim 10^{-7} $ gave rise to a mass difference between the excited state and ground state of DM of ${\mathcal O}(100) {\rm \ keV}$. On the other hand, in case of inert FDDM, the ${\mathcal O}(100) {\rm \ keV}$ mass difference between the ground state and excited state of DM is provided by its Majorana mass induced by the triplet scalar. By performing a Bayesian analysis we found that an asymmetric SDDM of mass larger than 2 TeV is strongly disfavoured by the Xenon100 data while an asymmetric FDDM of mass ${\mathcal O}(100) {\rm \ GeV}$ is suitable to explain the DAMA annual modulation signal while passing the latest constraint from the Xenon100 experiment. \ack N.S. would like to thank Jean-Marie Fr\`ere, Thomas Hambye and Michel Tytgat for useful discussions. C.A. acknowledges use of the cosmo computing resources at CP3 of Louvain University. N.S. is supported by the IISN and the Belgian Science Policy (IAP VI-11).
% 1,108,101,565,854
% arxiv
\section{Introduction}\label{sec1} With the advance of genotyping techniques, high density SNP (single nucleotide polymorphism) arrays are often used in current genetic studies. In such situations, test statistics (e.g., LOD scores or $p$-values) can be evaluated directly at each of the SNPs in order to map the quantitative/qualitative trait loci. We focus on such marker-based study in this paper. Given one trait and $p$ markers (e.g., SNPs), in order to assess the statistical significance of the most extreme test statistic, multiple tests across the $p$ markers need to be taken into account. In other words, we seek to evaluate the first step family-wise error rate (FWER), or the ``experiment-wise threshold'' [\citet{Churchill94}]. Because nearby markers often share similar genotype profiles, the simple Bonferroni correction is highly conservative. In contrast, the correlation structure among genotype profiles is preserved across permutations and thus is incorporated into permutation $p$-value estimation. Therefore, the permutation $p$-value is less conservative and has been widely used in genetic studies. Ideally, the \textit{true} permutation $p$-value can be calculated by enumerating all the possible permutations, calculating the proportion of the permutations where more extreme test statistics are observed. In each permutation, the trait is permuted, or equivalently, the genotype profiles of all the markers are permuted simultaneously. However, enumeration of the possible permutations is often computationally infeasible. Permutation $p$-values are often estimated by randomly permuting the trait a large number of times, which can still be computationally intensive. For example, to accurately estimate a permutation $p$-value of 0.01, as many as 1000 permutations may be needed [\citet{Barnard63}, \citet{Marriott79}]. 
In studies of gene expression quantitative trait loci (eQTL), efficient permutation $p$-value estimation methods become even more important, because in addition to the multiple tests across genetic markers, multiple tests across tens of thousands of gene expression traits need to be considered [\citet{Kendzioriski06a}, \citet {Kendzioriski06b}]. One solution is a two-step procedure, which concerns the most significant eQTL for each expression trait. First, the permutation $p$-value for the most significant linkage/association of each expression trait is obtained, which takes account of the multiple tests across the genotype profiles. Second, a permutation $p$-value threshold is chosen based on a false discovery rate (FDR) [\citet{Benjamini95}, \citet{Efron01}, \citet{Storey03}]. This latter step takes account of the multiple tests across the expression traits. Following this approach, the computational demand increases dramatically, not only because there are a large number of expression traits and genetic markers, but also because stringent permutation $p$-value threshold, and therefore more permutations must be applied to achieve the desired FDR. In order to alleviate the computational burden of permutation tests, many eQTL studies have merged the test statistics from all the permuted gene expression traits to form a common null distribution, which, as suggested by empirical studies, may not be appropriate [\citet{Carlborg05}]. In this paper we estimate the permutation $p$-value for each gene expression trait separately. In order to avoid the large number of permutations, some computationally efficient alternatives have been proposed. \citet {Nyholt04} proposed to estimate the effective number of independent genotype profiles (hence the effective number of independent tests) by eigen-value decomposition of the correlation matrix of all the observed genotype profiles. 
Empirical results have shown that, while Nyholt's procedure can provide an approximation of the permutation $p$-value, it is not a replacement for permutation testing [\citet {Salyakina05}]. In this study we also demonstrate that the effective number of independent tests is related to the significance level. Some test statistics (e.g., score test statistics) from multiple tests asymptotically follow a multivariate normal distribution, and adjusted $p$-values can be directly calculated [\citet{Conneely07}]. However, currently at most 1000 tests can be handled simultaneously, due to the limitation of multivariate normal integration [\citet{GenZ00}]. \citet{Lin05} has proposed to estimate the significance of test statistics by simulating them from the asymptotic distribution under the null hypothesis, while preserving the covariance structure. This approach can handle a larger number of simultaneous tests efficiently, but it has not been scaled up to hundreds of thousands of tests, and its stability and appropriateness of asymptotics have not been validated in this context. In this paper we present a geometric interpretation of permutation $p$-values and a permutation $p$-value estimation method based on this geometric interpretation. Our estimation method does not rely on any asymptotic property and, thus, it can be applied when the sample size is small, or when the distribution of the test statistic is unknown. The computational cost of our method is constant, regardless of the significance level. Therefore, we can estimate very small permutation \mbox{$p$-values}, for example, $10^{-8}$ or less, while estimation by direct permutations or even by simulation of test statistics may not be computationally feasible. In principle, our approach can be applied to the data of association studies as well as linkage studies. However, the high correlation of test statistics in nearby genomic regions plays a key role in our approach. 
Thus, the application to linkage data is more straightforward. We restrict our discussion to binary genotype data, which only take two values. Such data include many important classes of experiments: study of haploid organisms, backcross populations and recombinant inbred strains. This restriction simplifies the computation so that an efficient permutation $p$-value estimation algorithm can be developed. However, the general concept of our method is applicable to any categorical or numerical genotype data. The remainder of this paper is organized as follows. In Section \ref{sec2} we first present the problem setup, followed by an intuitive interpretation of our method, and finally we describe the more complicated algebraic details. In Section \ref{sec3} we validate our method by comparing the estimated permutation $p$-values with the direct values obtained by a large number of permutations. We also compare the permutation $p$-values with the nominal $p$-values to assess the effective number of independent tests. Finally, we discuss the limitations of our method, and suggest possible improvements. \section{Methods}\label{sec2} \subsection{Notation and problem setup} Suppose there are $p$ markers genotyped in $n$ individuals. The trait of interest is a vector across the $n$ individuals, denoted by $y = (y_1,\ldots, y_n)$, where $y_i$ is the trait value of the $i$th individual. The genotype profile of each marker is also a vector across the $n$ individuals. Throughout this paper, we use the term ``genotype profile'' to denote the genotype profile of one marker, instead of the genotype profile of one individual. Thus, a genotype profile is a point in the $n$-dimensional space. We denote the entire genotype space as $\Omega$, which includes $2^n$ distinct genotype profiles. As mentioned in the \hyperref[sec1]{Introduction}, we restrict our discussion to binary genotype data, which only take two values. Without loss of generality, we assume the two values are 0 and 1. 
Let $m_1 = (m_{11},\ldots, m_{1n})$ and $m_2 = (m_{21},\ldots, m_{2n})$ be two genotype profiles. We measure the distance between $m_1$ and $m_2$ by Manhattan distance, that is, \[ d _{\mathrm{M}} (m_{1} ,m_{2} ) \equiv\sum_{i=1}^{n} |m_{1i} -m_{2i} | . \] We employ Manhattan distance because it is easy to compute and it has an intuitive explanation: the number of individuals with different genotypes. In our algorithm the distance measure is only used to group genotype profiles according to their distances to a point in the genotype space. Therefore, any distance measure that is a monotone transformation of Manhattan distance leads to the same grouping of the genotype profiles, hence the same estimate of the permutation $p$-value. For binary genotype data, any distance measure $ ( \sum _{i=1}^{n} |m_{1i} -m_{2i} |^{\tau_1} )^{\tau_2}$ $(\forall\tau_1, \tau_2 > 0)$ is a monotone transformation of Manhattan distance. We note, however, this is not true for categorical genotype data with more than two levels. For example, suppose the genotype of a biallelic marker is coded by the number of minor allele. Consider three biallelic markers with genotypes measured in three individuals: $m_1=(0, 0, 0)$, $m_2=(0, 2, 0)$ and $m_3=(1, 1, 1)$. By Manhattan distance, $d _{\mathrm{M}} (m_{1} ,m_{2} ) = 2 < d _{\mathrm{M}} (m_{1} ,m_{3} ) = 3$. However, by Euclidean distance, $d (m_{1} ,m_{2} ) = 2 > d (m_{1} ,m_{3} ) = \sqrt{3}$. Therefore, different distance measures may not be equivalent and the optimal distance measure should be the one that is best correlated with the test-statistic. In the following discussions we assume one test statistic has been computed for each marker (locus). Our method can estimate permutation $p$-value for any test statistic. For the simplicity of presentation, throughout this paper we assume the test statistic is the nominal $p$-value. 
\subsection{A geometric interpretation of permutation $p$-values} One fundamental concept of our method is a so-called ``significance set.'' Let $\alpha$ be a genome-wide threshold used for the collection of nominal $p$-values from all the markers. A \textit{significance set} $\Phi(\alpha)$ denotes, for a fixed trait of interest, the set of possible genotype profiles (whether or not actually observed) with nominal $p$-values no larger than~$\alpha$. Similarly, we denote such genotype profiles in the $i$th permutation as $\Phi_i(\alpha)$. Since permuting the trait is equivalent to permuting all the genotype profiles simultaneously, $\Phi_i(\alpha)$ is simply a permutation of $\Phi(\alpha)$. Whether any nominal $p$-value no larger than $\alpha$ is observed in the $i$th permutation is equivalent to whether $\Phi_i(\alpha)$ captures at least one observed genotype profile. With this concept of a significance set, we can introduce the geometric interpretation of the permutation $p$-value: \textit{The permutation $p$-value for nominal $p$-value $\alpha$ is, by definition, the proportion of permutations where at least one nominal $p$-value is no larger than $\alpha$. This is equivalent to the proportion of $\{\Phi_i(\alpha)\}$ that capture at least one observed genotype profile. Therefore, the permutation $p$-value depends on the distribution of the genotype profiles within $\Phi_i(\alpha)$ and the distribution of the observed genotype profiles in the entire genotype space.} Intuitively, the permutation $p$-value depends on the trait, the observed genotype profiles and the nominal $p$-value cutoff $\alpha$. In our geometric interpretation we summarize these inputs by two distributions: the distribution of all the observed genotype profiles in the entire genotype space, and the distribution of the genotype profiles in $\Phi_i(\alpha)$, which include the information from the trait and the nominal $p$-value cutoff $\alpha$. 
We first consider the genotype profiles in $\Phi_{i}(\alpha)$. For any reasonably small $\alpha$ (e.g., $\alpha=0.01$), all the genotype profiles in $\Phi_{i}(\alpha)$ should be correlated, since they are all correlated with the trait of interest. Therefore, we can imagine these genotype profiles in $\Phi_i(\alpha)$ are ``close'' to each other in the genotype space and form a cluster (or two clusters if we separately consider the genotype profiles positively or negatively correlated with the trait). In later discussions we show that under some conditions, the shape of one cluster is approximately a hypersphere in the genotype space. Then, in order to characterize $\Phi_i(\alpha)$, we need only know the center and radius of the corresponding hyperspheres. In more general situations where $\Phi_i(\alpha)$ cannot be approximated by hyperspheres, we can still define its center and further characterize the genotype profiles in $\Phi_i(\alpha)$ by a probability distribution: $P(r, \alpha)$, which is the probability a genotype profile belongs to $\Phi_i(\alpha)$, given its distance to the center of $\Phi_i(\alpha)$ is $r$ (Figure \ref{fig1}A). We summarize the information across all the $\Phi_i(\alpha)$'s to estimate permutation $p$-values. Since $\{\Phi_{i}(\alpha)\}$ is a one-to-one mapping of all the permutations, we actually estimate permutation $p$-values by acquiring all the permutations. Therefore, the computational cost is constant regardless of $\alpha$. We show this seemingly impossible task is actually doable. First, because permutation preserves distances among genotype profiles, the probability distributions from all the significance sets $\{\Phi(\alpha), \Phi_i(\alpha)\}$ are the same. Therefore, we only need to calculate it once. Second, the remaining task is to count the qualifying significance sets, which can be calculated efficiently using combinations, with some approximations. 
\begin{figure} \includegraphics{298f01.eps} \caption{A two-dimensional schematic representation of the geometric interpretation of permutation $p$-value, reflecting genotype profiles that actually reside in $2^n$-space. \textup{(A)} In the general situation, the function $P(r, \alpha)$, shown in grayscale, decreases with distance from the center of a significance set. Under the hypersphere assumption, $P(r, \alpha)$ is either 0 or 1, thus, it can be illustrated by a hypersphere surrounding the center of the significance set. \textup{(B)} The space occupied by the series of markers is calculated serially. Denote the neighborhood region of the $h$th marker as $B_h$. Then the contribution of the $h$th marker to $\Psi(r_{\alpha})$ is approximated by $B_h \backslash(B_h \cap B_{h-1})$, where ``$\backslash$'' indicates set difference. As indicated by the darker shade, this serial counting approximation is not exact when $(B_h \cap B_k) \protect\notin(B_h \cap B_{h-1})$, for any $k < h -1$. Note the dot in \textup{(A)} is the center of a significance set, while the dots in \textup{(B)} are the observed marker genotype profiles.} \label{fig1} \end{figure} The distribution of the observed genotype profiles in the genotype space depends on the number of the observed genotype profiles and their correlation structure. Since $\Phi_i(\alpha)$ may be thought of as randomly located in the genotype space in each permutation, on average, the chance that $\Phi_i(\alpha)$ captures at least one observed genotype profile depends on how much ``space'' the observed genotype profiles occupy. We argue that such space includes the observed genotype profiles as well as their neighborhood regions. How to define the neighborhood regions? We first consider the conceptually simple situation that $\Phi_i(\alpha)$ forms a hypersphere of radius~$r_{\alpha }$, where the subscript $\alpha$ indicates that $r_{\alpha}$ is a function of $\alpha$. 
Then $\Phi_i(\alpha)$ captures an observed genotype profile $m_1$ if its center is within the hypersphere centered at $m_1$ with radius $r_{\alpha} $. Therefore, the neighborhood region of $m_1$ is a hypersphere of radius $r_{\alpha} $. We take the union of the neighborhood regions of all the observed genotype profiles and denote it by $\Psi(r_{\alpha})$ (Figure \ref{fig1}B). Then we can evaluate permutation $p$-values by calculating the proportion of significance sets with their centers within $\Psi(r_{\alpha})$. In the general situation where the hypersphere assumption does not hold, a significance set $\Phi_i(\alpha)$ is characterized by a probability distribution $P(r, \alpha)$. Instead of counting a significance set by 0 or 1, we count the probability it captures at least one observed genotype profile. We will discuss this estimation method more rigorously in the following sections. Before presenting the algebraic details, we emphasize that our method uses the entire set of the observed genotypes profiles simultaneously. Specifically, the correlation structure of all the genotype profiles is incorporated into the construction of $\Psi(r_{\alpha})$. The higher the correlations between the observed genotype profiles, the more the corresponding neighborhood regions overlap (Figure \ref{fig1}). This in turn produces a smaller space $\Psi(r_{\alpha})$, and thus a smaller permutation $p$-value. In the extreme case when all the observed genotype profiles are the same, there is effectively only one test and the permutation $p$-value should be close to the nominal $p$-value. \subsection{From significance set to best partition} Explicitly recording all the elements in all the significance sets is not computationally feasible. 
We instead characterize each significance set by a best partition, which can be understood as the center of the significance set, and a probability distribution: the probability that one genotype profile belongs to the significance set, given its distance to the best partition. We first define best partition. The \textit{best partition} for $\Phi (\alpha)$ [or $\Phi_i(\alpha)$] is a partition of the samples that is most significantly associated with the trait (or the $i$th permutation of the trait). For a binary trait, the trait itself provides the best partition. For a quantitative trait, we generate the best partition by assigning the smallest $t$-values to one phenotype class and the other $(n-t)$-values to another phenotype class. We typically use $t = n/2$ as a robust choice. The robustness of this choice is illustrated by the empirical evidence in the Supplementary Materials [\citet{Sun09}]. Given $t$, we refer to all the possible best partitions (partitions that divide the $n$ individuals into two groups of size $t$ and $n-t$) as \textit{desired partitions}. The total number of distinct desired partitions, denoted by $N_p$, is \begin{equation} \label{3np} N_{p} =\cases{ \pmatrix{n \cr t}, &\quad if $t\ne n/2$,\vspace*{2pt}\cr \dfrac{1}{2} \pmatrix{n \cr t}, &\quad if $t=n/2$.} \end{equation} When $t=n/2$, there are ${n \choose t}$ ways to choose $t$ individuals, but two such choices correspond to one partition, that is why we need the factor $1/2$. For a binary trait, the desired partitions and the significance sets have one-to-one correspondence and, thus, $N_p$ is the total number of significance sets (or the total number of permutations). For a quantitative trait, $N_p$ is much smaller than the total number of significance sets. In fact, each desired partition corresponds to $t!(n-t)!$ distinct significance sets (or permutations). 
Since we restrict our study to binary genotypes, this definition of best partition can be understood as the projection of the trait into the genotype space. This projection is necessary to utilize the geometric interpretation of permutation $p$-value. Note the best partition does not replace the trait since the trait data is still used in calculating $P(r, \alpha)$. The projection of the trait into the genotype space is less straightforward when the genotype has three or more levels, though it is still feasible. Further theoretical and empirical studies are needed for such genotype data. Next, we study the probability that one genotype profile belongs to a significance set given its distance to the best partition of the significance set. Each desired partition, denoted as $\mathit{DP}_j$, has perfect correspondence with two genotype profiles, depending on whether the first $t$-values are 0 or 1. We denote these two genotype profiles as $m_{j}^0$ and $m_{j}^1$, respectively. The distance between one genotype profile $m_1$ and one desired partition $\mathit{DP}_j$ is defined as \[ d_{\mathrm{M}} (m_{1} ,\mathit{DP}_{j} ) \equiv\min _{a=0,1} \{d_{\mathrm{M}} (m_{1} ,m_{j}^{a} )\} . \] Suppose $\mathit{DP}_j$ is the best partition of the significance set $\Phi _i(\alpha)$. In general, the smaller the distance from a genotype profile to $\mathit{DP}_j$, the greater the chance it falls into $\Phi_i(\alpha )$. Thus, the genotype profiles in $\Phi_i(\alpha)$ form two clusters, centered on $m_{j}^0$ and $m_{j}^1$, respectively. The probability distribution we are interested in is \[ \Pr\bigl(m_{1} \in\Phi_i(\alpha) |\forall m_{1} \in\Omega, d _{\mathrm{M}} (m_{1} ,\mathit{DP}_{j} )=r \bigr) . \] This probability certainly depends on the trait $y$. However, because all of our inference is conducted on $y$, we have suppressed $y$ in the notation. A similar probability distribution can be defined for the significance set $\Phi(\alpha)$. 
Because the permutation-based mapping $\Phi (\alpha) \rightarrow\Phi_i(\alpha)$ preserves distances, the distributions for $\Phi(\alpha)$ and $\Phi_i(\alpha)$ are the same and, thus, we need only quantify the distribution for $\Phi(\alpha)$. We denote the best partition of the unpermuted trait $y$ as $\mathit{DP}_y$, and denote the two genotype profiles corresponding to $\mathit{DP}_y$ as $m_{y}^0$ and $m_{y}^1$, then we define the distribution as follows: \begin{equation} \label{equ1} P(r,\alpha) \equiv\Pr\bigl(m_{1} \in\Phi(\alpha) |\forall m_{1} \in\Omega, d _{\mathrm{M}} (m_{1} ,\mathit{DP}_{y} )=r \bigr) . \end{equation} Let \begin{equation} \label{equ2} P(m_{y}^{a} ,r, \alpha) \equiv\Pr\bigl(m_{1} \in\Phi(\alpha) |\forall m_{1} \in\Omega, d _{\mathrm{M}} (m_{1} ,m_{y}^{a} )=r \bigr), \end{equation} where $a = 0, 1$. We have the following conclusion. \begin{Proposition}\label{Proposition1} $P(r, \alpha) = P(m_{y}^0, r, \alpha) = P(m_{y}^1, r, \alpha)$ for any $r< n/2$. \end{Proposition} The proof is in the Supplementary Materials [\citet{Sun09}]. By Proposition \ref{Proposition1}, in order to estimate $P(r, \alpha)$, we can simply estimate $P(m_{y}^0, r, \alpha)$. Specifically, we first randomly generate $H$ genotype profiles $\{m_h\dvtx h = 1,\ldots, H\}$ so that $d _{\mathrm{M}} (m_h, m_{y}^0) = r$. To generate $m_h$, we flip the genotype of $m_{y}^0$ for $r$ randomly chosen individuals. Then $P(r, \alpha)$ is estimated by the proportion of \{$m_h$\} that yield nominal $p$-values no larger than $\alpha$. In summary, we characterize a significance set $\Phi_i(\alpha)$ by the corresponding best partition and the probability distribution $P(r, \alpha)$. All the distinct best partitions are collectively referred to as desired partitions. This characterization of significance sets has two advantages. First, the probability distribution $P(r, \alpha)$ is the same across all the significance sets, so we need only calculate it once. 
This is because the probability distribution relies on distance measure, which is preserved across significance sets (permutations). Second, for a quantitative trait, one desired partition corresponds to a large number of significance sets; therefore, we significantly reduce the dimension of the problem by considering desired partitions instead of significance sets. \subsection{Estimating permutation $p$-values under a hypersphere assumption} By the definition of a significance set, we can calculate the permutation $p$-value by counting the number of significance sets that capture at least one observed genotype profile. However, it is still computationally infeasible to examine all significance sets. Therefore, in the previous section we discuss how to summarize the significance sets by desired partitions and a common probability distribution. In this and the next sections, we study how to estimate permutation $p$-values by ``counting'' desired partitions. To better explain the technical details, we begin with a simplified situation, by assuming there is an $r_\alpha$ such that $P(r, \alpha)$ = 1 if $r \leq r_{\alpha}$ and $P(r, \alpha)$ = 0 otherwise. This is equivalent to assuming $\Phi(\alpha)$ or $\Phi_{i}(\alpha)$ occupies two hyperspheres with radius $r_{\alpha}$. This \textit{hypersphere assumption} turns out to be a reasonable approximation for a balanced binary trait (see Supplementary Materials [\citet{Sun09}]). Let $\{m_{o,k}, 1\leq k \leq p\}$ be the observed $p$ genotype profiles. We formally define the space occupied by the observed genotype profiles and their neighborhood regions as \[ \Psi(r_{\alpha} ) \equiv\Bigl\{ m_1\dvtx m_1\in\Omega, \min_{1\le k\le p} \{d _{\mathrm{M}}(m_1, m_{o,k})\} \le r_{\alpha} \Bigr\}, \] that is, all the possible genotype profiles within a fixed distance $r_{\alpha}$ from at least one of the observed genotype profiles. We have the following conclusion under the hypersphere assumption. 
\begin{Proposition}\label{Proposition2} Consider a significance set $\Phi_i(\alpha)$ occupying two hyperspheres centered at $m_{j}^0$ and $m_{j}^1$, respectively,\vspace*{1pt} with radius $r_{\alpha}$. $\Phi_i(\alpha)$ corresponds to one permutation of the trait. The minimum nominal $p$-value of this permutation is no larger than $\alpha $ iff at least one of $m_{j}^0$ and $m_{j}^1$ is within $\Psi (r_{\alpha})$. \end{Proposition} The proof is in the Supplementary Materials [\citet{Sun09}]. Based on Proposition \ref{Proposition2}, we can calculate the permutation $p$-value by counting the number of significance sets with at least one of its centers belonging to~$\Psi(r_{\alpha} )$. Note under this hypersphere assumption, for any fixed $\alpha$ (hence fixed $r_{\alpha}$), the significance sets are completely determined by the centers of the corresponding hyperspheres. Thus, there is a one-to-one mapping between significance sets and their centers, the desired partitions. Counting significance sets is equivalent to counting desired partitions. Therefore, we can estimate the permutation $p$-value by counting the number of desired partitions. Specifically, let the distances from all the observed genotype profiles to $\mathit{DP}_j$, sorted in ascending order, be $(r_{j1}, \ldots, r_{jp})$. Then under the hypersphere assumption, the permutation $p$-value for significance level $\alpha$ is \begin{equation} \label{eq:04} |\{ \mathit{DP}_{j} \dvtx r_{j1} \le r_{\alpha} \} |/N_{p} \equiv C(r_\alpha)/N_{p}, \end{equation} where $N_p$ is the total number of desired partitions, and $C(r_\alpha) \equiv|\{ \mathit{DP}_{j} \dvtx r_{j1} \le r_{\alpha} \} |$ is the number of desired partitions within a fixed distance $r_\alpha$ from at least one of the observed genotype profiles. The calculation of $C(r_ \alpha)$ will be discussed in the next section. We note that the hypersphere assumption is not perfect even for the balanced binary trait. 
We employ the hypersphere assumption to give a more intuitive explanation of our method. In the actual implementation of our method, even for a balanced binary trait, we still use the general approach to estimate permutation $p$-values, as described in the next section. \subsection{Estimating permutation $p$-values in general situations} In general situations where the hypersphere assumption does not hold, we estimate the permutation $p$-value by \begin{equation} \label{eq:05} \sum_{j} \Pr(\mathit{DP}_{j} ,\alpha) /N_{p} , \end{equation} where $\Pr(\mathit{DP}_j, \alpha)$ is the probability that the minimum nominal $p$-value $\leq$ $\alpha$ given $\mathit{DP}_j$ is the best partition. Equation (\ref{eq:05}) is a natural extension of equation (\ref{eq:04}) by replacing the counts with the summation of probabilities. It is worth noting that in the previous section, one desired partition corresponds to one significance set given the hypersphere assumption. However, in general situations, one desired partition may correspond to many significance sets. Therefore, $\Pr(\mathit{DP}_j, \alpha)$ is the average probability that the minimum nominal $p$-value $\leq$ $\alpha$ for all the significance sets centered at $\mathit{DP}_j$. Taking averages does not introduce any bias to permutation $p$-value estimation, because permutation $p$-value is itself an average. Here we just take the average in two steps. First, we average across all the significance sets (or permutations) corresponding to the same desired partition to estimate $\Pr(\mathit{DP}_j, \alpha)$. Second, we average across desired partitions. Let all the desired partitions whose distances to an observed genotype profile $m_{o,k}$ are no larger than $r$ be $B_k(r)$, that is, \[ B_k(r) \equiv\{ \mathit{DP}_j\dvtx d _{\mathrm{M}}(m_{o,k}, \mathit{DP}_j) \leq r\}, \] where $1 \leq k \leq p$. Assume the observed genotype profiles $\{m_{o,k}\}$ are ordered by the chromosomal locations of the corresponding markers. 
We employ the following two approximations to estimate $\sum_{j}\Pr(\mathit{DP}_{j} , \alpha) $: \begin{enumerate} \item\textit{shortest distance approximation}: \[ \Pr(\mathit{DP}_{j} ,\alpha)\approx P(r_{j1} ,\alpha), \] \item\textit{serial counting approximation}: \[ C(r)\approx C_{U} (r)\equiv\sum_{h=1}^{p} |B_{h} (r) |- \sum_{h=2}^{p} |B_{h}(r)\cap B_{h-1}(r) |, \] \end{enumerate} where $C(r)$ has been defined in equation (\ref{eq:04}). \begin{Proposition}\label{Proposition3} As long as $\alpha$ is reasonably small, for example, $\alpha< 0.05$, there exist $r_{L} <r_{U}$, such that $P(r,\alpha)=1$, if $r\le r_{L} $; $P(r,\alpha)=0$, if $r\ge r_{U} $. Given the shortest distance and the serial counting approximations, \begin{eqnarray} \label{eq:06} \sum_{j}\Pr(\mathit{DP}_{j} ,\alpha) &\approx& \sum_{j}P(r_{j1} ,\alpha) \nonumber\\[-8pt]\\[-8pt] &\approx& C_{U} (r_{L} )+\sum_{r=r_{L} +1}^{r_{U} -1} \bigl[P(r,\alpha) \bigl(C_{U} (r)-C_{U} (r-1) \bigr) \bigr].\nonumber \end{eqnarray} When $\alpha$ is extremely small, for example, $\alpha=10^{-20}$, it is possible $r_L=0$. We define $C_{U} (0)=0$ to incorporate this situation into equation (\ref{eq:06}). \end{Proposition} In the Supplementary Materials [\citet{Sun09}], we present the derivation of Proposition \ref{Proposition3}, as well as Propositions 4 and 5 that provide the algorithms to calculate $|B_h(r)|$ and $|B_h(r) \cap B_{h-1}(r)|$, respectively. Therefore, by Propositions~\ref{Proposition3}--5, we can estimate the permutation $p$-value by equation (\ref{eq:05}). The rationale of shortest distance approximation is as follows. If the space occupied by a significance set is approximately two hyperspheres, this approximation is exact. Otherwise, if $\alpha$ is small, which is the situation where direct permutation is computationally unfavorable, this approximation still tends to be accurate. 
This is because when $\alpha$ is smaller, the genotype profiles within the significance set are more similar and, hence, the significance set is better approximated by two hyperspheres. In Section \ref{sec3} we report extensive simulations to evaluate this approximation. The serial counting approximation can be justified by the property of genotype profiles from linkage data, and (with less accuracy) in some kinds of association data. In linkage studies, the similarity between genotype profiles is closely related to the physical distances, with conditional independence of genotypes between loci given the genotype at an intermediate locus. Therefore, the majority of the points in $B_{h}(r)\cap B_{h-k}(r)$ ($2 \leq k \leq h-1$) are already included in $B_{h}(r)\cap B_{h-1}(r)$ (Figure~\ref{fig1}B) and, thus, \[ B_{h}(r)\cap\biggl(\bigcup_{1 \leq k \leq h-1} B_k(r)\biggr) \approx B_{h}(r)\cap B_{h-1}(r). \] Then, we have \begin{eqnarray*} C(r) &=& \sum_{k=1}^p |B_{k}(r) | - \sum_{h=2}^p \biggl|B_{h}(r)\cap\biggl(\bigcup_{1 \leq k \leq h-1} B_k(r)\biggr) \biggr| \\ &\approx& \sum_{k=1}^p |B_{k}(r) | - \sum_{h=2}^p |B_{h}(r)\cap B_{h-1}(r) |. \end{eqnarray*} Our method has been implemented in an R package named permute.t, which can be downloaded from \href{http://www.bios.unc.edu/\textasciitilde wsun/software.htm}{http://www.bios.unc.edu/\textasciitilde wsun/software.htm}. \section{Results}\label{sec3} \subsection{Data} \setcounter{footnote}{2} We analyzed an eQTL data set of 112 yeast segregants generated from two parent strains [\citet{Brem05a}, \citet{Brem05b}]. Expression levels of 6229 genes and genotypes of 2956 SNPs were measured in each of the segregants. Yeast is a haploid organism and, thus, the genotype profile of each marker is a binary vector of 0's and 1's, indicating the parental strain from which the allele is inherited. 
We dropped 15 SNPs that had more than 10\% missing values, and then imputed the missing values in the remaining SNPs using the function fill.geno in R/qtl [\citet {Broman03}]. Finally, we combined the SNPs that have the same genotype profiles, resulting in 1017 distinct genotype profiles.\footnote{Most SNPs sharing the same genotype profiles are adjacent to each other, although there are 10~exceptions in which the SNPs with identical profiles are separated by a few other SNPs. In all the 10 exceptions, the gaps between the identical SNPs are less than 10 kb. We recorded the position of each combined genotype profile as the average of the corresponding SNPs' positions.} As expected, genotype profiles between chromosomes have little correlation (Figure \ref{fig2} in the Supplementary Materials [\citet{Sun09}]), while the correlations of genotype profiles within one chromosome are closely related to their physical proximity (Figure \ref{fig3} in the Supplementary Materials [\citet{Sun09}]). \subsection{Evaluation of the shortest distance approximation} We evaluate the shortest distance approximation $\Pr(\mathit{DP}_{j} ,\alpha )\approx P(r_{j1} ,\alpha)$ in this section. Because the permutation $p$-value is actually estimated by the average of\break $\Pr(\mathit{DP}_{j} ,\alpha )$ [equation~(\ref{eq:05})], it is sufficient to study the average of $\Pr(\mathit{DP}_{j} ,\alpha)$ across all the $\mathit{DP}_j$'s having the same $r_{j1}$. Specifically, we simulated 50 desired partitions $\{\mathit{DP}_j, j = 1,\ldots, 50\}$ such that, for each $\mathit{DP}_j$, $r_{j1} = r$. Suppose $\mathit{DP}_j$ divides the $n$ individuals into two groups of size $t$ and $n-t$; then $\mathit{DP}_j$ is consistent with $t!(n-t)!$ permutations of the trait. We randomly sampled 1000 such permutations to estimate $\Pr (\mathit{DP}_{j} ,\alpha)$. We then took the average of these 50 $\Pr(\mathit{DP}_{j} ,\alpha)$'s, denoted it as $\bar{\rho}(r)$, and compared it with $P(r, \alpha)$. 
We randomly selected 88 gene expression traits. For each gene expression trait, we chose $\alpha$ to be the smallest nominal $p$-value (from $t$-tests) across all the 1017 genotype profiles. We first estimated $P(r, \alpha)$ and $\bar{\rho}(r)$, and then examined the ratio $P(r, \alpha)/\bar{\rho}(r)$ at three distances $r_{i} $, $i=1, 2, 3$, where $r_i = \arg\min_r \{|P(r, \alpha) - 0.25i|\}$, that is, the approximate 1st quartile, median and 3rd quartile of $P(r, \alpha)$ when $P(r, \alpha)$ is between 0 and 1 (Figure \ref{fig2}). For the \begin{figure} \includegraphics{298f02.eps} \caption{Evaluation of the shortest distance approximation using 88 randomly selected gene expression traits. For each gene expression trait, the ratio $P(r, \alpha)/\bar{\rho}(r)$ is plotted at three r's, which are approximately the 1st quartile, median and 3rd quartile of $P(r, \alpha)$ when $P(r, \alpha)$ is between 0 and 1. The vertical broken line indicates the nominal $p$-value $2\times10^{-4}$, which corresponds to genome-wide permutation $p$-value $0.05\sim0.10$.} \label{fig2} \end{figure} genes with larger nominal $p$-values, $P(r, \alpha)/\bar{\rho}(r)$ can be as small as 0.4. Thus, the shortest distance approximation is inaccurate. We suggest estimating the permutation $p$-values for the genes with larger nominal $p$-values by a small number of direct permutations, although, in practice, such nonsignificant genes may be of little interest. After excluding genes with nominal $p$-values larger than $2\times10^{-4}$, on average, $P(r, \alpha)/\bar{\rho}(r)$ is 0.80, 0.88, 0.95 for the 1st, 2nd and 3rd quartile respectively. We chose the threshold $2\times10^{-4}$ because it approximately corresponds to permutation $p$-value $0.05\sim0.10$ (see Section 3.4. Comparing permutation $p$-value and nominal $p$-value). It is worth emphasizing that when we estimate permutation \mbox{$p$-values}, we average across $\mathit{DP}_{j}$'s. 
In many cases, $P(r_{j1} ,\alpha) = 0$ or 1 and, thus, $\Pr(\mathit{DP}_{j} ,\alpha) = P(r_{j1} ,\alpha)$. Therefore, after taking the average across $\mathit{DP}_{j}$'s, the effects of those cases with small $P(r, \alpha)/\bar{\rho}(r)$ will be minimized. \subsection{Permutation $p$-value estimation for a balanced binary trait---evaluation of the serial counting approximation} Using the genotype data from the yeast eQTL data set, we performed a genome-wide scan of a simulated balanced binary trait, with 56 0's and 56 1's. The standard chi-square statistic was used to quantify the linkages. As we discussed before, for a balanced binary trait, the space occupied by a significance set is approximately two hyperspheres, and the shortest distance approximation is justified. This conclusion can also be validated empirically by examining $P(r, \alpha)$. As shown in Table 3 of the Supplementary Materials [\citet{Sun09}], for each $\alpha$, there is an $r_{\alpha}$, such that $P(r, \alpha)=1$ if $r \le r_{\alpha}$, and $P(r,\alpha) \approx0$ if $r > r_\alpha$. From the sharpness of the boundary we can see that a significance set indeed can be well approximated by two hyperspheres. Given that the shortest distance approximation is justified, we can evaluate the accuracy of the serial counting approximation by examining the accuracy of permutation $p$-value estimates. \begin{table} \caption{Comparison of permutation $p$-value estimates for a balanced binary trait. Values at the column of ``Permutation $p$-value'' are estimated via 500,000 permutations. 
Values at the columns ``Permutation $p$-value estimate I/II'' are estimated by our method before and after perturbing~the~locations of the SNPs}\label{table1} \begin{tabular*}{\tablewidth}{@{\extracolsep{\fill}}ld{1.10}d{1.10}d{1.10}@{}} \hline \textbf{Nominal} & \multicolumn{1}{c}{\textbf{Permutation}} & \multicolumn{1}{c}{\textbf{Permutation}} & \multicolumn{1}{c@{}}{\textbf{Permutation}} \\ \textbf{$\bolds p$-value} & \multicolumn{1}{c}{\textbf{$\bolds p$-value}} & \multicolumn{1}{c}{\textbf{$\bolds p$-value}} & \multicolumn{1}{c@{}}{\textbf{$\bolds p$-value}}\\ \textbf{cutoff} & & \multicolumn{1}{c}{\textbf{estimate I}} & \multicolumn{1}{c@{}}{\textbf{estimate II}}\\ \hline $10^{-3}$ & ,0.19 & ,0.21 & ,0.41 \\ $10^{-4}$ & ,0.02 & ,0.021 & ,0.039 \\ $10^{-5}$ & ,\mbox{$2.0\times10^{-3}$} & ,\mbox{$1.9\times10^{-3}$} & ,\mbox{$2.9\times10^{-3}$} \\ $10^{-6}$ & ,\mbox{$2.4\times10^{-4}$} & ,\mbox{$2.2\times10^{-4}$} & ,\mbox{$3.1\times10^{-4}$} \\ \hline \end{tabular*} \end{table} \begin{figure}[b] \includegraphics{298f03.eps} \caption{Comparison of permutation $p$-values estimated by our method (denoted as pe) or by direct permutations (denoted as pp) for 500 randomly selected gene expression traits (each gene corresponds to one point in the plot). \textup{(a)} Using the original genotype data. \textup{(b)} Using the location-perturbed genotype data. Each gene expression trait is permuted up to 500,000 times to estimate pp. Thus, the smallest permutation $p$-value is $2\times10^{-6}$, and we have more confidence for those permutation $p$-values bigger than $2\times10^{-4}$ (indicated by the vertical line). The degree of closeness of the points to the solid line ($y = x$) indicates the degree of consistency of the two methods. 
The two broken lines along the solid line are $y=x \pm \log _{10}(2)$ respectively, which, in the original $p$-value scale, are pe${}={}$0.5pp and pe${}={}$2pp, respectively.}\label{fig3} \end{figure} The accuracy of the serial counting approximation relies on the assumption that the adjacent genotype profiles are more similar than the distant ones. We dramatically violate this assumption by randomly ordering the SNPs in the yeast eQTL data. As shown in Table \ref{table1}, the permutation $p$-value estimates from the original genotype data are close to the permutation $p$-values estimated by direct permutations, whereas the estimates from the location-perturbed genotype data are systematically biased. \subsection{Permutation $p$-value estimation for quantitative traits} We randomly selected 500 gene expression traits to evaluate our permutation $p$-value estimation method in a systematic manner. We used $t$-tests to evaluate the linkages between gene expression traits and binary markers. For each gene expression trait, we first identified the genome-wide smallest $p$-value, and then estimated the corresponding permutation $p$-value by either our method or by direct permutations [Figure~\ref{fig3}(a)]. For those relatively larger permutation $p$-values ($>$0.1), the estimates from our method tend to be inflated. Some of them are even greater than 1. This is because the serial counting approximation is too loose for larger permutation \mbox{$p$-values}, due to the fact that each significance set occupies a relatively large space. Nevertheless, the two estimation methods give consistent results for those permutation $p$-values smaller than 0.1. We also estimated the permutation $p$-values after perturbing the order of the SNPs [Figure \ref{fig3}(b)]. As expected, the permutation \mbox{$p$-value} estimates are inflated. The advantage of our method is the improved computational efficiency. 
The computational burden of our method is constant no matter how small the permutation $p$-value is. To make a fair comparison, both our estimation method and direct permutation were implemented in C. In addition, for direct permutations, we carried out different numbers of permutations for different gene expression traits so that a large number of permutations were performed only if they were needed. Specifically, we permuted a gene expression trait 100, 1000, 5000, 10,000, 50,000 and 100,000 times if we had 99.99\% confidence that the permutation $p$-value of this gene was bigger than 0.1, 0.05, 0.02, 0.01, 0.002 and 0.001, respectively. Otherwise we permuted 500,000 times. It took 79 hours to run all the permutations. If we ran at most 100,000 permutations, it took about 20 hours. In contrast, our method only took 46 minutes. All the computation was done in a computing server with dual Xeon 2.4~GHz processors. \subsection{Comparing permutation $p$-values and nominal $p$-values} The results we will report in this section are the property of permutation $p$-values, instead of an artifact of our estimation method. However, using direct permutation, it is infeasible to estimate a very small permutation $p$-value, for example, $10^{-8}$ or less. In contrast, our estimation method can accurately estimate such permutation $p$-values efficiently.\footnote{Our method cannot estimate those extremely small permutation $p$-values such as $10^{-20}$ reliably. This is simply because only a few genotype profiles can yield such significant results even in the whole genotype space. Nevertheless, those results correspond to unambiguously significant findings even after Bonferroni correction. Therefore, permutation may not be needed. See the Supplementary Materials [\citet{Sun09}] for more details.} This enables a study of the relationship between permutation $p$-values and nominal $p$-values. Such a relationship can provide important guidance for the sample size or power of a new study. 
Let $x$ and $y$ be $\log_{10}$(nominal $p$-value) and $\log_{10}$(permutation $p$-value estimate) respectively. We compared $x$ and $y$ across the randomly selected 500 gene expression traits used in the previous section [Figure \ref{fig4}(a)] and found an approximate linear relation. \begin{figure} \includegraphics{298f04.eps} \caption{Comparison of permutation $p$-value estimates and nominal $p$-values. \textup{(a)} Scatter plot of permutation $p$-value estimates vs. nominal $p$-value in log10 scale for the 500 gene expression traits. Those unreliable permutation $p$-value estimates are indicated by ``$x$.'' See footnote 2 for explanation. \textup{(b)} Scatter plot for 483 gene expression traits with nominal $p$-value larger than $10^{-20}$. In both \textup{(a)} and \textup{(b)} the solid line is $y = x$. In \textup{(b)}, the broken line fitting the data is obtained by median regression for those 359 genes with nominal $p$-values between $10^{-10}$ and $10^{-3}$.}\label{fig4} \end{figure} We employed median regression (R function rq) to capture the linear pattern [Figure \ref{fig4}(b)].\footnote{Most genes whose fitted values differ from the observed values more than 2-folds are below the linear patterns. These genes often have more outliers than other genes, which may violate the $t$-test assumptions and bring bias to nominal $p$-values.} If the nominal $p$-value was too large or too small, the permutation $p$-value estimate might be inaccurate. Thus, we used the 359 gene expression traits with nominal $p$-value between $10^{-10}$ and $10^{-3}$ to fit the linear pattern (in fact, using all the 483 gene expression traits with nominal $p$-values larger than $10^{-20}$ yielded similar results, data not shown). The fitted linear relation is $y = 2.52 + 0.978 x$. Note $x$ and $y$ are in log scale. In terms of the $p$-values, the relation is $q = \eta p^\kappa= 327.5 p^{0.978}$, where $p$ and $q$ indicate nominal $p$-value and permutation $p$-value, respectively. 
If $\kappa=1$, ${q} = \eta p$, and $\eta$ can be interpreted as the effective number of independent tests (or the effective number of independent genotype profiles). However, the observation that $\kappa$ is close to but smaller than 1 (lower bound 0.960, upper bound 0.985) implies that the effective number of independent tests, which can be approximated by $q/p = \eta p^{\kappa-1} = \eta p^{-0.022}$, varies according to the nominal $p$-value $p$. For example, for $p = 10^{-3}$ and $10^{-6}$, the expected effective number of independent tests is approximately 381 and 444, respectively. The relation between the effective number of independent tests and the significance level can be explained by the geometric interpretation of permutation \mbox{$p$-values}. Given a nominal $p$-value cutoff, whether two genotype profiles correspond to two independent tests amounts to whether they can be covered by the same significance set. As the $p$-value cutoff becomes smaller, the significance set becomes smaller and, thus, the chance that two genotype profiles belong to one significance set is smaller. Therefore, smaller $p$-value cutoff corresponds to more independent tests. \section{Discussion}\label{sec4} In this paper we have proposed a geometric interpretation of permutation $p$-values and a method to estimate permutation $p$-values based on this interpretation. Both theoretical and empirical results show that our method can estimate permutation $p$-values reliably, except for those extremely small or relatively large ones. The extremely small permutation $p$-values correspond to even smaller nominal $p$-values, for example, $10^{-20}$. They indicate significant linkages/associations even after Bonferroni correction; therefore, permutation $p$-value evaluation is not needed. 
The relatively large permutation $p$-values, for example, those larger than 0.1, can be estimated by a small number of permutations, although in practice such nonsignificant cases may be of little interest. The major computational advantage of our method is that the computational time is constant regardless of the significance level. This computational advantage enables a study of the relation between nominal $p$-values and permutation $p$-values in a wide range. We find that the effective number of independent tests is not a constant; it increases as the nominal $p$-value cutoff becomes smaller. This interesting observation can be explained by the geometric interpretation of permutation $p$-values and can provide important guidance in designing new studies. Parallel computation is often used to improve the computational efficiency by distributing computation to multiple processors/computers. Both direct permutation and our estimation method can be implemented for parallel computation. In the studies involving a large number of traits (e.g., eQTL studies), one can simply distribute an equal number of traits to each processor. If there are only one or a few traits of interest, for direct permutation, one can distribute an equal number of permutations to each processor. For our estimation method, the most computationally demanding part (which takes more than 80\% of the computational time) is to estimate $P(r,\alpha)$, which can be parallelized by estimating $P(r,\alpha)$ for different $r$'s separately. Furthermore, for a particular $r$, $P(r,\alpha)$ is estimated by evaluating the nominal $p$-values for a large number of genotype profiles whose distances to the best partition are $r$. The computation can be further parallelized by evaluating nominal $p$-values for a subset of such genotype profiles in each processor. 
As we mentioned at the beginning of this paper, we focus on the genetic studies with high density markers, where the test statistics are evaluated on each of the genetic markers directly. Our permutation $p$-value estimation method cannot be directly applied to interval mapping [\citet{Lander89}, \citet{Zeng93}]. However, we believe that as the expense of SNP genotype array decreases, most genetic studies will utilize high density SNP arrays. In such situations, the interval mapping may be no longer necessary. We have discussed how to estimate the permutation $p$-value of the most significant linkage/association. Permutation $p$-values can also be used to assess the significance of each locus in multiple loci mapping. \citet{Doerge96} have proposed two permutation-based thresholds for multiple loci mapping, namely, the conditional empirical threshold (CET) and residual empirical threshold (RET). Suppose $k$ markers have been included in the genetic model, and we want to test the significance of the ($k+1$)th marker by permutation. The samples can be stratified into $2^k$ genotype classes based on the genotype of the $k$ markers that are already in the model (here we still assume genotype is a binary variable). CET is evaluated based on permutations within each genotype class. Alternatively, the residuals of the $k$-marker model can be used to test the significance of the ($k+1$)th marker. RET is calculated by permuting the residuals across the individuals. RET is more powerful than CET when the genetic model is correct since the permutations in RET are not restricted by the $2^k$ stratifications. Our permutation $p$-value estimation method can be applied to RET estimation without any modification, and it can also be used to estimate CET with some minor modifications. Specifically, let \textit{conditional desired partitions} be the desired partitions that can be generated by the conditional permutations. 
Then in equation (\ref{eq:05}), $N_{p} $ should be calculated as the number of conditional desired partitions instead of the total number of desired partitions. In equation (\ref {eq:06}), $P(r,\alpha)$ remains the same and $C_{U} (r)$ needs to be calculated by counting the number of conditional desired partitions within distance $r$ from at least one of the observed genotype profiles. There are some limitations in the current implementation of our method, which are also the directions of our future developments. First, we only discuss binary markers in this paper. The counting procedures in Propositions 4 and 5 (see Section~IV in the Supplementary Materials [\citet{Sun09}]) can be extended in a straightforward way to apply to the genotypes with three levels. However, some practical considerations need to be addressed carefully, for example, the definition of the distance between genotype profiles and the choice of the best partition. Second, the serial counting approximation relies on the assumption that the correlated genotype profiles are close to each other. This is true for genotype data in linkage studies, but in general is not true for association studies, where the proximity of correlated markers in haplotype blocks may be too coarse for immediate use. We are investigating a clustering algorithm to reorder the genotype profiles according to correlation rather than physical proximity. Finally, our work here points toward extensions to the use of continuous covariates, which can be applied, for example, to map gene expression traits to the raw measurements of copy number variations [\citet{Stranger07}]. \section*{Acknowledgments} We appreciate the constructive and insightful comments from the editors and the anonymous reviewers, which significantly improved this paper. We acknowledge funding from EPA RD833825. 
However, the research described in this article was not subjected to the Agency's peer review and policy review and therefore does not necessarily reflect the views of the Agency and no official endorsement should be inferred. \begin{supplement}[id=suppA] \stitle{Supplementary Methods and Results for ``A geometric interpretation of the permutation \textit{p}-value and its application in eQTL studies''} \slink[doi]{10.1214/09-AOAS298SUPP} \slink[url]{http://lib.stat.cmu.edu/aoas/298/supplement.pdf} \sdatatype{.pdf} \sdescription{The Supplementary Methods and Results include four sections: (1) Single marker analysis and the choice of ``best partition,'' (2) Description of genotype data, (3) Justification of the hypersphere assumption for the balanced binary trait, and (4) Propositions and the proofs.} \end{supplement}
% Extraction artifact: stray dataset metadata between the two concatenated documents,
% preserved here as comments so it does not typeset.
% 1,108,101,565,855
% arxiv
\section{Introduction} Precise observation of the Cosmic Microwave Background (CMB), as planned with the Planck space mission \citep{bersanelli00, lamarre00, tauber04}, is of the utmost importance for better understanding, and confronting with precise observational data, the hot big bang model and its theoretical predictions. In this theoretical framework, such observations also permit constraining the parameters of the model, as is currently done to a lesser extent by a number of previous experiments, such as COBE \citep{fixsen97}, WMAP \citep{komatsu09}, ACBAR \citep{reichardt09}, Archeops \citep{beno03,tristram05}, BOOMERANG \citep{mactavish06}, CBI \citep{sievers09}, QUaD \citep{quad08}, and VSA \citep{rebolo04}. With ever more sensitive instruments, the main source of uncertainty in CMB observations, rather than being instrumental noise, is the contamination of the observation by foreground emission. Astrophysical foregrounds comprise millimeter wave emission from the interstellar medium in our own galaxy, as well as emission from compact extragalactic sources. Component separation methods make use of the different emission laws of different astrophysical components to separate them through joint analysis of observations made at different wavelengths \citep{delabrouille07}. Among those methods, the so-called Internal Linear Combination (ILC), which makes few assumptions about the physical properties of the CMB and the foregrounds, has been widely used for the analysis of WMAP data \citep{tegmark03,eriksen04,delabrouille09,kim09}. An important assumption of the ILC is that the frequency scaling of the CMB is assumed to be known. This is, in principle, a safe assumption, as small temperature fluctuations $\Delta T$ of the CMB generate brightness fluctuations proportional to $\Delta T$, which scale in frequency like the derivative of a blackbody with respect to the temperature, at the well measured CMB temperature of $T=2.725$ K. 
However, calibration coefficients for each channel, which are a multiplicative factor for each frequency, introduce an uncertainty in the frequency scalings of the CMB component in presence of calibration errors. For space-based missions these uncertainties are typically small (well below 1\% for WMAP or Planck). More sophisticated methods for component separation have been extensively studied in the community of statistical signal processing for a variety of applications. These methods are part of a field of activity generically designated as Blind Source Separation (BSS), or equivalently Independent Component Analysis (ICA). ICA methods perform separation on the basis of the assumption that each of the available observations is a different linear mixture of a well defined number of statistically independent components. Such methods generically rely on no prior assumption on the scaling coefficients of the components in the different available observations (i.e. on the coefficients of each component in the `mixtures'). In fact, recovering these coefficients (the so--called `mixing matrix') is precisely the primary target of blind source separation. ICA methods, thus, do not typically assume perfect knowledge of the response of each channel to the CMB -- nor that the CMB contribution is the same in all channels. For CMB studies particularly, this type of approach has led to the development of a large variety of methods, including CCA \citep{bonaldi06, bedini05}, FastICA \citep{hyvarinen99}, SMICA \citep{delabrouille03, cardoso08} and GMCA \citep{bobin08}. These methods have been used on real observational data in a variety of contexts \citep{bonaldi07, maino06, patanchon05}, and compared extensively on simulated data sets \citep{leach08}. 
The two main differences between the ILC and ICA methods are the following: \begin{itemize} \item Whereas ICA is designed to extract the scaling coefficients of each of the identified components from the data themselves, the ILC assumes perfect knowledge of the scaling coefficients for the component of interest (CMB); \item The ILC does not make any assumption about the properties of foreground contamination, whereas ICA assumes that the data are satisfactorily described by a (noisy) linear mixture of independent components. \end{itemize} Clearly, these methods are bound to be more or less adapted to component separation, depending upon the actual properties of the data set and on the science objectives pursued. In the following we propose to investigate, using realistic simulations of sky emission and of observational data for WMAP and Planck, the relative performance of FastICA and ILC in the presence of calibration errors. Such calibration errors result in the violation of one of the assumptions of the ILC (the prior knowledge of the exact scaling coefficients of the CMB in the observations). By contrast, blind component separation methods are designed from first principles to estimate the scaling coefficients from the data, and in principle should not suffer much from calibration uncertainties. The rest of this paper is organized as follows. In section \ref{sec:ilcica} we describe the ILC and ICA component separation methods. We describe our methodology for comparing the methods in section \ref{sec:method}. In section \ref{sec:results} we present the results of our analysis, followed by our conclusions in section \ref{sec:conclusion}. We also provide a detailed calculation of the effect of calibration errors on the ILC in the appendix \ref{app:ilcweights}. 
\section{ILC and ICA}\label{sec:ilcica} In the following we assume that the available data (maps $x_i(p)$ of observed sky) can be written as \begin{equation} x_i(p) = a_i s(p) + n_i(p), \end{equation} where $s(p)$ is the map of the component of interest (the CMB), $p$ indexes pixels in the map, and $n_i(p)$ is the contribution from foregrounds and instrumental noise to the map $x_i(p)$. The coefficients $a_i$ scale the relative amplitude of the CMB map in the different available observations. For observations in thermodynamic units, and perfect calibration, we have $\forall i$, $a_i = 1$. \subsection{The ILC} The philosophy behind the ILC is to find the linear combination of the available maps $x_i$ which has minimal variance while retaining unit response to the CMB map. This linear combination, $\sum_i w_i x_i(p)$, is then an estimate $\hat s(p)$ of the true CMB map $s(p)$. The ILC weights $w_i$ are found by solving the problem of minimizing ${\rm var}{\sum_i w_i x_i(p)}$ under the constraint $\sum_i w_i = 1$. In principle, this last constraint guarantees unit response to the CMB, as we have: \begin{eqnarray} \hat s(p) &=& \sum_i w_i x_i(p) \cr &=& s(p) + \sum_i w_i n_i(p). \end{eqnarray} In the presence of foregrounds, which induce correlated errors from channel to channel, the ILC weights adjust themselves so that the linear combination cancels out as much of the foregrounds as possible. The actual weights, however, result from a trade-off between canceling foregrounds and allowing errors due to instrumental noise in the final map. The constrained minimization problem can be solved in a straightforward manner using a Lagrange multiplier method to impose $\sum_i w_i = 1$. The resulting weights are found to be: \begin{equation} {\bf w} = \frac{{\widehat{{\rm R}}}^{-1} \, \boldsymbol{a }}{\boldsymbol{a }^t \, {\widehat{{\rm R}}}^{-1} \, \boldsymbol{a }}, \end{equation} where ${\widehat{{\rm R}}}$ is the empirical covariance matrix of the observations. 
Note that we have used bold font to denote vectors, and have omitted the reference to the pixel value. From here on, this notation will be used. The ILC estimator of the CMB map $s(p)$ can be written as: \begin{equation} \hat{s}_{\rm ILC} = {\bf w}^t \boldsymbol{x } = \frac{\boldsymbol{a }^t \, {\widehat{{\rm R}}}^{-1}}{\boldsymbol{a }^t \, {\widehat{{\rm R}}}^{-1} \, \boldsymbol{a }} \, \boldsymbol{x }. \label{eq:ILC} \end{equation} The ILC weights, obviously, depend upon the assumed scaling coefficients $a_i$ for the component of interest. It is then clear that an error in the assumed scalings changes the ILC performance, but by how much? As the ILC attempts to minimize the total variance of the output map, the constraint that $\sum w_i a_i = 1$ plays a critical role in guaranteeing that the linear combination does not adjust its coefficients to cancel the CMB as well as foregrounds. It is foreseeable, then, that calibration errors could, in some cases, impact the performance of ILC more severely than just a small overall calibration error on the final output map. \subsection{FastICA} There is a wide choice of possible ICA methods to extract the CMB from multifrequency observations. In this paper, we make use of the standard FastICA algorithm as described in \citet{hyvarinen99}, with a few minor changes: \begin{itemize} \item We subtract an estimate of the instrument noise covariance matrix from the empirical covariance matrix of the data. \item Instead of leaving the estimated signal as being unit variance, we set the CMB scaling to be such that the sum of the weights is equal to one, mirroring the ILC method to ensure unit response to the CMB. \end{itemize} FastICA is based on the general principle that a sum of two different independent probability distributions will always tend to be more Gaussian than either of the distributions are independently. 
We can thus extract $N$ independent sources from $N$ channels of data by forming the linear combination of the $N$ channels which maximizes the non-Gaussianity of the extracted sources. A measure of the non-Gaussianity of each source is performed using functions such as: \begin{equation} Y(x) \propto \left[E\left\{G(x)\right\} - E\left\{G(y)\right\}\right]^2, \end{equation} where $x$ is data that has unit variance, and $y$ is a random variable drawn from a unit-variance Gaussian distribution. Here $E\{\}$ is the expectation value of the data set or probability distribution enclosed and $G(x)$ is some non-linear function. Popular choices include a Gaussian, a polynomial, or the logarithm of the hyperbolic cosine. Which specific choice is best depends upon precisely how the distribution of $x$ differs from a Gaussian, though it is clear that for any choice of $G(x)$, $Y(x)$ will be zero if $x$ is Gaussian-distributed, and positive definite otherwise. In the present paper, we use $G(x) = x^4$. FastICA assumes a model of the data of the form: \begin{equation} \boldsymbol{x } = {\rm A}\boldsymbol{s } + \boldsymbol{n }, \label{eqn:datamodel} \end{equation} where now vector $\boldsymbol{s }$ comprises all `sources' (CMB + foregrounds), and $\boldsymbol{n }$ is instrumental noise only (for all channels). The objective of the method is to evaluate the mixing matrix ${\rm A}$, and then use this estimate to invert the linear system. In order to optimize estimation of the mixing matrix that determines the linear combination of $x$ which represents the individual sources, FastICA also performs a pre-whitening step. This pre-whitening step exploits the assumption of statistical independence to perform a linear transformation on the data, which sets its covariance matrix to the identity by multiplying the data by the inverse square root of its covariance.
The mixing matrix then becomes a simple rotation matrix which, with its smaller number of degrees of freedom, is easier to estimate. For generating the pre-whitening matrix, we do not make direct use of the covariance matrix of the data, as with basic FastICA, but instead use the estimated covariance matrix of the signal as in \citet{maino02}. This can be understood simply by our modeling of the data (equation \ref{eqn:datamodel}). Given this data model, the covariance of the observations is: \begin{eqnarray} {\rm R}_x &=& \left<\left({\rm A}\boldsymbol{s } + \boldsymbol{n }\right)\left({\rm A}\boldsymbol{s } + \boldsymbol{n }\right)^t\right>\cr {\rm R}_x &=& {\rm A}{\rm R}_s{\rm A}^t + {\rm R}_n. \end{eqnarray} Here the correct covariance matrix to use to whiten the signal is ${\rm A}{\rm R}_s{\rm A}^t$, which we estimate as ${\rm R}_x - {\rm R}_n$. The channel-channel noise covariance ${\rm R}_n$ is taken as diagonal with the diagonal elements estimated from our knowledge of the per-pixel noise in each map combined with how much each map was smoothed. We have assumed that the signal and noise are uncorrelated in the above derivation. Having performed the pre-whitening, all extracted sources have unit variance and are uncorrelated. To determine the overall CMB scaling, we first determine which of the sources is the CMB, then use the ILC strategy of setting the sum of the CMB weights equal to one. This ensures that the level of the CMB in the output is, at least in the case of no calibration error, equal to the level of the CMB in the maps. \section{Method}\label{sec:method} We now turn to the investigation of the impact of calibration errors on component separation with ILC and FastICA. The approach of this investigation consists of generating simulated `observations', with varying calibration errors, noise levels, and frequency channels, and comparing the performance of ILC and FastICA at recovering the CMB map.
Performance is measured in several ways, based on the measurement of reconstruction errors of different types. Denoting as $s(p)$ the (beam-smoothed) CMB map used in the simulation, and as $\hat{s}(p)$ the CMB map obtained from processing the simulated data, the reconstruction error is $\hat{s}(p) - s(p)$. This reconstruction error arises from two terms. A multiplicative term (i.e. a global calibration error) and an additive term. We have $$ \hat{s}(p) = \alpha s(p) + c(p) $$ where $\alpha$ is the global calibration coefficient, and $c(p)$ the additive contamination by foregrounds and noise. Ideally, we aim at $\alpha = 1$ and $c(p) = 0$. In practice, in both ILC and ICA methods, the final map is reconstructed as a linear combination $\sum w_i x_i(p)$ of the input maps $x_i(p)$. Hence, for simulated data, one can compute easily $\alpha = \sum w_i a_i$ and $c(p) = \sum w_i n_i(p)$, where $n_i(p)$ are maps of the sum of noise and foregrounds in channel $i$. The comparison of the variance of the reconstruction error, of the overall response $\alpha$, and of the contamination $c(p)$ for ILC and ICA gives insight into the relative performance of the two, and of the main origin of error, in the presence of calibration uncertainties. \subsection{Simulations} In preparation for the forthcoming Planck space mission, simulations for the 9 Planck frequency channels, from 30 to 857 GHz, as described in the Planck `Bluebook'\footnote{\tt \tiny {http://www.rssd.esa.int/SA/PLANCK/docs/Bluebook-ESA-SCI(2005)1\_V2.pdf}}, are made. We also consider simulations in the WMAP frequency channels, between 23 and 94 GHz. Sky simulations are performed using the Planck Sky Model (PSM) package, version 1.6.3\footnote{\tt \tiny {http://www.apc.univ-paris7.fr/APC\_CS/Recherche/Adamis/PSM/psky-en.php}} and using the Healpix pixelization. In the simulated observations, we introduce a small calibration error, so that each of the sky maps is multiplied by a calibration coefficient.
We consider calibration errors $\delta a/a$ of 0.1, 0.2, 0.5, and 1\%, which implies calibration coefficients typically somewhere between 0.99 and 1.01.\footnote{The calibration error expected for Planck is less than 1\% up to the 353GHz channel, as given by the Planck `Bluebook'.} We work at the resolution of the lowest frequency channel in our simulations, i.e. 33 arcminute beams for Planck, and 54 arcminute beams for WMAP. Noise compatible with what is expected for the two instruments, for maps smoothed at the resolution of the lowest frequency channel, is added to the sky emission. We then separate components with both an ILC and with FastICA, and analyze and interpret the results. \subsubsection{Planck Sky Model} Sky maps are generated using a four-component model of galactic emission which includes free-free, synchrotron, thermal dust, and spinning dust diffuse components. We also add emission from several populations of compact sources, which comprise ultracompact galactic H-II regions, infra-red and radio sources (both galactic and extragalactic), a far infrared background emission, and thermal SZ effect from a simulated distribution of galaxy clusters. For our Planck simulations, maps are generated at 30GHz, 44GHz, 70GHz, 100GHz, 143GHz, 217GHz, 353GHz, 545GHz, and 857GHz, each at nside=1024. For WMAP simulations, maps are generated at 23GHz, 33GHz, 41GHz, 61GHz, and 94GHz, each at nside=512. Maps are simulated using Gaussian symmetric beams. Only temperature maps are generated. \subsubsection{Post-processing of PSM Outputs} Instrumental noise is added separately after the sky is simulated with the PSM. For Planck, we assume uniform sky coverage, with noise level corresponding to what is given in the Planck `Bluebook'. Since the FastICA and ILC methods require maps that are at the same resolution, we then smooth all maps to the resolution of the 30GHz channel, which has a Gaussian beam FWHM of 33'.
As we use a relatively low resolution beam, all maps are set to nside=512 after smoothing. After adding noise and smoothing maps to the same resolution, we simulate the calibration error by drawing a zero-mean Gaussian random variable $x$ with RMS equal to the desired calibration error (e.g. $\sigma=0.002$ for $0.2\%$ error). We then multiply the map by $1 + x$. This is repeated for each frequency channel, with the same calibration RMS error but a different realization of $x$ for each. While it makes no difference whether the calibration error simulation is performed before or after smoothing, we note that it is correct to add the calibration error after the noise, as the overall estimated noise level also depends upon the calibration of the instrument. As we make use of the estimated noise covariance between the channels, the estimated noise level after smoothing is also computed here. \subsection{Masking} For better performance of the FastICA or ILC component separation algorithms, it is safer to mask out particularly bright sources as well as those with strongly-varying spectral properties. The mask is determined making use of a simple magnitude-based algorithm. First, we produce a theoretical estimate of the expected CMB RMS based upon the WMAP power spectrum. We then generate a mask that removes all pixels which contain a value larger than four times the CMB RMS. For our maps, the mask used is a union of the masks computed as above from the 70GHz and 100GHz channels. We make use of the mask as generated from the first realization with no calibration error, and do not recompute the mask between runs. The resultant mask is shown in fig. \ref{fig:mask}. It is possible that we could obtain better component separation performance through more precise masking, but this is not expected to have any impact on the overall results of the present paper. 
The study could have been performed with any arbitrary mask, as long as the average CMB to foreground ratio is not changed significantly. \begin{figure} \includegraphics[width=85mm]{mask.png} \caption{Mask that removes the brightest pixels from the 70GHz and 100GHz channels.} \label{fig:mask} \end{figure} \subsection{Monte Carlo} In order to investigate both the average of the reconstruction error and its dispersion, we individually execute each of the above steps many times for each chosen set of parameters, the exact number depending upon the test. Summary statistics are then computed across the runs. When comparing different component separation techniques, the exact same set of realizations are used. Different choices of the calibration error level also make use of the same input sky maps. For these simulations, CMB and noise are generated from their statistical properties separately in each simulation. The CMB is a Gaussian realization assuming, for all simulations, the same power spectrum, compatible with WMAP best fit model, but new phases for each realization. Similarly, all realizations of noise are independent. Other components are not fully independent from realization to realization. Galactic components, the model of which is heavily constrained by WMAP observations, do not change much. The Sunyaev Zel'dovich map is fixed (i.e. the same SZ template map is used in all simulations). A fraction of point sources remain similar (they are based on the positions of real sources) although their spectral emission law depends on the realization. An additional population of point sources, generated to correct for the sky coverage of point source surveys to homogenize the point source distribution, is generated independently for each sky realization. \section{Results}\label{sec:results} In this section we present both analytical and numerical results obtained after including the presence of calibration errors in the ILC and ICA component separation methods. 
The success or the failure of a method will be evaluated as follows. We construct the output CMB map estimates by ILC or ICA as well as the residual map, which is the difference map between the estimated output CMB map and the simulated input CMB map. We compute the RMS value of each of these maps and compare them. We also evaluate both the multiplicative factor $\alpha$ and the additive error $c(p)$ (introduced in section \ref{sec:method}), characterizing the reconstruction errors. \subsection{Compared reconstruction error} The average root mean square of the reconstruction error $\hat s - s$, over all simulations for the Planck experiment, is computed in 10 bands of varying galactic latitude. The relative error, $r = E\left(s - \hat{s}\right) / E\left(s\right)$, for both FastICA and the ILC, is plotted in figures~\ref{fig:rel_err_ica} and~\ref{fig:rel_err_ilc}. As we expected, FastICA is almost completely unaffected by calibration errors. Because no assumption on the relative calibration is used, the overall calibration error just adds some small extra variance on the overall level of the extracted CMB. \begin{figure} \includegraphics[width=90mm]{rel_err_ica.png} \caption{Plot of the relative error of FastICA as a function of galactic latitude. Generated using 128 simulations for each case. As expected, the relative error of FastICA has very little dependence upon the calibration error.} \label{fig:rel_err_ica} \end{figure} \begin{figure} \includegraphics[width=90mm]{rel_err_ilc.png} \caption{Plot of the relative error of ILC as a function of galactic latitude. Generated using 128 simulations for each case. Unlike FastICA, ILC shows tremendous sensitivity to the calibration error, causing a noticeable reduction in the quality of the extraction of the CMB even at the optimistic 0.1\% calibration error level.} \label{fig:rel_err_ilc} \end{figure} ILC, however, is not so well behaved as FastICA.
While ILC is somewhat better than FastICA at extracting the CMB when calibration is perfect, it quickly becomes worse as calibration errors of increasing magnitude are applied. Fig.~\ref{fig:ilc_bad_example} shows the output of a particular realization at 1\% calibration error where ILC performed especially poorly, compared with the input CMB plotted on the same scale. The variance of the ILC output is much lower than the true CMB, and CMB features are strongly suppressed. As ILC attempts to find the minimum-variance output, it finds that with calibration errors it is possible to partially cancel the CMB to get the lowest possible variance output. \begin{figure} \includegraphics[width=85mm]{inp_example.jpg} \includegraphics[width=85mm]{ilc_example.jpg} \caption{Input CMB and ILC-estimated CMB plotted on a 0.2\,mK scale for one realization at 1\% calibration error with particularly bad output (relative error near 1.0). Note that the variance of the ILC output is far below the input CMB, indicating that the input CMB was largely canceled.} \label{fig:ilc_bad_example} \end{figure} \subsection{Interpretation of the ILC failure} The impact of calibration errors on ILC weights, and on the output CMB map, is analytically explored in Appendix \ref{app:ilcweights}. Here we highlight that the signal-to-noise ratio plays a decisive role on this impact. The ILC method is a linear combination of the maps observed in different frequency channels, $\hat{s} = \sum_i w_i x_i.$ The ILC combination has minimum variance under the constraint \begin{equation} \sum_i w_i a_i = 1. \end{equation} The constraint in principle guarantees the CMB conservation, otherwise $w_i = 0$ for all $i$ would minimize the variance. If the calibration $a_i$ is wrong then the CMB conservation is no longer guaranteed. In some cases, when the signal-to-noise ratio is large enough, it can be dramatic for the CMB extraction (see section \ref{subsec:sovern}).
As discussed above, the reconstruction error arises from two terms. A multiplicative term, i.e. a global calibration error term, and an additive contamination term. We can write the estimated CMB map as a function of the true CMB map as: $$ \hat{s}(p) = \alpha s(p) + c(p), $$ where $\alpha$ is the global calibration coefficient, and $c(p)$ the contamination by foregrounds and noise. Figure~\ref{fig:cmb_contrib} shows this parameter $\alpha$ versus the input map calibration error. \begin{figure} \includegraphics[width=90mm]{cmb_contrib.png} \caption{This figure shows the overall calibration coefficient of the output CMB map, computed from the known calibration errors in the inputs and the weights applied to obtain the output. The error bars represent the RMS of $\alpha$ among the 128 realizations. For FastICA, the calibration coefficient is centered very near one, with an RMS of approximately 1.5 times the map calibration error. By contrast, ILC has a CMB calibration that is perfect if the map calibration is perfect, but this quickly turns into a significant bias with large uncertainties as to the final calibration value.} \label{fig:cmb_contrib} \end{figure} The presence of calibration errors $\delta_{a_i}$ modifies the calibration coefficients in each channel as $a_i\rightarrow a_i+\delta_{a_i}$, where $\delta_{a_i}\ll a_i$. We may explicitly expand the multiplicative error and the additive error in terms of the calibration errors $\delta_{a_i}$ and the ILC weights $w_i$: \begin{eqnarray} \hat{s}(p) & = & \sum_i w_i x_i(p)\cr & = & \sum_i w_i \left(a_i+\delta_{a_i}\right)s(p)+\sum_i w_i n_i(p)\cr & = & \left(1+\sum_i w_i\delta_{a_i}\right)s(p)+\sum_i w_i n_i(p), \end{eqnarray} where the ILC weights $w_i$ satisfy the constraint $\sum_i w_ia_i = 1$. Thus we have \begin{eqnarray} c(p) & = & \sum_i w_i n_i(p),\\ \alpha & = & 1+\sum_i w_i\delta_{a_i}.
\end{eqnarray} The additive error term $c(p)=\sum_i w_i n_i(p)$ is responsible for a bias in the CMB estimation because of foreground and noise contaminations even in the absence of calibration errors. \citet{delabrouille09} have explored the impact of this term on the ILC estimation of the CMB and have found that, in addition to the standard reconstruction error due to foreground and noise contamination, there is a bias, $E\left(s\cdot\left(\hat{s}-s\right)\right)$, due to the estimation of second order statistics on samples of finite size. Both errors contribute to the variance of the output CMB map as $$ E\left(\hat{s}^2\right) = E\left(s^2\right)+E\left(\left(\hat{s}-s\right)^2\right)+2E\left(s\cdot\left(\hat{s}-s\right)\right). $$ The multiplicative error term $\alpha = 1+\sum_i w_i\delta_{a_i}$ becomes nontrivial in the presence of calibration errors because the ILC weights $w_i$, as derived in equation (\ref{qqq:weights}) of Appendix \ref{app:ilcweights}, do not depend only on the calibration errors $\delta_{a_i}$ but also on the signal-to-noise ratio $\sigma^2{R_n^{-1}}_{ij}$, where $\sigma^2 = E(s^2)$ and $(R_n)_{ij}=E(n_i n_j)$ denote respectively the variance of the CMB signal and the covariance matrix of the noise (including foregrounds).
\subsection{Importance of the signal to noise ratio}\label{subsec:sovern} \label{sec:snr} From the exact expression (\ref{qqq:weights}) of the weights we may write the multiplicative factor $\alpha = 1+\boldsymbol{w }^t\boldsymbol{\delta }_a$ as \begin{eqnarray}\label{qqq:alpha} \alpha & = & \frac{\boldsymbol{a }^t{\rm R}_n^{-1}\boldsymbol{a }+\boldsymbol{a }^t{\rm R}_n^{-1}\boldsymbol{\delta }_a}{\boldsymbol{a }^t{\rm R}_n^{-1}\boldsymbol{a }+\sigma^2\left[(\boldsymbol{a }^t{\rm R}_n^{-1}\boldsymbol{a })(\boldsymbol{\delta }_a^t{\rm R}_n^{-1}\boldsymbol{\delta }_a)-(\boldsymbol{a }^t{\rm R}_n^{-1}\boldsymbol{\delta }_a)^2\right]}.\qquad \end{eqnarray} The immediate consequence of equation (\ref{qqq:alpha}) is the existence of two regimes. If the signal-to-noise ratio is small enough compared to the inverse of the calibration error, typically, $$ \mbox{if }\quad\sigma^2\boldsymbol{\delta }_a^t{\rm R}_n^{-1}\boldsymbol{\delta }_a\ll 1\quad\mbox{then}\quad\alpha \approx 1+\mathcal{O}(\vert\boldsymbol{\delta }_a\vert/\vert \boldsymbol{a }\vert), $$ because the expression proportional to $\sigma^2$ becomes negligible in (\ref{qqq:alpha}). So we tend to recover the almost perfect CMB reconstruction close to the case of no calibration error ($\boldsymbol{\delta }_a = 0$). If the signal-to-noise ratio becomes large enough then the reconstruction of the CMB signal may be dramatically damaged. This is the main result of this paper. Typically, $$ \mbox{if }\quad\sigma^2\boldsymbol{\delta }_a^t{\rm R}_n^{-1}\boldsymbol{\delta }_a\gg 1\quad\mbox{then}\quad\alpha \approx 0, $$ the multiplicative factor goes to zero since the expression proportional to $\sigma^2$ dominates all the other terms in (\ref{qqq:alpha}), in which case the ILC estimation completely ``kills'' the expected CMB signal, $\hat{s}(p) \approx c(p)$. 
Let us complete the discussion by relating the first and second moments of the output CMB $\hat{s}$ and the reconstruction error $\hat{s}-s$ to the multiplicative and the additive errors. Considering that the CMB and the noise (including foregrounds) are independent random signals, $E(n_is) = 0$, and assuming that $E(n_i) = 0$, we get \begin{eqnarray} \label{qqq:moments} E(\hat{s}-s) & = &(\alpha-1)E(s),\cr E\left(s\cdot\left(\hat{s}-s\right)\right) & = & (\alpha-1)E(s^2),\cr E\left(\left(\hat{s}-s\right)^2 \right) & = & (\alpha-1)^2E(s^2)+E\left(c(p)^2\right), \end{eqnarray} where $E\left(c(p)^2\right) = \boldsymbol{w }^t R_n \boldsymbol{w }$. The detailed expression of these moments in terms of the calibration errors and the signal-to-noise ratio is derived in Appendix \ref{app:ilcweights}. From (\ref{qqq:moments}), once again, if the signal-to-noise is large enough then the reconstruction of the CMB is biased since $\alpha$ moves away from one to reach zero. \subsection{A simple example} Here we show a schematic description of the process using a simple example. We consider a two-channel case: \begin{eqnarray} x_1 &=& 0.99 s + n_1\cr x_2 &=& s + n_2. \end{eqnarray} Here $s$ is the CMB, $x_i$ is the $i^{th}$ channel of the data, and $n_i$ is the foregrounds plus instrument noise. The calibration coefficients are equal to one and a calibration error of one percent has been considered in the first channel. If the signal-to-noise ratio is large enough, \emph{e.g} $n_i/s \ll 0.99$, then the noise is negligible in the observed maps \begin{eqnarray} x_1 &\approx& 0.99 s \cr x_2 &\approx& s. \end{eqnarray} The ILC estimate of the CMB thus reduces in that case to \begin{eqnarray} \hat{s} & \approx & 100x_1-99 x_2, \end{eqnarray} where the weights satisfy the constraint $100-99=1$, which would guarantee the CMB conservation if the calibration was correctly estimated. 
Consequently, the CMB estimate is of minimum variance since $E(\hat{s}^2) \approx 0$, but of course completely removes the expected input CMB, rendering the ILC totally irrelevant. We may explain the process as follows. In the presence of a calibration error in one channel the ILC algorithm minimizes the variance of \begin{equation} \hat{s} = (0.99 w_1 + w_2) s + w_1n_1 + w_2n_2. \end{equation} We can contrast this with what we would get without calibration errors, \begin{equation} \hat{s} = (w_1 + w_2) s + w_1n_1 + w_2n_2. \end{equation} With the constraint that $w_1 + w_2 = 1$, the contribution of the CMB signal to $\hat{s}$ is always $s$. This indicates that the weights will take whatever values they need to take to minimize the contribution of the noise. However, in the presence of calibration errors, it becomes possible for the contribution of $s$ to $\hat{s}$ to vary depending upon the choice of weights, indicating that a minimization of the variance of $\hat{s}$ will introduce some competition between minimizing $(0.99 w_1 + w_2) s$ and minimizing $w_1n_1 + w_2n_2$. For the following weights \begin{eqnarray} w_1 &=& 100,\cr w_2 &=& -99, \end{eqnarray} the contribution of the CMB to $\hat{s}$ will be identically zero. This is what the ILC produces in the limit of the signal-to-noise ratio becoming very large with respect to the calibration error. In the opposite limit, that of small signal-to-noise ratio, it is the minimization of the second term, $w_1n_1 + w_2n_2$, that drives the minimization of $\hat{s}$, which mimics the behavior under the assumption of no calibration error. \subsection{The case of Planck} In table \ref{tab:planck} we present the results of ten simulations of the sky with an ILC estimation of the CMB in the presence of $0.1~\%$, $0.5~\%$ and $1~\%$ calibration errors for the Planck experiment ($9$ frequency channels). 
\begin{table*}[htbp] \begin{center} \begin{tabular}{|p{6.5cm}|*{3}{c|}} \hline \emph{Planck} & $1~\%$ & $0.5~\%$ & $0.1~\%$ \\ \hline \hline \bfseries mult. factor $\boldsymbol{\alpha}$ & $0.66500$ & $0.85258$ & $0.99237$ \\ \hline \bfseries $\mbox{add. error }\boldsymbol{E(c(p))}\mbox{ (mK)}$ & $6.208$e$-2$ & $3.455$e$-2$ & $1.845$e$-2$\\ \hline $E(c(p)^2)$ (${\mbox{mK}}^2$) & $1.231$e$-2$ & $3.46$e$-3$ & $5.6$e$-4$\\ \hline \hline $E((\hat{s}-s)^2)$ (${\mbox{mK}}^2$) & $4.26$e$-3$ & $1.91$e$-3$ & $5.5$e$-4$\\ \hline $E(s\cdot(\hat{s}-s))$ (${\mbox{mK}}^2$) & $-3.26$e$-3$ & $-1.18$e$-3$ & $-1.3$e$-4$\\ \hline $E(s^2)$ (${\mbox{mK}}^2$) & $7.42$e$-3$ & $7.42$e$-3$ & $7.42$e$-3$\\ \hline $E(\hat{s}^2)$ (${\mbox{mK}}^2$) & $5.16$e$-3$ & $6.99$e$-3$ & $7.71$e$-3$\\ \hline \end{tabular} \end{center} \caption{ILC reconstruction errors for Planck in the presence of $1~\%$, $0.5~\%$ and $0.1~\%$ calibration errors.} \label{tab:planck} \end{table*} For $1~\%$ we observe a significant bias affecting the CMB reconstruction by ILC. The multiplicative factor $\alpha = 0.665$ (table \ref{tab:planck}) indicates that the CMB estimate eliminates roughly $33~\%$ of the input CMB. The high sensitivity of Planck means a large signal-to-noise ratio, comparable to the inverse of the calibration error, which leads to a poor extraction of the CMB by ILC, as expected from the formula (\ref{qqq:alpha}). For $0.5~\%$ calibration errors, $15~\%$ of CMB is eliminated by the ILC estimation. Finally for $0.1~\%$ calibration errors, $1~\%$ of CMB is eliminated by the ILC estimation, which is nevertheless ten times the calibration error -- and clearly not acceptable for precision cosmology with Planck. \subsection{The case of WMAP} In table \ref{tab:wmap} we present the results of ten simulations of the sky with an ILC estimation of the CMB in the presence of $1~\%$ calibration errors for the WMAP experiment. We observe a negligible bias affecting the CMB reconstruction by ILC. 
The multiplicative factor $\alpha \approx 0.99$ (table \ref{tab:wmap}) indicates that the percentage of eliminated input CMB by ILC is for WMAP of order of the calibration error, \emph{i.e} $1~\%$, as expected from formula (\ref{qqq:alpha}) when the signal-to-noise ratio is small enough. The sensitivity of WMAP is small enough to render the ILC estimation of the CMB insensitive to calibration errors. \begin{table}[htbp] \begin{center} \begin{tabular}{|p{4.5cm}|*{3}{c|}} \hline \emph{WMAP} & $1~\%$ \\ \hline \hline \bfseries mult. factor $\boldsymbol{\alpha}$ & $0.98709$ \\ \hline \bfseries $\mbox{add. error }\boldsymbol{E(c(p))}\mbox{ (mK)}$ & $1.129$e$-2$ \\ \hline $E(c(p)^2)$ (${\mbox{mK}}^2$) & $6.9$e$-4$ \\ \hline \hline $E((\hat{s}-s)^2)$ (${\mbox{mK}}^2$) & $6.5$e$-4$ \\ \hline $E(s\cdot(\hat{s}-s))$ (${\mbox{mK}}^2$) & $-2.4$e$-4$ \\ \hline $E(s^2)$ (${\mbox{mK}}^2$) & $5.15$e$-3$ \\ \hline $E(\hat{s}^2)$ (${\mbox{mK}}^2$) & $5.33$e$-3$ \\ \hline \end{tabular} \end{center} \caption{ILC reconstruction errors for WMAP in presence of $1~\%$ calibration errors.} \label{tab:wmap} \end{table} \subsection{Actual WMAP ILC} The above result for WMAP was obtained assuming that the ILC is performed on the masked sky of figure \ref{fig:mask}. In fact, ILC weights used by the WMAP team have been computed in a different way, by subdividing the sky into twelve regions. Since the value of their weights are known, as well as the mean calibration error, we may easily evaluate the error of the reconstruction performed by the WMAP team. The order of magnitude of the ILC weights $w_i^{\rm WMAP}$ computed by the WMAP team is comprised between $10^{-2}$ and $3$ \citep{hinshaw07} and the relative calibration errors have been estimated by the WMAP team to be of the order of $\delta_{a,i}\sim 0.2 \%$. In the subdivision of the sky by the WMAP team the region zero \citep{hinshaw07} corresponds to the part of the sky outside the galaxy and thus dominated by the CMB signal. 
A priori, since the signal-to-noise ratio is the highest in that high galactic latitude region, one might expect the effect of the calibration errors to be large. This is not the case, however. We may estimate the maximum percentage of eliminated CMB in the region zero as follows: \begin{eqnarray} \left\vert 1-\alpha \right\vert & = &\left\vert\sum_i w_i^{\rm WMAP}\delta_{a_i}\right\vert\cr & \leq & 0.002\sum_i \left\vert w_i^{\rm WMAP}\right\vert \cr & \leq & 7\cdot 10^{-3} = 0.7\%, \end{eqnarray} where $w_i^{\rm WMAP}$ are the ILC weights computed by the WMAP team in the region zero \citep{hinshaw07}. Therefore, the maximum percentage of eliminated CMB has the order of magnitude of the calibration error, \emph{i.e} ${\rm few} \times 10^{-1}~\%$, which is small.\footnote{It should be noticed that this bound is a rough estimation since we do not have access to the real value of the calibration error for each frequency channel.} So the multiplicative factor for the actual WMAP ILC in presence of $0.2~\%$ calibration errors is close to one, with a minor loss of CMB power: $$ \alpha \geq 0.993. $$ Interestingly, the ILC weights used at high galactic latitude by the WMAP team \citep{hinshaw07} have been computed in a low galactic latitude region of the sky, where the signal-to-noise ratio is sufficiently small. This certainly explains why the ILC weights are close to those expected with no calibration errors and why the multiplicative factor is close to one. Therefore the calibration uncertainties do not have a strong impact on the ILC weights computed in the WMAP third year data release. The bias due to calibration errors is negligible. The price paid for this, as emphasized by \citet{delabrouille09}, is that at high galactic latitude the WMAP weights are chosen to cancel galactic foregrounds rather than instrumental noise, a sub--optimal choice away from the galaxy, particularly for small scales. 
\subsection{Other ILC performed on WMAP} Several authors have used a version of the ILC to analyze WMAP data. The present paper warns the users of the corresponding data sets that in the presence of calibration errors, some CMB power may be lost in the maps obtained. Further investigation would be needed to evaluate the exact impact for each individual recovered CMB map. \subsection{De--biasing} A natural question to ask is whether, since the effect of calibration errors is to introduce a loss of CMB power, it would not be possible to correct for this effect and `recalibrate' a posteriori in some way. First of all, this cannot be the optimal solution, as the noise contribution to the total error would be increased accordingly. The proper solution would be to get the right calibration beforehand. As we can see from figure \ref{fig:cmb_contrib}, the variance of $\alpha$ seems to be of order $1-\alpha$. This indicates that the maximum improvement on the level of the CMB is to reduce the expectation value of $|1-\alpha|$ by around a factor of two. As $1 - \alpha$ becomes very large very quickly, this will not help when the calibration is not already very good compared to the signal to noise ratio. Finally, even the knowledge of the expectation value of $\alpha$ is not very easy to get. Simulations give an estimate of its amplitude, but the actual value may depend on details, for which simulations are not guaranteed to be representative. Hence, we leave this question open for further investigations. \subsection{Impact of the number of channels} Tests performed varying the number of channels used to perform the ILC with Planck data show that ILC does better with calibration errors if fewer channels are used. To add new data, but end up with worse estimation of the desired products, indicates that the new data is not being used effectively, to say the least. The reason for this degradation of the performance of the ILC when more channels are added is easy to understand. 
As discussed in section \ref{sec:snr}, the ILC can erroneously cancel out part of the CMB if the signal to noise ratio is larger than the inverse of the calibration error, i.e. if \begin{equation} \sigma^2\boldsymbol{\delta }_a^t{\rm R}_n^{-1}\boldsymbol{\delta }_a\gg 1. \label{eq:snr-condition} \end{equation} As ${\rm R}_n^{-1}$ and ${\rm R}_n$ are symmetric matrices, they can be diagonalized, and we can write: $${\rm R}_n^{-1} = {\rm O}^t {\rm D}_n^{-1} {\rm O} ,$$ where ${\rm O}$ is an orthonormal matrix, and ${\rm D}_n^{-1}$ a diagonal matrix. The condition of eq. (\ref{eq:snr-condition}) then becomes: \begin{equation} \sigma^2 ({\rm O} \boldsymbol{\delta }_a)^t {\rm D}_n^{-1} ({\rm O} \boldsymbol{\delta }_a) \gg 1. \end{equation} Matrix ${\rm O}$ preserves the norm, and thus elements of ${\rm O} \boldsymbol{\delta }_a$ are of the same order as those of $\boldsymbol{\delta }_a$. It then suffices that one of the eigenvalues of ${\rm D}_n$ be small for $\sigma^2\boldsymbol{\delta }_a^t{\rm R}_n^{-1}\boldsymbol{\delta }_a$ to be large, causing the CMB power loss discussed in this paper. Now recalling that ${\rm R}_n$ is the covariance matrix of noise + foregrounds, it is easy to understand why more channels cause more problems with Planck. Foregrounds are significantly brighter than the noise, and comparable in amplitude to the CMB over a fraction of the sky. If they span a space of dimension equal or greater than the number of channels, matrix ${\rm D}_n$ will have no small eigenvalue. If on the other hand they span a space of dimension less than the number of channels, matrix ${\rm D}_n$ will have at least one small eigenvalue, generating the `CMB loss' problem. Physically, this is understood in the following way: if there are few channels, the minimization of the variance of the ILC linear combination will be achieved by canceling foregrounds primarily. 
If however there are additional channels which are not needed to cancel out the foregrounds, the extra channels leave more freedom for the ILC weights to adjust themselves so as to cancel part of the CMB as well. \section{Conclusion}\label{sec:conclusion} The primary conclusion of our work is that some care is required for performing component separation in presence of calibration errors, in particular for sensitive multichannel instruments such as Planck. We have shown that two different component separation algorithms, FastICA and ILC, behave very differently in the presence of calibration errors. FastICA is completely unaffected, while ILC can become biased by a significant amount with even small calibration errors. We propose that those attempting to make use of these or other component separation techniques pay close attention to how calibration errors affect their results. Some techniques will doubtlessly be completely unaffected, as FastICA was, while others may be very sensitive like ILC. We also note that due to the fact that ILC in the presence of sufficient calibration errors biases the variance of the CMB low, and because we have a lower limit upon the variance of the CMB from WMAP, through its measurement of the CMB power spectrum up to about $\ell=900$, the variance of the ILC output may prove a useful diagnostic test if the calibration of Planck was performed well. The ability to use this as a cross-check on calibration also indicates that for a Planck-style mission we expect to recover, at a minimum, around 0.1\%-0.2\% relative calibration error. The reasoning for this is that if the calibration error is worse, then ILC will produce a CMB map that is of lower-variance than a similar map from WMAP, which, in turn, tells us that the calibration wasn't very good. If we have information that the calibration wasn't as good as it could have been, then it is reasonable to expect that it is possible to improve said calibration. 
Note that even though FastICA is not biased where ILC is, it is not clear that FastICA is better. ILC does seem to produce lower errors in extracting the CMB, as seen in figures \ref{fig:rel_err_ica} and \ref{fig:rel_err_ilc}. The biasing is troubling, but ILC retains lower extraction error up to somewhere between 0.1\% and 0.2\% calibration error, at least at high galactic latitudes. If the calibration error is good enough, then we still expect ILC to remain a very useful method for extracting the CMB. \section*{Acknowledgments} \thanks{ We would like to thank Carlo Baccigalupi, Jean-Fran\c{c}ois Cardoso, Maude Le Jeune, and Radek Stompor for useful conversations related to this work.}
1,108,101,565,856
arxiv
\section{Introduction} Recent investigations have shown that a fourth generation is not ruled out by the precision electroweak data if it is heavy with masses in the few hundred GeV range (For recent works see \cite{Kribs:2007nz,Hung:2007ak,Holdom,Novikov:2002tk,Dubicki:2003am,Murdock:2008rx,Cakir:2008su} and for early works see \cite{Hewett:1986uu,Barger:1989dk,Frampton:1999xi}). These investigations have typically assumed that the fourth generation is a sequential generation with $V-A$ type interactions. However, an intriguing possibility exists that the new generation could be a mirror generation with $V+A$ interactions. Mirror generations do arise in unified models of fundamental interactions\cite{grs,Georgi:1979md,Wilczek:1981iz,Babu:2002ti}, and thus it is natural that one consider the existence of a mirror generation. Normally one assumes the so-called survival hypothesis\cite{Georgi:1979md} where with $n_f$ number of ordinary families and $n_{mf}$ number of mirror families, only $n_f-n_{mf}$ (for $n_f>n_{mf}$) remain light, and the remainder acquire GUT or string scale size masses. However, this need not always be the case. Indeed there are many escape mechanisms where residual symmetries in breaking at the string scale or GUT scale will keep some mirror families light while others become superheavy \cite{Bagger:1984rk,Senjanovic:1984rw}. Mixings between ordinary families and mirrors can arise from non-renormalizable interactions after spontaneous breaking (see, e.g., \cite{Senjanovic:1984rw,Chang:1985jd}). 
Additional work on model building using mirrors can be found in \cite{Bars:1980mb,delAguila:1984qs,Maalampi:1988va,mirrors,Adler:2002yg,Chavez:2006he} and further implications of mirrors are explored in \cite{Nandi:1981qr,Langacker:1988ur,Choudhury:2001hs,Csikor:1994jg,Montvay:1997zq,Triantaphyllou:1999uh}.\\ In this work we make the specific assumption that there is indeed a light mirror generation with masses below the TeV scale which would be accessible at the LHC. The assumption of a full mirror generation leaves the theory anomaly free. Essentially all of the analyses valid for a sequential fourth generation regarding consistency with the precision electroweak data and other constraints should be valid for a mirror generation and we assume this to be the case. The analysis we present here differs from previous works in many respects. First we propose an extension of the minimal supersymmetric standard model with a full mirror generation which is light (mirMSSM), i.e., with masses below the TeV scale which will be accessible at the LHC. Such an extension is not considered in any of the previous works. Indeed most of the previous analyses are not in supersymmetric frameworks. Second we assume that the mixings of the mirror generation occur mostly with the third generation, and are negligible with the first two generations if they occur at all. With this assumption, the $V-A$ structure of the weak interactions for the first two generations remains intact, while the third generation can develop a small $V+A$ component. Current data on the third generation do not necessarily rule out this possibility. \\ If a mirror generation exists, it would be discovered at the LHC with the same amount of luminosity as for a sequential fourth generation which is estimated to be $50~\mathrm{fb}^{-1}$. A mirror generation will lead to interesting and even dramatic multilepton and jets signatures which can discriminate between a mirror generation and a sequential fourth generation. 
Further, tests of the mirror generation can come from the decay of the heavy Higgs and via measurements of the forward--backward asymmetry. Another effect of the mixings of the mirror generation with the third generation is on magnetic moments. We analyze these in the leptonic sector in detail and show that the tau neutrino magnetic moment is enhanced by several orders of magnitude beyond what one has in the Standard Model. We note in passing that the term mirror has also been used in an entirely different context of mirror worlds\cite{okun,Mohapatra:2005ng} where one has mirror matter with their own mirror gauge group. The analysis here has no relationship with those theories. \\ The outline of the rest of the paper is as follows. In Sec.(2) we present an extension of the minimal supersymmetric standard model (MSSM) to include a fourth generation which we assume is a mirror generation and allow for a mixing of this generation with the 3rd generation. Here the interactions in the charged and neutral current sectors are worked out including the supersymmetric interactions involving the mirrors, the charginos and the neutralinos. Further details of mixing and interactions are given in Appendix A. An analysis of the $\tau$ neutrino magnetic moment is given in Sec.(3). Here contributions arise from exchanges of the leptons from the third generation and from the mirror generation, and also from the exchanges of the sleptons and mirror sleptons. An analysis of the $\tau$-lepton anomalous magnetic moment when mixings with the mirror family are allowed is given in Sec.(4) again including exchanges from the 3rd generation leptons and sleptons and from the mirror leptons and mirror sleptons. A discussion of the constraints on a mirror generation and a quantitative analysis of the sizes is given in Sec.(5) in the framework of an extended supergravity unified model\cite{msugra} which includes the mirror sector. 
When compared with the magnetic moment analyses in MSSM with or without CP violation\cite{susyg2,cpg2,Ibrahim:2007fb} one finds that the tau neutrino magnetic moment can be orders of magnitude larger than in the Standard Model while the magnetic moment of the tau lies within experimental bounds. A qualitative analysis of the signatures of the mirror generation at the LHC is given in Sec.(6). Here it is shown that some characteristic signatures arise, such as dominance of $\tau$s in the decay patterns of the mirror leptons which should allow one to discriminate this model from other supersymmetric models. Further, we discuss how one may distinguish a mirror generation from a sequential fourth generation. Here aside from the leptonic signatures, the decay of the heavy Higgs bosons, and the analysis of the forward-backward asymmetry would allow one to discriminate a mirror generation from a sequential fourth generation. Further details of the decay of heavy Higgs to mirror fermions are given in Appendix B. Conclusions are given in Sec.(7). \section{Extension of MSSM with a Mirror Generation} The fourth generation which we assume to be mirror will in general mix with the other three generations. However, as is the case for the first three generations the mixings between the generations get smaller as the ratio of the masses get further apart. Thus, for example, $V_{ub}<< V_{us}$, and we expect a similar phenomenon for mixings involving the fourth (mirror) generation, i.e., we expect $V_{uB} <<V_{ub}$ where $B$ is the 4th (mirror) generation bottom quark. As an example, the mixing between the first and the second can be estimated by the Gatto-Sartori-Tonin-Oakes relation\cite{gsto} $V_{us}= \sqrt{m_d/m_s}$ which gives $V_{us}$ to be about $0.2$. 
The mixing of the first with the third can be very roughly estimated so that $V_{ub}=\sqrt{m_d/m_b}$ which gives about $.03$, i.e., a factor about 10 smaller than $V_{us}$\footnote{This is actually a significant over estimate since the most recent CKM fits give a value which is even smaller, i.e., $V_{ub}=(3.93\pm .36)\times 10^{-3}$\cite{pdg}.}. If we extend this rough estimate to the fourth generation one will have mixing between the first and the fourth as $V_{uB}= \sqrt{m_d/m_B} = .005 $(for $m_B=200$ GeV). Assuming similar mixings will hold in the leptonic sector one will have mixings between the first and the fourth as $ \sqrt{m_e/m_E} = .0016$ (for $M_E$=200 GeV) where $E$ is the 4th (mirror) generation lepton. More detailed analyses using error bars on electroweak data show that the constraints on the enlarged CKM matrix are more relaxed\cite{Kribs:2007nz} (see also Sec.V). Conversely it means that with the current limits on the mixing angles the effects of the 4th generation on the analysis of the electroweak data lie well within the error bars. Here the electroweak parameters which require special attention are the S, T, U variables where larger contributions from the 4th generation are possible, but still the data can be made compatible with a 4th generation. Returning to the mixing of the 4th generation with the first two one can easily check that small mixings of the type discussed above lead to negligible effect of the 4th generation on the phenomenology of the first two generations. For this reason we will make a simplifying assumption of neglecting the mixing effects of the fourth with the first two generations and consider below the mixing of just the third and the fourth. However, the following analysis can be straightforwardly extended to the full four generations by letting the generation index run from 1-4 keeping in mind that the 4th generation is a mirror generation. 
Thus under $SU(3)_C\times SU(2)_L \times U(1)_Y$ the leptons transform as follows \begin{eqnarray} \psi_L\equiv \left( \begin{array}{c} \nu_L\\ \tau_L \end{array}\right) \sim(1,2,- \frac{1}{2}), \tau^c_L\sim (1,1,1), \nu^c_L\sim (1,1,0), \end{eqnarray} where the last entry on the right hand side of each $\sim$ is the value of the hypercharge $Y$ defined so that $Q=T_3+ Y$. These leptons have $V-A$ interactions. Let us now consider mirror leptons which have $V+A$ interactions. Their quantum numbers are as follows \begin{eqnarray} \chi^c\equiv \left( \begin{array}{c} E_{\tau L}^c\\ N_L^c \end{array}\right) \sim(1,2,\frac{1}{2}), E_{\tau L}\sim (1,1,-1), N_L\sim (1,1,0). \end{eqnarray} The analogous relations for the quarks are \begin{eqnarray} q\equiv \left( \begin{array}{c} t_L\\ b_L \end{array}\right) \sim(3,2,\frac{1}{6}), t^c_L\sim (3^*,1,-\frac{2}{3}), b^c_L\sim (3^*,1,\frac{1}{3}), \end{eqnarray} and for the mirror quarks \begin{eqnarray} {Q}^c \equiv \left( \begin{array}{c} B^c_L\\ T^c_L \end{array}\right) \sim(3^*,2,-\frac{1}{6}), T_L\sim (3,1,\frac{2}{3}), B_L\sim (3^*,1, -\frac{1}{3}). \end{eqnarray} For the Higgs multiplets we have the MSSM Higgs doublets which give \begin{eqnarray} H_1\equiv \left(\begin{array}{c} H_1^1\\ H_1^2 \end{array}\right) \sim(1,2,-\frac{1}{2}), ~H_2\equiv \left(\begin{array}{c} H_2^1\\ H_2^2\end{array}\right) \sim(1,2,\frac{1}{2}). \end{eqnarray} We assume that the mirror generation escapes acquiring mass at the GUT scale and remains light down to the elctroweak scale where the superpotential of the model for the lepton part, may be written in the form \begin{eqnarray} W= \epsilon_{ij} [f_{1} \hat H_1^{i} \hat \psi_L ^{j}\hat \tau^c_L +f_{1}' \hat H_2^{j} \hat \psi_L ^{i} \hat \nu^c_L +f_{2} \hat H_1^{i} \hat \chi^c{^{j}}\hat N_{L} +f_{2}' \hat H_2^{j} \hat \chi^c{^{i}} \hat E_{\tau L}]\nonumber\\ + f_{3} \epsilon_{ij} \hat \chi^c{^{i}}\hat\psi_L^{j} + f_{4} \hat \tau^c_L \hat E_{\tau L} + f_{5} \hat \nu^c_L \hat N_{L}. 
\label{superpotential} \end{eqnarray} In the above we have assumed mixings between the third generation and the mirror generation. Such mixings can arise via non-renormalizable interactions\cite{Senjanovic:1984rw}. Consider, for example, a term such as $1/M_{Pl} \nu^c_LN_L \Phi_1\Phi_2$. If $\Phi_1$ and $\Phi_2$ develop VEVs of size $10^{9-10}$, a mixing term of the right size can be generated. To get the mass matrices of the leptons and the mirror leptons we replace the superfields in the superpotential by their component scalar fields. The relevant parts in the superpotential that produce the lepton and mirror lepton mass matrices are \begin{eqnarray} W=f_1 H_1^1 \tilde{\tau}_L \tilde{\tau}_R^* +f_1' H_2^2 \tilde{\nu}_{L} \tilde{\nu}^*_{R}+ f_2 H_1^1 \tilde{N}_R^* \tilde{N}_L+f_2' H_2^2 \tilde{E}^*_{\tau R} \tilde{E}_{\tau L}\nonumber\\ +f_3 \tilde{E}^*_{\tau R} \tilde{\tau}_L -f_3 \tilde{N}_R^* \tilde{\nu}_{L}+ f_4 \tilde{\tau}_R^* \tilde{E}_{\tau L} +f_5 \tilde{\nu}^*_{R} \tilde{N}_L \end{eqnarray} The mass terms for the lepton and their mirrors arise from the part of the lagrangian \begin{equation} {\cal{L}}=-\frac{1}{2}\frac{\partial ^2 W}{\partial{A_i}\partial{A_j}}\psi_ i \psi_ j+H.c. \end{equation} where $\psi$ and $A$ stand for generic two-component fermion and scalar fields. After spontaneous breaking of the electroweak symmetry, ($<H_1^1>=v_1/\sqrt{2} $ and $<H_2^2>=v_2/\sqrt{2}$), we have the following set of mass terms written in 4-spinors for the fermionic sector \begin{eqnarray} -{\cal L}_m = \left(\begin{array}{c}\bar \tau_R ~ \bar E_{\tau R} \end{array}\right) \left(\begin{array}{c} f_1 v_1/\sqrt{2} ~ f_4\\ f_3 ~ f_2' v_2/\sqrt{2}\end{array}\right) \left(\begin{array}{c} \tau_L\\ E_{\tau L}\end{array}\right) + \left(\begin{array}{c}\bar \nu_R ~ \bar N_R\end{array}\right) \left(\begin{array}{c} f'_1 v_2/\sqrt{2} ~ f_5\\ -f_3 ~ f_2 v_1/\sqrt{2}\end{array}\right) \left(\begin{array}{c} \nu_L\\ N_L\end{array}\right) + H.c. 
\end{eqnarray} Here the mass matrices are not Hermitian and one needs to use bi-unitary transformations to diagonalize them. Thus we write the linear transformations \begin{eqnarray} \left(\begin{array}{c}\tau_R\\ E_{\tau R}\end{array}\right)=D^{\tau}_R \left(\begin{array}{c}\tau_{1_R}\\ E_{\tau 2_R} \end{array}\right),\nonumber\\ \left(\begin{array}{c} \tau_L\\ E_{\tau L}\end{array}\right)=D^{\tau}_L \left(\begin{array}{c} \tau_{1_L}\\ E_{\tau 2_L}\end{array}\right), \end{eqnarray} such that \begin{equation} D^{\tau \dagger}_R \left(\begin{array}{c} f_1 v_1/\sqrt{2} ~ f_4\\ f_3 ~ f_2' v_2/\sqrt{2}\end{array}\right) D^{\tau}_L=diag(m_{\tau_1},m_{\tau_2}). \label{put1} \end{equation} The same holds for the neutrino mass matrix \begin{equation} D^{\nu \dagger}_R \left(\begin{array}{c} f'_1 v_2/\sqrt{2} ~ f_5\\ -f_3 ~ f_2v_1/\sqrt{2}\end{array}\right) D^{\nu}_L=diag(m_{\nu_1},m_{\nu_2}). \label{put2} \end{equation} Here $\tau_1, \tau_2$ are the mass eigenstates and we identify the tau lepton with the eigenstate 1, i.e., $\tau=\tau_1$, and identify $\tau_2$ with a heavy mirror eigenstate with a mass in the hundreds of GeV. Similarly $\nu_1, \nu_2$ are the mass eigenstates for the neutrinos, where we identify $\nu_1$ with the light neutrino state and $\nu_2$ with the heavier mass eigenstate. By multiplying Eq.(\ref{put1}) by $D^{\tau \dagger}_L$ from the right and by $D^{\tau}_R$ from the left and by multiplying Eq.(\ref{put2}) by $D^{\nu \dagger}_L$ from the right and by $D^{\nu}_R$ from the left, one can equate the values of the parameter $f_3$ in both equations and we can get the following relation between the diagonalizing matrices $D^{\tau}$ and $D^{\nu}$ \begin{equation} m_{\tau 1} D^{\tau}_{R 21} D^{\tau *}_{L 11} +m_{\tau 2} D^{\tau}_{R 22} D^{\tau *}_{L 12}= -[m_{\nu 1} D^{\nu}_{R 21} D^{\nu *}_{L 11} +m_{\nu 2} D^{\nu}_{R 22} D^{\nu *}_{L 12}]. 
\label{condition} \end{equation} Eq.(\ref{condition}) is an important relation as it constrains the symmetry breaking parameters and this constraint must be taken into account in numerical analyses. Let us now write the charged current interaction in the leptonic sector for the 3rd generation and for the mirror generation with the W boson. \begin{eqnarray} {\cal{L}}_{CC}= -\frac{g_2}{2\sqrt 2} W^{\dagger}_{\mu} \left[ \bar \nu\gamma^{\mu} (1-\gamma_5) \tau +\bar N\gamma^{\mu} (1+\gamma_5) E_{\tau} \right] + H.c. \end{eqnarray} In the mass diagonal basis the charged current interactions are given by \begin{eqnarray} {\cal{L}}_{CC}=-\frac{g_2}{2\sqrt 2} W^{\dagger}_{\mu} \sum_{\alpha,\beta,\gamma,\delta=1,2} \bar \nu_{\alpha} \gamma^{\mu} [D^{\nu\dagger}_{L\alpha\gamma} g_{\gamma\delta}^L D^{\tau}_{L\delta\beta} (1-\gamma_5)+ \nonumber\\ + D^{\nu\dagger}_{R\alpha\gamma} g_{\gamma\delta}^R D^{\tau}_{R\delta\beta} (1+\gamma_5)] \tau_{\beta} +H.c. \label{LR} \end{eqnarray} where $g^{L,R}_{\alpha\beta}$ are defined so that \begin{eqnarray} g^L_{11}=1, g^L_{12}=0= g^L_{21}=g^L_{22}, \nonumber\\ g^R_{11}=0= g^R_{12}= g^R_{21}, g^R_{22}=1. \end{eqnarray} Next we consider the chargino interactions of the mirror leptons. The interaction terms in two-component notation are \begin{equation} {\cal{L}}=ig\sqrt{2} T^a_{ij} \lambda^a \psi_j A^*_i-\frac{1}{2}\frac{\partial ^2 W}{\partial{A_i}\partial{A_j}}\psi_ i \psi_ j+H.c. \label{general} \end{equation} Here $T^a=\tau^a/2$ where $\tau^a$ (a=1,2,3) are the Pauli matrices, and for the chargino interaction we use the generators $T^1$ and $T^2$, and $W$ is the part of Eq.(\ref{superpotential}) given by \begin{equation} W= -f_2 H_1^2 \tilde{E}^*_{\tau R} \tilde{N}_L -f_2' H_2^1 \tilde{N}^*_R \tilde{E}_{\tau L}. 
\end{equation} Using the above superpotential and the fermions of the mirror generation and the supersymmetric partners of the charged Higgs for $\psi$ and the mirror sleptons and charged Higgs for $A$, the interaction of the $V+A$ fourth generation with charginos in the two-component notation is given by \begin{eqnarray} {\cal{L}}=ig[\lambda^+ N^c_L \tilde{E}_{\tau R} +\lambda^- E^c_{\tau L} \tilde{N}_R]\nonumber\\ +\frac{gm_{N}}{\sqrt{2}M_W\cos\beta}[\tilde{N}_L \psi_{H_1^-} E^c_{\tau L}+\tilde{E}^*_{\tau R} \psi_{H_1^-}N_L]\nonumber\\ +\frac{gm_{E}}{\sqrt{2}M_W\sin\beta}[\tilde{N}^*_R \psi_{H_2^+} E_{\tau L}+\tilde{E}_L \psi_{H_2^+}N^c_L]+H.c., \label{fourth1} \end{eqnarray} where $\lambda^{\pm}=\frac{\lambda^1\mp i\lambda^2}{\sqrt{2}}$. Now we go from two-spinor to four-spinor by defining the two four-spinors: \begin{eqnarray} \tilde{W}= \left(\begin{array}{c} -i\lambda^+\\ i\bar \lambda^-\end{array}\right) , ~\tilde{H}= \left(\begin{array}{c}\psi_{H_2^+}\cr \bar \psi_{H_1^-}\end{array}\right). \end{eqnarray} By using these two four-spinors, Eq. (\ref{fourth1}) for the $V+A$ generation interaction is given by \begin{eqnarray} {\cal{L}}=-g[\bar{\tilde{W}} P_R N \tilde{E}^*_{\tau R}+\bar{\tilde{W^c}} P_R E_{\tau} \tilde{N}^*_R]\nonumber\\ +\frac{gm_{E}}{\sqrt{2} M_W \sin\beta} [\bar{\tilde{H}} P_R N \tilde{E}^*_{\tau L}+\bar{E_{\tau}} P_R \tilde{H^c} \tilde{N}_R]\nonumber\\ +\frac{gm_{N}}{\sqrt{2} M_W \cos\beta} [\bar{N} P_R \tilde{H} \tilde{E}_{\tau R}+\bar{\tilde{H^c}} P_R E_{\tau} \tilde{N}^*_L]+H.c. 
\label{fourth21} \end{eqnarray} Now we use the two-component mass eigenstates \begin{eqnarray} \psi^+_1=-i\lambda^+,~\psi^+_2=\psi_{H^+_2}\nonumber\\ \psi^-_1=-i\lambda^-,~\psi^-_2=\psi_{H^-_1} \end{eqnarray} By defining the two-component spinors $\chi_i^+$ and $\chi_i^-$ as \begin{eqnarray} \chi^+_i=V_{ij}\psi^+_j\nonumber\\ \chi^-_i=U_{ij}\psi^-_j \end{eqnarray} the four-component mass eigenstates are \begin{eqnarray} \tilde{\chi_1}^+= \left(\begin{array}{c}\chi^+_1\\ \bar \chi^-_1\end{array}\right) , ~\tilde{\chi_2}^+= \left(\begin{array}{c}\chi^+_2\cr \bar\chi^-_2\end{array}\right) \end{eqnarray} The matrix elements $U$ and $V$ that diagonalize the chargino mass matrix $M_C$ are given by \begin{equation} U^* M_C V^{-1}= diag (m_{\tilde{\chi_1}}^+,m_{\tilde{\chi_2}}^+). \end{equation} One can use the definitions of $P_L$, $P_R$ and the above relations to get the following useful relations \begin{eqnarray} P_L \tilde{W}=P_L \sum_{i=1}^2 V^*_{i1}\tilde{\chi_i}^+,~ P_L \tilde{W}^c=P_L\sum_{i=1}^2 U^*_{i1}\tilde{\chi_i}^c \nonumber\\ P_L \tilde{H}=P_L \sum_{i=1}^2V^*_{i2}\tilde{\chi_i}^+,~ P_R \tilde{H}=P_R \sum_{i=1}^2 U_{i2}\tilde{\chi_i}^+\nonumber\\ P_R \tilde{H}^c=P_R \sum_{i=1}^2 V_{i2}\tilde{\chi_i}^c,~ P_L \tilde{H}^c=P_L \sum_{i=1}^2 U^*_{i2}\tilde{\chi_i}^c \end{eqnarray} Using these relations and Eq.(\ref{fourth21}), the interactions of the mirror generation with chargino mass eigenstates are given by \begin{eqnarray} -{\cal{L}}_{N - E_{\tau}- \chi^+}= g\bar N [V^*_{i1} P_L -\kappa_{N} U_{i2} P_R] \tilde{\chi_i}^+ \tilde E_{\tau R}\nonumber\\ + g\bar N [ -\kappa_{E_{\tau}} V^*_{i2} P_L] \tilde{\chi_i}^+ \tilde E_{\tau L} + g\bar E_{\tau} [U^*_{i1} P_L -\kappa_{E_{\tau}} V_{i2} P_R] \tilde{\chi_i}^c \tilde N_R\nonumber\\ + g\bar E_{\tau} [ -\kappa_{N} U^*_{i2} P_L] \tilde{\chi_i}^c \tilde N_L + H.c. 
\label{c2} \end{eqnarray} where $\tilde{\chi_i}^c$ is the charge conjugate of $\tilde {\chi_i}$ and where \begin{eqnarray} \kappa_N=\frac{m_N}{\sqrt{2} M_W \cos\beta},~ \kappa_{E_{\tau}}=\frac{m_{E_{\tau}}}{\sqrt{2} M_W \sin\beta} \end{eqnarray} The interaction of the leptons with the chargino is given by \begin{eqnarray} -{\cal{L}}_{\nu - \tau- \chi^+}= g\bar \nu [U_{i1} P_R -\kappa_{\nu} V_{i2}^* P_L] \tilde{\chi_i}^+ \tilde \tau_L\nonumber\\ + g\bar \nu [ -\kappa_{\tau} U_{i2} P_R] \tilde{\chi_i}^+ \tilde \tau_R + g\bar \tau [V_{i1} P_R -\kappa_{\tau} U_{i2}^* P_L] \tilde{\chi_i}^c \tilde \nu_L\nonumber\\ + g\bar \tau [ -\kappa_{\nu} V_{i2} P_R] \tilde{\chi_i}^c \tilde \nu_R + H.c., \label{c1} \end{eqnarray} where \begin{eqnarray} \kappa_{\tau}=\frac{m_{\tau}}{\sqrt{2} M_W \cos\beta},~\kappa_{\nu}= \frac{m_{{\nu}}}{\sqrt{2} M_W \sin\beta}. \end{eqnarray} A full analysis of the mirror sparticle couplings will be given elsewhere. Next we consider the mixings of the charged sleptons and the charged mirror sleptons. The mass matrix in the basis $(\tilde \tau_L, \tilde E_L, \tilde \tau_R, \tilde E_R)$ takes the form \begin{eqnarray} (M^2)_{\tilde \tau}= \left(\begin{array}{c} M^2_{11} ~ M^2_{12} ~ M^2_{13} ~ M^2_{14} \\ M^{2}_{21} ~ M^2_{22} ~ M^2_{23} ~ M^2_{24} \\ M^{2}_{31} ~ M^2_{32} ~ M^2_{33} ~ M^2_{34}\\ M^2_{41} ~ M^2_{42} ~M^2_{43} ~ M^2_{44} \end{array}\right). \end{eqnarray} Here the terms $M^2_{11}, M^2_{13}, M^2_{31}, M^2_{33}$ arise from soft breaking in the sector $\tilde \tau_L, \tilde \tau_R$. Similarly the terms $M^2_{22}, M^2_{24},$ $M^2_{42}, M^2_{44}$ arise from soft breaking in the sector $\tilde E_L, \tilde E_R$. The terms $M^2_{12}, M^2_{21},$ $M^2_{23}, M^2_{32}$, $M^2_{14}, M^2_{41}$, $M^2_{34}, M^2_{43},$ arise from mixing between the staus and the mirrors. We assume that all the masses are of the electroweak scale so all the terms enter in the diagonalization. 
We diagonalize the hermitian mass$^2$ matrix by the following unitary transformation \begin{eqnarray} \tilde D^{\tau \dagger} M^2_{\tilde \tau} \tilde D^{\tau} = diag (M^2_{\tilde \tau_1}, M^2_{\tilde \tau_2}, M^2_{\tilde \tau_3}, M^2_{\tilde \tau_4}). \end{eqnarray} A similar mass matrix exists in the sneutrino sector. In the basis $(\tilde \nu_L, \tilde N_L, \tilde \nu_R, \tilde N_R)$ it takes the form \begin{eqnarray} (M^2)_{\tilde \nu}= \left(\begin{array}{c} m^2_{11} ~ m^2_{12} ~ m^2_{13} ~ m^2_{14} \\ m^{2}_{21} ~ m^2_{22} ~ m^2_{23} ~ m^2_{24} \\ m^{2}_{31} ~ m^2_{32} ~ m^2_{33} ~ m^2_{34}\\ m^2_{41} ~ m^2_{42} ~ m^2_{43} ~ m^2_{44} \end{array}\right). \end{eqnarray} As in the charged slepton sector here also the terms $m^2_{11}, m^2_{13}, m^2_{31}, m^2_{33}$ arise from soft breaking in the sector $\tilde \nu_L, \tilde \nu_R$. Similarly the terms $m^2_{22}, m^2_{24},$ $m^2_{42}, m^2_{44}$ arise from soft breaking in the sector $\tilde N_L, \tilde N_R$. The terms $m^2_{12}, m^2_{21},$ $m^2_{23}, m^2_{32}$, $m^2_{14}, m^2_{41}$, $m^2_{34}, m^2_{43},$ arise from mixing between the physical sector and the mirror sector. Again as in the charged lepton sector we assume that all the masses are of the electroweak size so all the terms enter in the diagonalization. The above matrix can be diagonalized by the following unitary transformation \begin{eqnarray} \tilde D^{\nu\dagger} M^2_{\tilde \nu} \tilde D^{\nu} = diag (M^2_{\tilde \nu_1}, M^2_{\tilde \nu_2}, M^2_{\tilde \nu_3}, M^2_{\tilde \nu_4}). \end{eqnarray} The physical tau and neutrino states are $\tau\equiv \tau_1, \nu\equiv \nu_1$, and the states $\tau_2, \nu_2$ are heavy states with mostly mirror particle content. The states $\tilde \tau_i, \tilde \nu_i; ~i=1-4$ are the slepton and sneutrino states. 
For the case of no mixing these limit as follows \begin{eqnarray} \tilde \tau_1\to \tilde \tau_L, ~\tilde \tau_2\to \tilde E_L, ~\tilde \tau_3\to \tilde \tau_R, ~ \tilde \tau_4\to \tilde E_R\nonumber\\ \tilde \nu_1\to \tilde \nu_L, ~\tilde \nu_2\to \tilde N_L, ~\tilde \nu_3\to \tilde \nu_R, ~ \tilde \nu_4\to \tilde N_R. \end{eqnarray} A further discussion of the scalar mass$^2$ matrices is given in Appendix A. In the mass diagonal basis the interactions of the neutrino $\nu$ and of the stau which include the mixing effects with the mirrors are given by \begin{eqnarray} -{\cal{L}}_{\nu - \tilde{\tau}- \chi^+}= \sum_{\alpha =1-2} \sum_{j=1-4} g\bar\nu_{\alpha}[ D^{\nu\dagger}_{L \alpha 1}U_{i1}P_R- D^{\nu\dagger}_{R \alpha 1}\kappa_{\nu}V^*_{i2}P_L]\tilde{\chi}^+_i \tilde D^{\tau}_{1 j}\tilde \tau_j\nonumber\\ +g\bar\nu_{\alpha}[- D^{\nu\dagger}_{L \alpha 1}\kappa_{\tau}U_{i2}P_R]\tilde{\chi}^+_i \tilde D^{\tau}_{3 j}\tilde \tau_j\nonumber\\ +g\bar\nu_{\alpha}[ D^{\nu\dagger}_{R \alpha 2}V^*_{i1}P_L- D^{\nu\dagger}_{L \alpha 2}\kappa_{N}U_{i2}P_R]\tilde{\chi}^+_i \tilde D^{\tau}_{4 j}\tilde \tau_j\nonumber\\ +g\bar\nu_{\alpha}[- D^{\nu\dagger}_{R \alpha 2}\kappa_{E_{\tau}}V^*_{i2}P_L]\tilde{\chi}^+_i \tilde D^{\tau}_{2 j}\tilde \tau_j+H.c \label{chargino} \end{eqnarray} For ${\cal{L}}_{\tau - \tilde{\nu}- \chi^+}$ we have \begin{eqnarray} -{\cal{L}}_{\tau - \tilde{\nu}- \chi^+}= \sum_{\alpha =1-2} \sum_{j=1-4} g\bar\tau_{\alpha}[ D^{\tau\dagger}_{L \alpha 1}V_{i1}P_R- D^{\tau\dagger}_{R \alpha 1}\kappa_{\tau}U^*_{i2}P_L]\tilde{\chi}^c_i \tilde D^{\nu}_{1 j}\tilde \nu_j\nonumber\\ +g\bar\tau_{\alpha}[- D^{\tau\dagger}_{L \alpha 1}\kappa_{\nu}V_{i2}P_R]\tilde{\chi}^c_i \tilde D^{\nu}_{3 j}\tilde \nu_j\nonumber\\ +g\bar\tau_{\alpha}[ D^{\tau\dagger}_{R \alpha 2}U^*_{i1}P_L- D^{\tau\dagger}_{R \alpha 2}\kappa_{E_{\tau}}V_{i2}P_R]\tilde{\chi}^c_i \tilde D^{\nu}_{4 j}\tilde \nu_j\nonumber\\ +g\bar\tau_{\alpha}[- D^{\tau\dagger}_{R \alpha 
2}\kappa_{N}U^*_{i2}P_L]\tilde{\chi}^c_i \tilde D^{\nu}_{2 j}\tilde \nu_j+H.c \label{charginoa} \end{eqnarray} Next we look at the neutral current interactions and focus on the charged leptons. Here the Z boson interactions are given by \begin{eqnarray} {\cal{L}}_{NC}= -\frac{g}{4 \cos\theta_W} Z_{\mu}\left[ \bar \tau\gamma^{\mu} (4x-1+\gamma_5)\tau + \bar E_{\tau}\gamma^{\mu} (4x-1-\gamma_5)E_{\tau} \right], \label{nc} \end{eqnarray} where $x=\sin^2\theta_W$. We write the result in the mass diagonal basis and get \begin{eqnarray} {\cal{L}}_{NC}=- \frac{g}{2 \cos\theta_W} Z_{\mu} \sum_{\alpha=1,2}\sum_{\beta=1,2} ( \bar \tau_{\alpha}\gamma^{\mu} \tau_{\beta}) \nonumber\\ (x\{D^{\tau \dagger}_{L \alpha 1}D^{\tau}_{L 1\beta}+D^{\tau \dagger}_{R \alpha 1}D^{\tau}_{R 1\beta} +D^{\tau \dagger}_{L \alpha 2}D^{\tau}_{L 2\beta}+D^{\tau \dagger}_{R \alpha 2}D^{\tau}_{R 2\beta} \}\nonumber\\ -\frac{1}{2}\{D^{\tau \dagger}_{L \alpha 1}D^{\tau}_{L 1\beta}+D^{\tau \dagger}_{R \alpha 2}D^{\tau}_{R 2\beta}\})\nonumber\\ +( \bar \tau_{\alpha}\gamma^{\mu}\gamma_5 \tau_{\beta}) (x\{-D^{\tau \dagger}_{L \alpha 1}D^{\tau}_{L 1\beta}+D^{\tau \dagger}_{R \alpha 1}D^{\tau}_{R 1\beta} -D^{\tau \dagger}_{L \alpha 2}D^{\tau}_{L 2\beta}+D^{\tau \dagger}_{R \alpha 2}D^{\tau}_{R 2\beta}\}\nonumber\\ +\frac{1}{2}\{D^{\tau \dagger}_{L \alpha 1}D^{\tau}_{L 1\beta}-D^{\tau \dagger}_{R \alpha 2}D^{\tau}_{R 2\beta}\}). \label{zinteractions} \end{eqnarray} Next we discuss the neutralino interaction. Using the parts of Eq. (\ref{general}) that produce the interaction of the mirror lepton with the neutralino we have \begin{equation} {\cal{L}}=i\frac{g}{\sqrt{2}} \tau^3_{ij} \lambda^3 \psi_j A^*_i +ig'\sqrt{2}Y_i\delta_{ij}\lambda'\psi_j A^*_i -\frac{1}{2}\frac{\partial ^2 W}{\partial{A_i}\partial{A_j}}\psi_ i \psi_ j+H.c. 
\end{equation} The part of interest in the superpotential here is \begin{eqnarray} W= f_2 H_1^1 \tilde{N}_R^* \tilde{N}_L+f_2' H_2^2 \tilde{E}^*_{\tau R} \tilde{E}_{\tau L} \end{eqnarray} By using the fermions of the mirror generation and the supersymmetric partners of the neutral Higgs for $\psi$ and the mirror sleptons and neutral Higgs for $A$ one gets the following lagrangian for the interactions of the mirror leptons with neutralino in the two component notation \begin{eqnarray} {\cal{L}}=i\frac{g}{\sqrt{2}}\lambda^3 [E^c_{\tau L} \tilde{E}_{\tau R}-N^c_L\tilde{N}_R] +i\frac{g'}{\sqrt{2}}\lambda' [E^c_{\tau L} \tilde{E}_{\tau R}+N^c_L\tilde{N}_R]\nonumber\\ -i\sqrt{2}g'\lambda' E_{\tau L} \tilde{E}^*_{\tau L} -\frac{gm_{N}}{\sqrt{2}M_W\cos\beta}[\tilde{N}_L \psi_{H_1^0} N^c_{L}+\tilde{N}^*_{R} \psi_{H_1^0}N_L]\nonumber\\ -\frac{gm_{E}}{\sqrt{2}M_W\sin\beta}[\tilde{E}_{\tau L} \psi_{H_2^0} E^c_{\tau L}+\tilde{E}^*_{\tau R} \psi_{H_2^0}\tilde{E}_{\tau L}]+H.c. \label{fourth2} \end{eqnarray} Now we go from two-spinor to four-spinor by defining the four Majorana spinors \begin{eqnarray} \tilde{B}= \left(\begin{array}{c}-i\lambda'\\ i\bar \lambda'\end{array}\right) , ~\tilde{W}_3= \left(\begin{array}{c}-i\lambda^3\cr i\bar \lambda^3\end{array}\right) ,~\tilde{H}_1= \left(\begin{array}{c}\psi_{H_1^0}\cr \bar \psi_{H_1^0}\end{array}\right) ,~\tilde{H}_2= \left(\begin{array}{c} \psi_{H_2^0}\cr \bar \psi_{H_2^0}\end{array}\right). 
\end{eqnarray} The lagrangian in terms of these fields reads \begin{eqnarray} {\cal{L}}=\frac{1}{\sqrt{2}}\tilde{N}_R [g\bar N P_L \tilde{W}_3-g'\bar N P_L \tilde{B}] -\frac{1}{\sqrt{2}}\tilde{E}_{\tau R} [g\bar E_{\tau} P_L \tilde{W}_3+g'\bar E_{\tau} P_L \tilde{B}]\nonumber\\ +\sqrt{2}g' \tilde{E}^*_{\tau L} \bar{\tilde{B}} P_L E_{\tau} -\frac{gm_{N}}{\sqrt{2}M_W\cos\beta}[\tilde{N}_L \bar N P_L \tilde{H_1}+ \tilde{N}^*_R \bar{\tilde{H_1}} P_L N]\nonumber\\ -\frac{gm_{E}}{\sqrt{2}M_W\sin\beta}[\tilde{E}_{\tau L} \bar{E_{\tau}} P_L \tilde{H_2}+ \tilde{E}^*_{\tau R} \bar{\tilde{H_2}} P_L E_{\tau}]. \label{neutralino2} \end{eqnarray} We can write this interaction in the neutralino mass eigen state basis $\tilde{\chi}^0_j$ where \begin{equation} X^T M_{\tilde{\chi}^0} X=diag(m_{{\chi^0}_1}, m_{{\chi^0}_2}, m_{{\chi^0}_3}, m_{{\chi^0}_4}) \end{equation} In writing Eq.(\ref{neutralino2}) in this basis the following relations are found useful \begin{eqnarray} P_L \tilde{W}_3 = P_L \sum_{j=1}^4 X_{2j} \tilde{\chi}^0_j,~P_L \tilde{B} = P_L \sum_{j=1}^4 X_{1j} \tilde{\chi}^0_j,\nonumber\\ P_L \tilde{H}_1 = P_L \sum_{j=1}^4 X_{3j} \tilde{\chi}^0_j, P_L \tilde{H}_2 = P_L \sum_{j=1}^4 X_{4j} \tilde{\chi}^0_j,\nonumber\\ \bar{\tilde{H_1}} P_L=\sum_{j=1}^4 X_{3j}\bar{\tilde{\chi}}^0_j P_L,~ \bar{\tilde{H_2}} P_L=\sum_{j=1}^4 X_{4j}\bar{\tilde{\chi}}^0_j P_L,~ \bar{\tilde{B}} P_L=\sum_{j=1}^4 X_{1j}\bar{\tilde{\chi}}^0_j P_L \end{eqnarray} Using the above the interactions of the mirror lepton $E_{\tau}$ with the neutralino mass eigen states is given by \begin{eqnarray} -{\cal{L}}_{E_{\tau}-\tilde{E}_{\tau}-\chi^0}= \frac{1}{\sqrt 2} \sum_{j=1-4}\left[ \bar E_{\tau}\left(a'_j- b'_j\gamma_5\right) \tilde \chi^0_j \tilde E_{\tau R} + \bar E_{\tau}\left(c'_j- d'_j\gamma_5\right) \tilde \chi^0_j \tilde E_{\tau L} \right]+H.c. 
\end{eqnarray} Here \begin{eqnarray} a_j'= (\alpha_{E_{\tau}j} + \beta_{E_{\tau}j}),~~ b_j'=(-\alpha_{E_{\tau}j} + \beta_{E_{\tau}j}), \nonumber\\ c_j'=-(\gamma_{E_{\tau}j} +\delta_{E_{\tau}j}), d_j'= (\gamma_{E_{\tau}j} -\delta_{E_{\tau}j}), \end{eqnarray} and $\alpha_{E_{\tau j}}$, $\beta_{E_{\tau j}}$, $\gamma_{E_{\tau j}}$ and $\delta_{E_{\tau j}}$ are defined so that \begin{eqnarray}\label{alphabk} \alpha_{E_{\tau j}} =\frac{g m_{E} X^*_{4j}}{2m_W\sin\beta},~~ \beta_{E_{\tau j}}=eX_{1j}^{'} +\frac{g}{\cos\theta_W} X_{2j}^{'} (\frac{1}{2}-\sin^2\theta_W),\nonumber\\ \gamma_{E_{\tau j}}=e X_{1j}^{'*}-\frac{g\sin^2\theta_W}{\cos\theta_W} X_{2j}^{*'}, ~~ \delta_{E_{\tau j}}=-\frac{g m_{E} X_{4j}}{2m_W \sin\beta} \end{eqnarray} and \begin{eqnarray} X'_{1j}= (X_{1j}\cos\theta_W + X_{2j} \sin\theta_W), \nonumber\\ X'_{2j}= (-X_{1j}\sin\theta_W + X_{2j} \cos\theta_W). \end{eqnarray} The above may be compared with the interactions of the $\tau$ lepton with neutralinos which are given by \begin{eqnarray} -{\cal{L}}_{\tau-\tilde \tau-\chi^0}= \frac{1}{\sqrt 2} \sum_{j=1-4}\left[ \bar \tau\left(a_j+ b_j\gamma_5\right) \tilde \chi^0_j \tilde \tau_L + \bar \tau\left(c_j+ d_j\gamma_5\right) \tilde \chi^0_j \tilde \tau_R \right]+H.c. \end{eqnarray} Here \begin{eqnarray} a_j= (\alpha_{\tau j} + \beta_{\tau j}), ~~b_j=(-\alpha_{\tau j} +\beta_{\tau j}), \nonumber\\ c_j=-(\gamma_{\tau j} +\delta_{\tau j}), ~~d_j= (\gamma_{\tau j}- \delta_{\tau j}), \end{eqnarray} where \begin{eqnarray} \alpha_{\tau j} =\frac{g m_{\tau} X_{3j}}{2m_W\cos\beta},~~ \beta_{\tau j}=-eX_{1j}^{'*} +\frac{g}{\cos\theta_W} X_{2j}^{'*} (-\frac{1}{2}+\sin^2\theta_W),\nonumber\\ \gamma_{\tau j}=-e X_{1j}'+\frac{g\sin^2\theta_W}{\cos\theta_W} X_{2j}', ~~ \delta_{\tau j}=-\frac{g m_{\tau} X^*_{3j}}{2m_W \cos\beta}. 
\end{eqnarray} Rotation into the mass diagonal basis of the leptons and sleptons gives the result \begin{eqnarray} -{\cal{L}}_{\tau-\tilde \tau-\chi^0} = \frac{1}{\sqrt 2} \sum_{\alpha =1-2} \sum_{k=1-4} \sum_{j=1-4} \bar \tau_{\alpha} [(D^{\tau\dagger}_+)_{\alpha 1} a_j + (D^{\tau\dagger}_-)_{\alpha 1} b_j \nonumber\\ +\gamma_5 \left((D^{\tau\dagger}_-)_{\alpha 1} a_j + (D^{\tau\dagger}_+)_{\alpha 1} b_j \right)] \tilde \chi^0_j (\tilde D^{\tau})_{1 k} \tilde \tau_k\nonumber\\ +\bar \tau_{\alpha} [(D^{\tau\dagger}_+)_{\alpha 1} c_j + (D^{\tau\dagger}_-)_{\alpha 1} d_j +\gamma_5 \left((D^{\tau\dagger}_-)_{\alpha 1} c_j + (D^{\tau\dagger}_+)_{\alpha 1} d_j \right)] \tilde \chi^0_j (\tilde D^{\tau})_{3 k} \tilde \tau_k\nonumber\\ +\bar \tau_{\alpha} [(D^{\tau\dagger}_+)_{\alpha 2} a'_j - (D^{\tau\dagger}_-)_{\alpha 2} b'_j +\gamma_5 \left((D^{\tau\dagger}_-)_{\alpha 2} a'_j - (D^{\tau\dagger}_+)_{\alpha 2} b'_j \right)] \tilde \chi^0_j (\tilde D^{\tau})_{4 k} \tilde \tau_k\nonumber\\ +\bar \tau_{\alpha} [(D^{\tau\dagger}_+)_{\alpha 2} c'_j - (D^{\tau\dagger}_-)_{\alpha 2} d'_j +\gamma_5 \left((D^{\tau\dagger}_-)_{\alpha 2} c'_j - (D^{\tau\dagger}_+)_{\alpha 2} d'_j \right)] \tilde \chi^0_j (\tilde D^{\tau})_{2 k} \tilde \tau_k +H.c. \label{neutralinoo} \end{eqnarray} where \begin{eqnarray} D_{\pm}^{\tau}= \frac{1}{2} (D^{\tau}_L\pm D^{\tau}_R). \end{eqnarray} Our final result including the mixings of leptons and mirror leptons and the mixings of sleptons and of mirror sleptons are given by Eq.(\ref{LR}) for the W boson interactions, Eq.(\ref{chargino}) and Eq. (\ref{charginoa}) for the chargino interactions and by Eq.(\ref{zinteractions}) for the Z boson interactions, and by Eq.(\ref{neutralinoo}) for the neutralino interactions. 
\section{Neutrino magnetic moment\label{magnetic}} The discovery of neutrino masses from the solar and atmospheric data \cite{Abdurashitov:1999zd,Ahmad:2002jz,Altmann:2000ft,Ambrosio:2001je,Fukuda:2000np,Hampel:1998xg} has very significantly advanced our understanding of the basic nature of these particles. One outcome of non-vanishing neutrino masses is the possibility that they could possess non-vanishing magnetic and electric dipole moments if the neutrinos are Dirac particles while only transition magnetic moments are allowed if they are Majorana. In this analysis we assume the Dirac nature of the neutrinos. In this case the neutrinos will have non-vanishing magnetic and electric dipole moments and such moments could enter in several physical phenomena\cite{pheno}. One phenomenon where the moments may play a role is in the neutrino spin flip processes such as\cite{Kuznetsov:2007ct} $\nu_L\to \nu_R +\gamma^*$ or $\nu_L +\gamma^*\to \nu_R$. From experiment, there already exist limits on both the magnetic and the electric dipole moments of neutrinos. Our focus will be the magnetic moment of the tau neutrino which is affected by the mixing effects from the mirror leptons. (For previous work on neutrino magnetic moment with mirror effects in a different context see \cite{Maalampi:1988va}.) The current limit on the magnetic moment of the $\tau$ neutrino is\cite{exp3} \begin{eqnarray} |\mu(\nu_{\tau})|\leq 1.3\times 10^{-7} \mu_B \end{eqnarray} where $\mu_B=(e/2m_e)$ is the Bohr magneton. The magnetic moment of the neutrino arises in the Standard Model at one loop via the exchange of the W boson assuming one extends the Standard Model to include a right handed neutrino (see Fig.(\ref{smfig})), and in the supersymmetric models there are additional contributions arising from the chargino exchange contributions (see Fig.(\ref{susyfig})). 
\begin{figure} \vspace{-7cm} \scalebox{2.25} { \hspace{-2cm} \includegraphics[width=10cm,height=12cm]{g2fig1.pdf} } \vspace{-12cm} \caption{The loop contributions to the magnetic dipole moment of neutrinos ($\nu_i$) via exchange of $W^+$ boson and via the exchange of leptons and mirror leptons denoted by $\tau_j$.} \label{smfig} \end{figure} Neutrino masses for the first three generations are very small, i.e., from WMAP data one has $\sum_i |m_{\nu_i}|\le (.7-1)$ eV\cite{Spergel:2003cb}. If the neutrinos are Dirac one would need to explain how such tiny Dirac masses are generated which would typically require fine tunings of $O(10^{-10})$ or more. However, unlike the Majorana neutrino case for which there is a standard mechanism for the generation of small neutrino masses, i.e., see-saw, there is no standard mechanism for the generation of small Dirac neutrino masses. Indeed this topic continues to be a subject of ongoing research and several recent works can be found in \cite{hung,diracmass}. Here, we do not go into details on this topic which would take us far afield. Thus in this work we do not make any attempt to deduce the smallness of the neutrino masses but rather assume this is the case. With this assumption we discuss below the tau neutrino magnetic moment in the extended MSSM with mirrors for the case when there is mixing with the mirror leptons. The contributions to be discussed arise from loops containing (1) lepton (mirror lepton)- W boson and (2) scalar leptons (scalar mirrors)- charginos. 
From Eq.(\ref{LR}) one can calculate the W boson, charged lepton and charged mirror lepton contributions arising from Fig.(1) to the magnetic moment of the $\tau$ neutrino in $\mu_B$ units to be \begin{eqnarray} \mu^{(1)}_{\nu}=\frac{-G_Fm_e}{8\pi^2\sqrt{2}}\sum_{\gamma =1}^{2} \sum_{\delta =1}^{2} \sum_{\beta =1}^{2} m_{\tau_\beta} G_1(\frac{m_{\tau_\beta}}{M_W})\nonumber\\ (|(D^{\nu \dagger}_L)_{1\gamma}g^L_{\gamma \delta}(D^{\tau}_L)_{\delta \beta} +(D^{\nu \dagger}_R)_{1\gamma}g^R_{\gamma \delta}(D^{\tau}_R)_{\delta \beta}|^2\nonumber\\ -|(D^{\nu \dagger}_L)_{1\gamma}g^L_{\gamma \delta}(D^{\tau}_L)_{\delta \beta} -(D^{\nu \dagger}_R)_{1\gamma}g^R_{\gamma \delta}(D^{\tau}_R)_{\delta \beta}|^2)\nonumber\\ +\frac{3G_Fm_{\nu}m_e}{16\pi^2\sqrt{2}} \sum_{\gamma =1}^{2} \sum_{\delta =1}^{2} \sum_{\beta =1}^{2}G_2(\frac{m_{\tau_\beta}}{M_W})\nonumber\\ (|(D^{\nu \dagger}_L)_{1\gamma}g^L_{\gamma \delta}(D^{\tau}_L)_{\delta \beta} +(D^{\nu \dagger}_R)_{1\gamma}g^R_{\gamma \delta}(D^{\tau}_R)_{\delta \beta}|^2\nonumber\\ +|(D^{\nu \dagger}_L)_{1\gamma}g^L_{\gamma \delta}(D^{\tau}_L)_{\delta \beta} -(D^{\nu \dagger}_R)_{1\gamma}g^R_{\gamma \delta}(D^{\tau}_R)_{\delta \beta}|^2), \label{leptonmirror} \end{eqnarray} where the form factor functions $G_1(r)$ and $G_2(r)$ are given by \begin{eqnarray} G_1(r)=\frac{4-r^2}{1-r^2}+\frac{3r^2}{(1-r^2)^2}\ln(r^2),\nonumber\\ G_2(r)=\frac{2-5r^2+r^4}{(1-r^2)^2}-\frac{2r^4}{(1-r^2)^3}\ln(r^2). \end{eqnarray} As noted already Eq.(\ref{leptonmirror}) includes the contributions from the tau and from the mirror lepton. 
We parametrize the mixing between $\tau$ and $E_{\tau}$ by the angle $\theta$, where \begin{eqnarray} {\left(\begin{array}{c} \tau\cr E_\tau \end{array}\right)}= {\left( \begin{array}{cc} \cos\theta & \sin\theta \cr -\sin\theta & \cos\theta \end{array}\right)}{\left(\begin{array}{c} \tau_1\cr \tau_2\end{array}\right)}, \end{eqnarray} and the mixing between $\nu$ and $N$ by the angle $\phi$ where \begin{eqnarray} {\left(\begin{array}{c} \nu\cr N\end{array}\right)}= {\left( \begin{array}{cc} \cos\phi & \sin\phi \cr -\sin\phi & \cos\phi \end{array}\right)}{\left(\begin{array}{c} \nu_1\cr \nu_2\end{array}\right)}. \end{eqnarray} where we take $D^{\tau}_L=D^{\tau}_R$ and $D^{\nu}_L=D^{\nu}_R$ or $\theta_L=\theta_R=\theta$ and $\phi_L=\phi_R=\phi$. These are simplicity assumptions to get the size of numerical estimates and are easily improved with better understanding of mixings with mirror and ordinary leptons. We identify $\tau_1$ with the physical $\tau$ and $\tau_2$ with the mirror generation lepton. When there is no risk of confusion we will set $\tau_1=\tau$ and $\tau_2=E$, and similarly for the $\nu_1$ and $\nu_2$ where we set $\nu_1=\nu_{\tau}$ and $\nu_2=N$. Now we see that the first term of Eq.(\ref{leptonmirror}) is proportional to the fermion mass $m_{\tau_{\beta}}$ which could be a lepton or a mirror lepton. For the lepton loop $\beta =1$, the first term in Eq.(\ref{leptonmirror}) is proportional to $[\cos^2(\theta -\phi)-\cos^2(\theta +\phi)]$ and the second term is proportional to $[\cos^2(\theta -\phi)+\cos^2(\theta +\phi)]$. For the mirror lepton loop $\beta =2$, and the first term in Eq.(\ref{leptonmirror}) is proportional to $[\sin^2(\theta -\phi)-\sin^2(\theta +\phi)]$ while the second term in Eq.(\ref{leptonmirror}) is proportional to $[\sin^2(\theta -\phi)+\sin^2(\theta +\phi)]$. 
Thus if the mixing between lepton and mirror leptons exist, the first term for the case of $\beta =2$ can produce a large contribution to the neutrino magnetic moment if the mirror lepton mass is in the region of few hundreds GeV. Also if this mixing is absent, the contribution would come only from the $\tau$-lepton loop. In this case, the first term does not contribute and the second term gives the result \begin{equation} \frac{3m_{\tau}m_{\nu_{\tau}}G_F}{4\sqrt 2\pi^2}, \end{equation} taking into account the limit $G_2(0)=2$. Thus Eq.(\ref{leptonmirror}) gives for the neutrino magnetic moment the value of $3.2\times 10^{-19}(\frac{m_{\nu}}{eV})\mu_B$ and agrees with the previous analyses given in the Standard Model \cite{CabralRosetti:1999ad,Dvornikov:2003js}. We note that the underlying assumptions of \cite{CabralRosetti:1999ad,Dvornikov:2003js} regarding a small Dirac mass is identical to ours except that our analysis is more general in that it includes both supersymmetry and mirror contributions.\\ \begin{figure} \vspace{-7cm} \scalebox{2.25} { \hspace{-2cm} \includegraphics[width=10cm,height=12cm]{g2fig2.pdf} } \vspace{-12cm} \caption{The supersymmetric loop contributions to the magnetic dipole moment of neutrinos ($\nu_i$) via exchange of charginos ($\chi_j^+$), sleptons and mirror sleptons denoted by $\tilde \tau_k$.} \label{susyfig} \end{figure} Next we compute the supersymmetric contributions to the $\nu_{\tau}$ magnetic moment which include the chargino, the slepton and the mirror slepton contributions which can be calculated using Eq.(\ref{chargino}). 
The result in $\mu_B$ units is \begin{eqnarray} \mu^{(2)}_{\nu}=-\frac{g^2m_e}{16\pi^2}\sum_{k =1}^{2} \sum_{j =1}^{4} \frac{1}{m_{\chi^+_k}} \{\kappa_{\nu}|\tilde{D}^{\tau}_{1j}|^2 Re(D^{\nu \dagger}_{L_{11}}U_{k1}D^{\nu}_{R_{11}}V_{k2})\nonumber\\ +\kappa_N |\tilde{D}^\tau_{4j}|^2Re(D^{\nu \dagger}_{R_{12}}V^*_{k1}D^{\nu}_{L_{12}}U^*_{k2})\} G_3(\frac{M_{\tilde{\tau}_j}}{m_{\chi^+_k}})\nonumber\\ +\frac{g^2m_em_{\nu_{\tau}}}{96\pi^2}\sum_{k =1}^{2} \sum_{j =1}^{4} \frac{1}{m^2_{\chi^+_k}} \{|\tilde{D}^\tau_{1j}|^2[|D^{\nu \dagger}_{L_{11}}U_{k1}|^2+\kappa^2_{\nu}|D^{\nu \dagger}_{R_{11}}V^*_{k2}|^2 ]+\kappa^2_{\tau} |\tilde{D}^\tau_{3j}|^2|D^{\nu \dagger}_{L_{11}}U_{k2}|^2\nonumber\\ +|\tilde{D}^\tau_{4j}|^2[|D^{\nu \dagger}_{R_{12}}V^*_{k1}|^2+\kappa^2_{N}|D^{\nu \dagger}_{L_{12}}U_{k2}|^2 ]+\kappa^2_{E_{\tau}}|\tilde{D}^\tau_{2j}|^2|D^{\nu \dagger}_{R_{12}}V^*_{k2}|^2\} G_4(\frac{M_{\tilde{\tau}_j}}{m_{\chi^+_k}}) \end{eqnarray} where \begin{eqnarray} G_3(r)=\frac{-2}{r^2-1}+\frac{2r^2}{(r^2-1)^2}\ln(r^2),\nonumber\\ G_4(r)=\frac{3(1+r^2)}{(1-r^2)^2}+\frac{6r^2}{(1-r^2)^3}\ln(r^2). \end{eqnarray} The numerical sizes of the neutrino moments $\mu_{\nu}^{(1)}$ and $\mu_{\nu}^{(2)}$ will be discussed in Sec.(5). \section{$\tau$ anomalous magnetic moment} An evaluation of the anomalous magnetic moment in the standard model gives $a_{\tau}^{SM}=117721 (5) \times 10^{-8}$, where $a_{\tau}=\frac{g_{\tau}-2}{2}$. The experimental limits on this parameter are\cite{eidelman} $-0.052<a_{\tau}^{exp}<0.013$ and so the sensitivity is more than one order of magnitude below where one can see the effects of the $\tau$ anomalous magnetic moment. Here, we calculate the corrections to the $\tau$ anomalous magnetic moment including new physics effects from the supersymmetrized mirror sector which mixes with the $\tau$ lepton sector. Specifically we compute 4 different types of loops corrections to $a_{\tau}$. 
These include the following exchanges in the loops: (1) W boson and neutral mirror leptons; (2) Z boson and charged mirror leptons ; (3) chargino and scalar neutrinos- mirror scalar neutrinos, and (4) neutralino, charged scalar leptons- mirror scalar leptons. Using Eq.(\ref{LR}), one can write the contribution from the W boson loop so that \begin{eqnarray} \Delta^{(1)} a_{\tau}=\frac{g^2}{8}\frac{m_{\tau}}{16\pi^2 M_W} \sum_{\alpha,\gamma,\delta=1,2} [|(D^{\nu\dagger}_{L})_{\alpha\gamma}g^L_{\gamma \delta}(D^{\tau}_{L})_{\delta 1}\nonumber\\ +(D^{\nu\dagger}_{R})_{\alpha\gamma}g^R_{\gamma \delta}(D^{\tau}_{R})_{\delta 1}|^2 -|(D^{\nu\dagger}_{L})_{\alpha\gamma}g^L_{\gamma \delta}(D^{\tau}_{L})_{\delta 1} -(D^{\nu\dagger}_{R})_{\alpha\gamma}g^R_{\gamma \delta}(D^{\tau}_{R})_{\delta 1}|^2 ]h_2(\frac{m_{\nu_{\alpha}}}{M_W}), \end{eqnarray} where \begin{equation} h_2(r)=\frac{6r^5}{(r^2-1)^3}\ln r^2 +\frac{r^5-11r^3+4r}{(r^2-1)^2}. \end{equation} Using Eq.(\ref{zinteractions}), one can write the contribution from the Z boson loop \begin{eqnarray} \Delta^{(2)} a_{\tau}=\frac{g^2}{4\cos^2\theta_W}\frac{m_{\tau}}{16\pi^2 M_Z} \sum_{j=1,2} |x[-(D^{\tau \dagger}_L)_{j1}(D^{\tau}_L)_{11}\nonumber\\ +(D^{\tau \dagger}_R)_{j1}(D^{\tau}_R)_{11} -(D^{\tau \dagger}_L)_{j2}(D^{\tau}_L)_{21} +(D^{\tau \dagger}_R)_{j2}(D^{\tau}_R)_{21}]\nonumber\\ +\frac{1}{2}[(D^{\tau \dagger}_L)_{j1}(D^{\tau}_L)_{11} -(D^{\tau \dagger}_R)_{j2}(D^{\tau}_R)_{21}] |^2 h_1(\frac{m_{\tau_j}}{M_Z}), \end{eqnarray} where $x$ is as defined by Eq.(\ref{nc}) and \begin{equation} h_1(r)=-\frac{6r^3}{(r^2-1)^3}\ln r^2 +\frac{r^5+r^3+4r}{(r^2-1)^2}. 
\end{equation} Next using Eq.(\ref{charginoa}), one can write the contribution from the chargino, scalar neutrino and scalar mirror neutrino as \begin{eqnarray} \Delta^{(3)} a_{\tau}=\frac{g^2m_{\tau}}{16\pi^2}\sum_{i =1}^{2} \sum_{j =1}^{4} \frac{1}{m_{\chi^+_i}} \{\kappa_{\tau}|\tilde{D}^{\nu}_{1j}|^2 Re(D^{\tau \dagger}_{L_{11}}V_{i1}D^{\tau}_{R_{11}}U_{i2})\nonumber\\ +\kappa_{E_{\tau}} |\tilde{D}^\nu_{4j}|^2Re(D^{\tau \dagger}_{R_{12}}U^*_{i1}D^{\tau}_{L_{12}}V^*_{i2})\} F_3(\frac{M^2_{\tilde{\nu}_j}}{m^2_{\chi^+_i}})\nonumber\\ +\frac{g^2m^2_{\tau}}{96\pi^2}\sum_{i =1}^{2} \sum_{j =1}^{4} \frac{1}{m^2_{\chi^+_i}} \{|\tilde{D}^\nu_{1j}|^2[|D^{\tau \dagger}_{L_{11}}V_{i1}|^2+\kappa^2_{\tau}|D^{\tau\dagger}_{R_{11}}U^*_{i2}|^2 ]+\kappa^2_{\nu} |\tilde{D}^\nu_{3j}|^2|D^{\tau \dagger}_{L_{11}}V_{i2}|^2\nonumber\\ +|\tilde{D}^\nu_{4j}|^2[|D^{\tau \dagger}_{R_{12}}U^*_{i1}|^2+\kappa^2_{E_{\tau}}|D^{\tau \dagger}_{L_{12}}V_{i2}|^2 ]+\kappa^2_{N}|\tilde{D}^\nu_{2j}|^2|D^{\tau \dagger}_{R_{12}}U^*_{i2}|^2\} F_4(\frac{M^2_{\tilde{\nu}_j}}{m^2_{\chi^+_i}}) \end{eqnarray} where \begin{equation} F_3(x)=\frac{1}{(x-1)^3}(3x^2-4x+1-2x^2 \ln{x}), \end{equation} and \begin{equation} F_4(x)=\frac{1}{(x-1)^4}(2x^3+3x^2-6x+1-6x^2 \ln{x}). 
\end{equation} Further, using Eq.(\ref{neutralinoo}), one can write the contribution from the neutralino, scalar lepton and scalar mirror lepton as \begin{eqnarray} \Delta^{(4)} a_{\tau}=-\frac{m_{\tau}}{32\pi^2}\sum_{k =1}^{4} \sum_{j =1}^{4} \frac{1}{m_{\chi^0_j}} F_1(\frac{M^2_{\tilde{\tau}_k}}{m^2_{\chi^0_j}})\nonumber\\ |\tilde{D}^{\tau}_{1k}|^2\{|(D^{\tau \dagger}_+)_{11}a_j+(D^{\tau \dagger}_-)_{11}b_j|^2 -|(D^{\tau \dagger}_-)_{11}a_j+(D^{\tau \dagger}_+)_{11}b_j|^2\}\nonumber\\ +|\tilde{D}^{\tau}_{4k}|^2\{|(D^{\tau \dagger}_+)_{12}a'_j-(D^{\tau \dagger}_-)_{12}b'_j|^2 -|(D^{\tau \dagger}_-)_{12}a'_j-(D^{\tau \dagger}_+)_{11}b'_j|^2\}\nonumber\\ +|\tilde{D}^{\tau}_{3k}|^2\{|(D^{\tau \dagger}_+)_{11}c_j+(D^{\tau \dagger}_-)_{11}d_j|^2 -|(D^{\tau \dagger}_-)_{11}c_j+(D^{\tau \dagger}_+)_{11}d_j|^2\}\nonumber\\ +|\tilde{D}^{\tau}_{2k}|^2\{|(D^{\tau \dagger}_+)_{12}c'_j-(D^{\tau \dagger}_-)_{12}d'_j|^2 -|(D^{\tau \dagger}_-)_{12}c'_j-(D^{\tau \dagger}_+)_{12}d'_j|^2\}\nonumber\\ +\frac{m^2_{\tau}}{96\pi^2}\sum_{k =1}^{4} \sum_{j =1}^{4} \frac{1}{m^2_{\chi^0_j}}F_2(\frac{M^2_{\tilde{\tau}_k}}{m^2_{\chi^0_j}})\nonumber\\ |\tilde{D}^{\tau}_{1k}|^2\{|(D^{\tau \dagger}_+)_{11}a_j+(D^{\tau \dagger}_-)_{11}b_j|^2 +|(D^{\tau \dagger}_-)_{11}a_j+(D^{\tau \dagger}_+)_{11}b_j|^2\}\nonumber\\ +|\tilde{D}^{\tau}_{4k}|^2\{|(D^{\tau \dagger}_+)_{12}a'_j-(D^{\tau \dagger}_-)_{12}b'_j|^2 +|(D^{\tau \dagger}_-)_{12}a'_j-(D^{\tau \dagger}_+)_{11}b'_j|^2\}\nonumber\\ +|\tilde{D}^{\tau}_{3k}|^2\{|(D^{\tau \dagger}_+)_{11}c_j+(D^{\tau \dagger}_-)_{11}d_j|^2 +|(D^{\tau \dagger}_-)_{11}c_j+(D^{\tau \dagger}_+)_{11}d_j|^2\}\nonumber\\ +|\tilde{D}^{\tau}_{2k}|^2\{|(D^{\tau \dagger}_+)_{12}c'_j-(D^{\tau \dagger}_-)_{12}d'_j|^2 +|(D^{\tau \dagger}_-)_{12}c'_j-(D^{\tau \dagger}_+)_{12}d'_j|^2\}, \end{eqnarray} where \begin{equation} F_1(x)=\frac{1}{(x-1)^3}(1-x^2+2x \ln{x}), \end{equation} and \begin{equation} F_2(x)=\frac{1}{(x-1)^4}(-x^3+6x^2-3x-2-6x \ln{x}). 
\end{equation} The numerical sizes of $\Delta^{(1)} a_{\tau}- \Delta^{(4)} a_{\tau}$ are discussed in the next section. \section{Constraints and Size Estimates} There are severe phenomenological constraints on extra matter beyond the Standard Model. These constraints can be listed as follows: (1) constraints from the data on the Z width; (2) constraints from direct searches; (3) unitarity constraints on the enlarged $4\times 4$ CKM matrix; (4) constraints from the oblique electroweak effects; and (5) constraints on Yukawas arising from keeping the theory perturbative, i.e., avoiding the development of a Landau pole. Many of these constraints have been investigated in the context of a sequential fourth generation \cite{Kribs:2007nz,Hung:2007ak,Holdom,Novikov:2002tk,Barger:1989dk,Frampton:1999xi,Carena:1995ep} with the analysis of \cite{Kribs:2007nz} being the most recent and the most detailed. We summarize the main results of these analyses below. First of all the constraint (1) can be easily avoided by making the masses of the new particles greater than half the Z boson mass, while (2) can be satisfied by putting lower bounds on new matter from all collider data. For example, the LEP II data puts bounds on charged leptons of about 100 GeV, while the Tevatron puts bounds on the fourth generation quark masses so that \cite{Kribs:2007nz} $m_{u_4} > 258$ GeV (95\% CL) and $m_{d_4} > 268$ GeV (at 95\% CL). 
(3) Regarding the CKM unitarity constraints the enlarged CKM matrix allows a small window for mixings with the fourth generation so that \cite{Kribs:2007nz} $|V_{14}|\leq .04$, $|V_{41}|\leq .08$, $|V_{24}|\leq .17$ and there are similar constraints on the other mixings which allow for non-negligible elements for mixings with the 4th generation.\\ Perhaps the most stringent of the constraints is (4) which comes from the oblique parameters $(S,T,U)$\cite{Peskin:1991sw,Altarelli:1991fk} and specifically from the oblique parameter $S$ (For a recent review of the S,T,U fits to the electroweak data see Ref.\cite{yao,lepweg}). Here a complete fourth generation with degenerate masses gives a contribution of about 0.2. However, this correction can be reduced when one considers splittings of the up and the down fermions in the same multiplet. Using such splittings analyses including the fourth generation allow for consistent $(S,T,U)$ fits to the data (see, e.g., \cite{Kribs:2007nz,Holdom}). (5) Finally it has been shown that the Yukawa couplings can remain perturbative up to the grand unification scale for a range of fourth generation masses and Higgs boson parameters. Thus problems such as generation of Landau pole singularities for a large 4th generation up quark mass can be avoided with appropriate parameter choices. Essentially all of the considerations valid for the sequential fourth generation are also valid for a mirror generation. Thus, for example, consider a fourth generation with up and down fermions $(\psi_1, \psi_2)$ with hypercharge $Y$ and masses $(M_1,M_2)$. The transformation that takes us from fermions to mirror fermions is \begin{eqnarray} {\rm fermions} ~(\psi_1, \psi_2) \leftrightarrow {\rm mirror ~fermions} ~(\psi^c_2, \psi^c_1),\nonumber\\ Y \leftrightarrow -Y, M_1 \leftrightarrow M_2. 
~~~~~~~~~~~~~~~~~~ \label{transformation} \end{eqnarray} Using the above one finds that $\Delta S$ contribution from the mirror generation is the same as for the 4th sequential generation\cite{He:2001tp}. Without going into further details, we assume that fits to the electroweak data similar to those for the sequential fourth generation can be carried out for the case of the mirror generation. \\ Beyond the constraints on a new generation discussed above a mirror generation encounters two more issues. The first concerns avoidance of the survival hypothesis\cite{Georgi:1979md}, i.e., a mirror generation and an ordinary generation can combine to get super heavy masses of GUT size or string scale size. However, it is well known that some of the mirror generations do escape gaining super heavy masses and remain light up to the electroweak scale\cite{Senjanovic:1984rw,Bagger:1984rk}. We assume in this analysis that this indeed is the case for one mirror generation. The second issue concerns the mixing of the mirror generation with the ordinary generations. In this work we assume that the mixing primarily occurs with the third generation. In this circumstance the third generation will develop a small $V+A$ structure in addition to the expected $V-A$ structure. Indeed such a $V+A$ component for some of the third generation particles has been looked at for some time\cite{Jezabek:1994zv,Nelson:1997xd}. We here point out that the current data regarding the third generation leaves open the possibility of new physics. For instance, the analysis of \cite{Choudhury:2001hs} finds a better fit to the precision electroweak data, and specifically a better fit to the forward-backward asymmetry $A_{FB}^b$ of the b -quark, with additional bottom like quarks. 
Similarly, a model-independent measurement of the W boson helicity in the top quark decay $t\to Wb$ at D\O\ \cite{Abazov:2007ve}, gives for the longitudinal fraction $f_0$ and for the right handed fraction $(f_+)$ the result $f_0=.425\pm .166(\rm{stat}) \pm .102(\rm{syst})$ and $f_+=.119\pm .090(\rm{stat})\pm .053(\rm{syst})$ while $f_-$ is determined via the constraint $f_0+f_++f_-=1$. While the model independent analysis above is consistent with the Standard Model prediction with $V-A$ structure of $f_0=.697, f_+=3.6\times 10^{-4}$, the analysis shows that a different Lorentz structure such as $V+A$ is not ruled out at the level of a few percent. A similar situation occurs in the analysis of $\tau$ lepton decays where new physics at the level of a few percent is not necessarily ruled out \cite{Dova:1998uj,Singh:1987hn}.\\ The mixing parameters and the masses of the mirror fermion sector are determined by the input parameters $\theta$, $\phi$, $m_N$ and $m_{E_{\tau}}$, where we assume that $\theta_L=\theta_R=\theta$ and $\phi_L=\phi_R=\phi$ for the purpose of numerical investigation. However, these parameters are not independent but constrained by the symmetry breaking relation (\ref{condition}) which we use to determine $\phi$ in terms of the other parameters. The scalar sector is determined by the mixing angles $\tilde{\theta}_{1,2}$ and $\tilde{\phi}_{1,2}$ and the simplifying assumption that the scalar (mass)$^2$ $4\times 4$ matrix factorizes into two $2\times 2$ block diagonal matrices. If we further assume that $M^2_{i j}=M^2_{i+2 j+2}$ we have the conditions $\tilde{\theta}_1=\tilde{\theta}_2$ and $\tilde{\phi}_1=\tilde{\phi}_2$. The remaining parameters are $M^2_{11}$ and $M^2_{22}$ for both the scalar $\tau$ and scalar neutrino (mass)$^2$ matrices. The scalar spectrum is then calculated from the formulas given in Appendix A. 
\begin{center} \begin{tabular}{|c|c|c|c|c|c|c|c|c|} \multicolumn{9}{c}{Table~1: } \\ \hline $\theta$ & $\tilde{\phi}$ &$\tilde{\theta}$ & $\Delta^{(1)} a_{\tau}$ & $\Delta^{(2)} a_{\tau}$ & $\Delta^{(3)} a_{\tau}$ & $\Delta^{(4)} a_{\tau}$& $\mu^{(1)}_{\nu}$/$\mu_B$& $\mu^{(2)}_{\nu}$/$\mu_B$ \\ & & & $\times 10^{6}$ &$\times 10^{7}$ &$\times 10^{7}$ &$\times 10^{8}$ &$\times 10^{10}$&$\times 10^{10}$ \\ \hline $0.2$ & $0.3$ & $0.4$ & $5.0 $ & $18.$ & $2.4$ & $-8.1 $ & $-24.$ & $15. $\\ \hline $0.15$ & $0.35$ & $0.45$ & $2.8$ & $10. $ & $1.4 $ & $-4.8 $ & $-14. $ & $8.7 $\\ \hline $0.10$ & $0.2$ & $0.3$ & $1.3 $ & $4.7 $ & $.59$ & $-1.92 $ & $-6.2 $ & $3.8 $\\ \hline $0.09$ & $0.0$ & $0.2$ & $1.06$ & $3.8$ & $.47 $ & $-1.52 $ & $-4.90 $ & $3.1 $\\ \hline $0.08$ & $0.2$ & $0.1$ & $.84$ & $3.0$ & $ .38$ & $-1.19 $ & $-3.95$ & $2.4 $\\ \hline $0.07$ & $0.1$ & $0.0$ & $.65$ & $2.30 $ & $.29 $ & $-.91 $ & $-3.04$ & $1.8 $\\ \hline $0.06$ & $0.0$ & $0.2 $ & $.48$ & $1.70 $ & $.21 $ & $-.67 $ & $-2.23 $ & $1.4 $\\ \hline $0.05$ & $0.2$ & $0.1$ & $.33$ & $1.18 $ & $.15 $ & $-.64$ & $-1.55 $ & $.94 $\\ \hline $0.04$ & $0.1$ & $0.0$ & $.21$ & $.76$ & $.09 $ & $-.30$ & $-.99 $ & $.60 $\\ \hline $0.03$ & $0.0$ & $0.2$ & $.12 $ & $.43 $ & $.05 $ & $-.17$ & $-.56 $ & $.34$\\ \hline $0.02$ & $0.2$ & $0.1$ & $.05 $ & $.19$ & $.03$ & $-.07 $ & $-.25$ & $.15 $\\ \hline $0.01$ & $0.1$ & $0.0$ & $.013 $ & $.048 $ & $.006$ & $-.02 $ & $-.062 $ & $.037 $\\ \hline \end{tabular}\\~\\ \noindent \end{center} Table caption: Contributions to the magnetic moments of $\nu_{\tau}$ and of $\tau$ including corrections from the mirror particles and mirror sparticles for a variety of mixing angles between the third generation and the mirror generation consistent with the symmetry breaking constraint of Eq.(\ref{condition}). 
The other input parameters are $\tan\beta=20$, $m_0=400$, $m_{1/2}=150$, $A_0=400$, $m_E=200$, $m_N=220$, $M_{\tilde{\tau}_{11}}=400$, $M_{\tilde{\tau}_{22}}=500$, $m_{\tilde{\nu}_{11}}=420$ and $m_{\tilde{\nu}_{22}}=520$, and $\mu>0$. All masses are in units of GeV and all angles are in radians.\\ The mixings between the third generation and the mirrors can affect among other things the magnetic moments. This is specifically true for the magnetic moment of the $\tau$ neutrino which we discuss next. In this case there will be two contributions, one from the non-susy sector (see Fig.(1)) and the other from the SUSY sector (see Fig.(2)). Similar contributions also arise for the anomalous magnetic moment of the $\tau$. An analysis of these moments is given in Table 1. Here we exhibit numerical sizes of the different contributions to the tau neutrino magnetic moments, i.e., $\mu_{\nu}^{(1)}$ and $ \mu_{\nu}^{(2)}$ and to the anomalous magnetic moment of the $\tau$, i.e., $\Delta^{(1)} a_{\tau}- \Delta^{(4)} a_{\tau}$. The numerical results of the table show that the contribution to the $\tau$ neutrino magnetic moment is as much as eight orders of magnitude larger than what the model without mirror mixings will give. These results may be compared with the prediction of the Standard Model (extended with a right handed neutrino) which is $\mu_{\nu}=O(10^{-19}) (m_{\nu}/eV)\mu_B$. The SM value for the magnetic moment is too small and falls beyond any reasonable possibility of observation. In contrast the result arising from mixing with the mirror sector is only 2-3 orders of magnitude below the current limits and thus not outside the realm of observability. At the same time, we note that the contribution of the mirror sector to the anomalous magnetic moment of the $\tau$ lepton gives only a small correction to the Standard Model prediction. 
\section{LHC Signatures of the mirror sector\label{sig}} Before discussing the LHC signatures of the mirror sector it is useful to list the new particles that arise in the model beyond those that appear in MSSM. In the fermionic sector the new particles are \begin{eqnarray} B, T, E, N \label{mirror-1} \end{eqnarray} where all fields including $N$ are Dirac. In the bosonic sector the new particles in the mass diagonal states are \begin{eqnarray} \tilde B_1, \tilde B_2, \tilde T_1, \tilde T_2, \tilde E_1, \tilde E_2, ~\tilde \nu_{1}, ~\tilde \nu_{2}, ~\tilde \nu_{3}. \label{mirror-2} \end{eqnarray} We note the appearance of three sneutrino states in Eq.(\ref{mirror-2}). This is so because we started out with two extra chiral singlets, one in the MSSM sector and another in the mirror generation. Along with the two chiral neutrino states that arise from the doublets they produce four sneutrino states, one of which is in the MSSM sector and the other three are listed in Eq.(\ref{mirror-2}).\\ In the extended MSSM with mirrors, the mirror fermions and their supersymmetric partners, the mirror sfermions, could produce interesting signatures at the LHC and at the ILC. Thus, for example, if the mirror generation mixes only with the third generation one will have decays of the following type (if $M_{N}> M_E+ M_W$), \begin{eqnarray} N\to E^-W^+, ~ E^-\to \tau^-Z\to \tau^-e^+e^-, \tau^-\mu^+\mu^-, \tau^-\tau^+\tau^- \end{eqnarray} This signal is unique in the sense that there is always at least one $\tau$. Specifically, there is no corresponding signal where one has all three leptons of the first generation, or of the second generation or a mixture thereof. These signatures are uniquely different from the leptonic signatures in MSSM, for example, from those arising from the decay of an off-shell $W^*$\cite{Nath:1987sw}, where $W^*\to \tilde W +\chi_2^0\to l_1 l_2\bar l_2$, i.e., with a $W^*$ decaying into a chargino and the second lightest neutralino. 
Here all leptonic generations appear in all final states. Another interesting signature is the Drell-Yan process \begin{eqnarray} p p \to Z^* \to E^+E^- \to 2\tau 4l, 4\tau 2l, 6\tau, \end{eqnarray} where $l_1,l_2= e, \mu$. Additionally, of course, there can be events with taus, leptons and jets. In each case one has two opposite sign taus. Similarly one can have $pp\to Z^*\to N\bar N$ production. One can also have the production of mirrors via $W^*$ exchange, i.e., via the process \begin{eqnarray} pp\to W^*\to EN \to [\tau l_i\bar l_i, 3\tau, (\tau +2 jets)] + E_T^{\rm miss} \end{eqnarray} Again the leptonic events always have a $\tau$ with no events of the type $l_1l_2\bar l_2$. Similarly decay chains exist with other mass hierarchies, e.g., when $N$ is lighter than $E$. Additionally for the supersymmetric sector of mirMSSM one has production and decays of $\tilde E_{1,2}$ and $\tilde \nu_{i}$ (i=1,2,3). For example, for the case when $\tilde \nu_{i}$ are heavier than $\tilde{E_k}$ one has decays \begin{eqnarray} \tilde \nu_{i} \to \tilde E_k^- W^+, E^-\tilde \chi^+ \end{eqnarray} with subsequent decays of $E^-,\tilde E_k^-$ etc. Thus one has processes of the type \begin{eqnarray} pp \to \tilde \nu_{i} {\tilde \nu^*_{i}} \to \tilde E^+_k \tilde E^-_kW^+W^-, \tilde E^+_kE^-W^{\mp} \tilde \chi^{\pm} \end{eqnarray} Combined with the decays of the $\tilde E^+\tilde E^-$ one can get signatures with $\tau s +{\rm leptons} + {\rm jets} + E_T^{\rm miss}$ with as many as 8 leptons, where all the leptons could be $\tau$s. Another important signature is the radiative decay\cite{De Rujula:1980qd} of $N$ where \begin{eqnarray} N\to \nu_{\tau} \gamma. \end{eqnarray} This decay occurs via the transition electric and magnetic moments. The lifetime for the decay is very short and once $N$ is produced it will decay inside the detector. The signal will consist of a very energetic photon with energy in the 100 GeV range. 
Thus if kinematically allowed $h^0, H^0, A^0$ will have decays of the following types \begin{eqnarray} (h^0, H^0, A^0)\to N\bar N \to 2\gamma+ E_T^{\rm miss}. \end{eqnarray} Once a new generation is seen, a study of their production and decay can reveal if they are a sequential generation or a mirror generation. Let us consider the sequential fourth generation first with the superpotential \begin{eqnarray} W_{4th-seq}= \epsilon_{ij}[y_{4e}\hat H^i_1 \hat \psi^j_{4L} \hat e^c_{4L} +y_{4d} \hat H^i_1 \hat q^j_{4L} \hat d^c_{4L} +y_{4u} \hat H^j_2 \hat q^i_{4L} \hat u^c_{4L} +y_{4\nu} \hat H^j_2 \hat \psi^i_{4L} \hat \nu^c_{4L}] \end{eqnarray} which relate the Yukawas with the fermion masses for the 4th generation so that \begin{eqnarray} y_{4u}= \frac{g m_{4u}}{\sqrt 2 M_W\sin\beta}, y_{4\nu}= \frac{g m_{4\nu}}{\sqrt 2 M_W\sin\beta},\nonumber\\ y_{4e}= \frac{g m_{4e}}{\sqrt 2 M_W\cos\beta}, ~y_{4d}= \frac{g m_{4d}}{\sqrt 2 M_W\cos\beta}. \end{eqnarray} For the mirror generation we have \begin{eqnarray} W_{4th-m}=\epsilon_{ij}[f_2 \hat H_1^i \hat \chi^{cj} \hat N_L +f_2' \hat H_2^j \hat \chi^{ci} \hat E_{\tau L} +Y_B \hat H_2^j \hat Q^{ci} \hat B_L +Y_T \hat H_1^i \hat Q^{cj} \hat T_L] \end{eqnarray} and the relations among the Yukawas and the mirror fermion masses are \begin{eqnarray} f_2= \frac{g M_{N}}{\sqrt 2 M_W\cos\beta}, ~~Y_{T}= \frac{g M_{T}}{\sqrt 2 M_W\cos\beta},\nonumber\\ f_2'= \frac{g M_{E}}{\sqrt 2 M_W\sin\beta}, ~Y_{B}= \frac{g M_{B}}{\sqrt 2 M_W\sin\beta}. 
\end{eqnarray} The neutral Higgs mass eigen states $h^0$, $H^0$ and $A^0$ are related to the electroweak eigen states $H_1^1$ and $H_2^2$ by \begin{eqnarray} H_1^1=\frac{1}{\sqrt{2}}[v_1+H^0 \cos\alpha -h^0 \sin\alpha +i A^0 \sin\beta]\nonumber\\ H_2^2=\frac{1}{\sqrt{2}}[v_2+H^0 \sin\alpha +h^0 \cos\alpha +i A^0 \cos\beta] \end{eqnarray} The neutral Higgs couplings of $h^0, H^0$ and of the CP odd Higgs boson $A^0$ with the sequential 4th generation in the Lagrangian takes the form \begin{eqnarray} -{\cal{L}}=\frac{g}{2M_W} (\frac{ m_{4e}\cos\alpha}{ \cos\beta} \bar e_4 e_4 + \frac{ m_{4d}\cos\alpha}{ \cos\beta} \bar d_4 d_4+ \frac{ m_{4u}\sin\alpha}{ \sin\beta} \bar u_4u_4+ \frac{ m_{4\nu}\sin\alpha}{ \sin\beta} \bar \nu_4 \nu_4)H^0\nonumber\\ + \frac{g}{2M_W} (-\frac{ m_{4e}\sin\alpha}{ \cos\beta} \bar e_{4}e_4 -\frac{ m_{4d}\sin\alpha}{ \cos\beta} \bar d_{4}d_4 + \frac{ m_{4u}\cos\alpha}{ \sin\beta} \bar u_{4}u_4 +\frac{ m_{4\nu}\cos\alpha}{ \sin\beta} \bar \nu_{4}\nu_4)h^0\nonumber\\ -\frac{ig}{2M_W}(m_{4e}\bar e_4\gamma_5e_4 \tan\beta + m_{4d} \bar d_4 \gamma_5 d_4 \tan\beta + m_{4u} \bar u_4 \gamma_5 u_4 \cot\beta +m_{4\nu} \bar \nu_4 \gamma_5 \nu_4 \cot\beta) A^0, \label{h01} \end{eqnarray} while for the mirror generation it takes the form \begin{eqnarray} -{\cal{L}}=\frac{g}{2M_W} (\frac{ M_{E}\sin\alpha}{ \sin\beta} \bar E E + \frac{ M_{B}\sin\alpha}{ \sin\beta} \bar B B + \frac{ M_{T}\cos\alpha}{ \cos\beta} \bar T T+ \frac{ M_{N}\cos\alpha}{ \cos\beta} \bar N N )H^0\nonumber\\ +\frac{g}{2M_W} (\frac{ M_{E}\cos\alpha}{ \sin\beta} \bar E E + \frac{ M_{B}\cos\alpha}{ \sin\beta} \bar B B - \frac{ M_{T}\sin\alpha}{ \cos\beta} \bar T T- \frac{ M_{N}\sin\alpha}{ \cos\beta} \bar N N )h^0\nonumber\\ -\frac{ig}{2M_W}(M_{E}\bar E\gamma_5E \cot\beta + M_{B} \bar B \gamma_5 B \cot\beta + M_{T} \bar T \gamma_5 T \tan\beta + M_{N} \bar N \gamma_5 N \tan\beta ) A^0. 
\label{h02} \end{eqnarray} A comparison of Eq.(\ref{h01}) and of Eq.(\ref{h02}) shows a rearrangement of $\alpha$ and $\beta$ dependence. Thus while the down quark and the lepton vertices for a sequential generation are enhanced for large $\tan\beta$, it is the up quark vertex for a mirror generation that is enhanced. The above leads to some interesting features that distinguish a mirror generation from a sequential fourth generation. One important consequence of the above is the following. Suppose the $H^0$ is heavy enough to decay into a pair of fourth generation quarks or a pair of mirror quarks ($m_{H^0}>2m_q, q=u_4, d_4$). Then let us define the ratio of branching ratios $R_{d_4/u_4}^{H^0}$ as \begin{eqnarray} R_{d_4/u_4}^{H^0}= BR(H^0\to d_4\bar d_4)/ BR(H^0\to u_4\bar u_4). \end{eqnarray} Using the vertices in Eq.(\ref{h01}) we find \begin{eqnarray} R_{d_4/u_4}^{H^0}= \frac{m_{d_4}^2}{m_{u_4}^2} (\cot\alpha\tan\beta)^2 P_{d_4/u_4}^{H^0}, \end{eqnarray} where $P_{d_4/u_4}^{H^0}$ is a phase space factor defined by $P_{d_4/u_4}^{H^0}=(1-4m_{d_4}^2/m_H^2)^{3/2}(1-4m_{u_4}^2/m_H^2)^{-3/2}$ (see Appendix B). Similarly if the heavy Higgs can decay into the mirror quarks ($m_{H^0}>2m_Q, Q=B,T$) one has \begin{eqnarray} R_{B/T}^{H^0}= \frac{m_{B}^2}{m_{T}^2} (\tan\alpha\cot\beta)^2 P_{B/T}^{H^0}, \end{eqnarray} where we have neglected the loop effects. Thus with a knowledge of the parameters of the Higgs sector, i.e., $\alpha$ and $\beta$ one has a way of differentiating a mirror generation from a sequential fourth generation. An even more dramatic differentiation arises from the branching ratios involving the decay of the CP odd Higgs. 
Here one finds \begin{eqnarray} R_{d_4/u_4}^{A^0} =\frac{m_{d_4}^2}{m_{u_4}^2} \tan^4\beta P_{d_4/u_4}^{A^0}, \end{eqnarray} where $P_{d_4/u_4}^{A^0}=(1-4m_{d_4}^2/m_A^2)^{1/2}(1-4m_{u_4}^2/m_A^2)^{-1/2}$ while a similar ratio for the decay into the mirror quarks gives (see Appendix B) \begin{eqnarray} R_{B/T}^{A^0} = \frac{m_{B}^2}{m_{T}^2} \cot^4\beta P_{B/T}^{A^0}, \end{eqnarray} where again we have neglected possible loop effects. The above implies that for $\tan\beta\geq 2$, $A^0$ will dominantly decay into $d_4\bar d_4$ for the sequential fourth generation case, while it will decay dominantly into $T\bar T$ for a mirror generation. Another important way to discriminate between a sequential generation and a mirror generation is to look at the forward-backward asymmetry. Thus for the process $f\bar f \to f' \bar f'$ one may define the forward-backward asymmetry $A_{FB}$ = $( \int_0^1 dz (d\sigma/dz)$ - $\int_{-1}^0 dz (d\sigma/dz))$ $/$ $(\int_{-1}^1 dz (d\sigma/dz))$. This asymmetry is sensitive to the $V+A$ vs $V-A$ structure of the $f'$ fermion interaction and a measurement of it can help discriminate between a sequential generation and a mirror generation. In the above we have given a broad outline of the ways in which one might distinguish a mirror generation from a sequential fourth generation. There are many other possible chains for decay of the mirrors and mirror sparticles depending on their mass patterns. Further, more detailed analyses of signatures for the model with mirrors based on detector simulations would be useful along the line of the analysis of signatures for sugra models\cite{msugra} and for string models (For a sample of recent works, see \cite{Feldman:2007zn,kks,arnowitt,mmt,bps}). Finally we comment on the flavor changing neutral current (FCNC) issues. It is well known that mixing with mirrors frustrates the GIM mechanism which suppresses FCNC. 
For the current model this does not pose a problem because the mirrors do not mix with the first two generations. On the other hand one does have couplings of the $Z$ boson which are off diagonal, $Z\bar \tau E$, $Z \bar b B$, $Z\bar t T$ etc which would allow production via a Drell-Yan process of $pp\to Z^*\to \tau^+E^-, t \bar T, b\bar B$ etc, which are not allowed for a sequential generation. Of course the processes are suppressed by mixing angles. \section{Conclusion\label{conclusion}} In this work we consider an extension of MSSM with an extra mirror generation which remains light down to the electroweak scale. Recent analyses indicate that an extra sequential generation is not inconsistent with the precision electroweak data, and similar considerations apply to a mirror generation. In the model we consider, we allow for mixings of the mirror generation with the third generation, and investigate some of the phenomenological implications of the model. One important effect arises on the magnetic moment of the $\tau$ neutrino, where one finds that it is enhanced by up to eight to nine orders of magnitude over what is predicted in the Standard Model. We also discussed the possible signatures of the mirror generation at the LHC, and find that several characteristic signatures exist which would distinguish it from a sequential generation. One such crucial test is the measurement of the forward-backward asymmetry which can discriminate between the $V-A$ vs $V+A$ interactions. It is further shown that the couplings of the mirror generation have different $\tan\beta$ dependences than those of an ordinary generation or of a sequential 4th generation.\\ If a mirror generation exists, it has important implications for string model building. (For some recent work in D brane and string model building see \cite{Blumenhagen:2001te,Cvetic:2001nr,Kobayashi:2004ya,Bouchard:2005ag,Braun:2005nv,Lebedev:2007hv}). 
Typically in string model building one puts in the constraints that the difference between the number of generations $n_f$ and the mirror generations $n_{mf}$ (with $n_f >n_{mf})$ equal three. This assumes that the $n_{mf}$ number of generations and mirror generations follow the survival hypothesis \cite{Georgi:1979md} and become superheavy. However, in unified models there are many instances where mirror generations may remain massless up to the electroweak scale. This opens a new direction for model building. Suppose, then, that one imposes only the constraint $n_f-n_{mf}=2$ along with the condition that one mirror generation remains massless down to the electroweak scale. In this case we will have three ordinary generations and one mirror generation all light at the electroweak scale, i.e., the extended MSSM model with mirrors.\\ If the scenario outlined above holds, the string model building may need a revision in that the constraint of three massless generations will be relaxed. Specifically, for example, in Kac-Moody level 2 heterotic string constructions one has problems getting 3 massless generations (see, e.g., \cite{Kakushadze:1996jm}). On the other hand, if 3 ordinary generations and one mirror generation are massless, the rules of construction for string models change and one may need to take a fresh look at model building in string theory. Of course, the light mirror particles even if they exist need not necessarily fall into a full generation. Thus while a full generation is the simplest possibility for the cancellation of anomalies, it may happen that such cancellations may involve some exotic mirrors. This would make model building even more challenging. Many open questions remain for further study, the most important of which is a detailed dynamical model for the mixings of ordinary and mirror particles below the grand unification scale. 
In the analysis given in this work we assumed a phenomenological approach where we introduce mixings between the two sectors. However, a concrete mechanism is desirable to achieve a more complete understanding of the mixings of the ordinary matter and mirror matter. \noindent {\large\bf Acknowledgments}\\ Interesting conversations with Emanuela Barberis, Patrick Huber, Stuart Raby and Akin Wingerter are acknowledged. This research is supported in part by NSF grant PHY-0757959. \section{Appendix A: Further details of mixings and interactions} In this section we give more explicit forms for the interactions including mixing with mirrors. We first discuss the non-supersymmetric sector where the contributions arise from the W and Z exchanges. By parametrizing the mixing between $\tau$ and $E_{\tau}$ by the angle $\theta$, and between $\nu$ and $N$ by the angle $\phi$, in the simple case where $\theta_L=\theta_R=\theta$ and $\phi_L=\phi_R=\phi$, we can write ${\cal{L}}_{CC}+{\cal{L}}_{NC}$ as \begin{eqnarray} {\cal{L}}_{CC}+{\cal{L}}_{NC}= -\frac{g}{2\sqrt 2} W_{\mu}^{\dagger} \{\bar \nu_1\gamma^{\mu} \tau_1 \cos(\theta-\phi) +\bar \nu_1 \gamma^{\mu} \tau_2 \sin(\theta-\phi) \nonumber\\ -\bar \nu_1\gamma^{\mu} \gamma_5 \tau_1 \cos(\theta+\phi) -\bar \nu_1 \gamma^{\mu} \gamma_5 \tau_2\sin(\theta+\phi)\nonumber\\ -\bar \nu_2\gamma^{\mu} \tau_1 \sin(\theta-\phi) -\bar \nu_2 \gamma^{\mu}\gamma_5 \tau_1 \sin(\theta+\phi) \nonumber\\ +\bar \nu_2\gamma^{\mu} \tau_2 \cos(\theta-\phi) +\bar \nu_2 \gamma^{\mu} \gamma_5 \tau_2\cos(\theta+\phi)\}+H.c\nonumber\\ -\frac{g}{4\cos\theta_W}Z_{\mu} \{\bar \tau_1\gamma^{\mu} (4\cos^2\theta_W -1+\cos 2\theta \gamma_5)\tau_1 \nonumber\\ +\bar \tau_2\gamma^{\mu} (4\cos^2\theta_W -1-\cos 2\theta \gamma_5)\tau_2\nonumber\\ +\bar \tau_1\gamma^{\mu}\gamma_5 \sin 2\theta \tau_2+ \bar \tau_2\gamma^{\mu}\gamma_5 \sin 2\theta \tau_1\}, \label{a1} \end{eqnarray} where $\tau_1, \tau_2$ are the mass eigen states for the charged leptons, with $\tau_1$ 
identified as the physical tau state, and $\nu_1, \nu_2$ are the mass eigen states for the neutrino with $\nu_1$ identified as the observed neutrino. We note that Eq.(\ref{a1}) coincides with Eq.(1) of \cite{mirrors} except for the typo in the middle sign of their third line.\\ In the supersymmetric sector, the mass terms of the scalar leptons and scalar mirror leptons arise from the F-term, the D-term and the soft supersymmetry breaking terms in the scalar potential. For example, the mixing terms between $\tilde{\tau}_L$ and $\tilde{\tau}_R$ can arise from the $\mu$ term in the superpotential and from the trilinear coupling term of the soft breaking potential $V_{soft}$. This gives us the terms $M^2_{13}=M^2_{31}=m_{\tau}(A_{\tau}-\mu \tan\beta)$. The corresponding mixing terms between $\tilde{E}_{\tau L}$ and $\tilde{E}_{\tau R}$ are $M^2_{24}=M^2_{42}=m_{E_{\tau}}(A_{E_{\tau}}-\mu \cot\beta)$. We assume here that the couplings are real; otherwise we would have $M^2_{31}=m_{\tau}(A^*_{\tau}-\mu^* \tan\beta)$ and $M^2_{42}=m_{E_{\tau}}(A^*_{E_{\tau}}-\mu^* \cot\beta)$. In the general parameter space of MSSM one can fix these mixings to be zero by a proper choice of the parameters $\mu$, $A_{\tau}$ and $A_{E_{\tau}}$. The other elements of the scalar mass$^2$ matrix can also be easily worked out. As an example, the F-term produces a part of the mixing between $\tilde{\tau}_R$ and $\tilde{E}_{\tau R}$ as follows \begin{equation} V=F_i^* F_i,~ F_i=\frac{\partial W}{\partial A_i}. \end{equation} Here $A_i$ is the scalar $\tilde{E}_{\tau L}$ and \begin{equation} \frac{\partial W}{\partial \tilde{E}_{\tau L}}=f_2' H_2^2 \tilde{E}^*_{\tau R}+f_4 \tilde{\tau}^*_R-f_2'H_2^1 \tilde{N}^*_R, \end{equation} which gives \begin{equation} V_F=(f_2' H_2^{2*} \tilde{E}_{\tau R}+f_4 \tilde{\tau}_R-f_2'H_2^{1*} \tilde{N}_R)(f_2' H_2^2 \tilde{E}^*_{\tau R}+f_4 \tilde{\tau}^*_R-f_2'H_2^1 \tilde{N}^*_R). 
\end{equation} After breaking of the electroweak symmetry the $V_F$ part of the scalar potential given above produces the following mass terms \begin{equation} -{\cal{L}}_{m}=f^{'2}_2 \frac{v^2_2}{2}\tilde{E}_{\tau R} \tilde{E}^{*}_{\tau R} +f_4 f_2' \frac{v_2}{\sqrt{2}}\tilde{E}^{*}_{\tau R} \tilde{\tau}_R +f_4 f_2' \frac{v_2}{\sqrt{2}}\tilde{E}_{\tau R} \tilde{\tau}^{*}_R +f^2_4 \tilde{\tau}^{*}_R \tilde{\tau}_R \end{equation} Here one finds that the mixing between $\tilde{\tau}_R$ and $\tilde{E}_{\tau R}$ occurs such that the corresponding elements in the mass$^2$ matrix $M^2_{34}$ and $M^2_{43}$ are equal. For illustrative purposes, we assume a simple mixing scenario for mixings in the scalar sector. Specifically we assume mixings among scalars and mirror scalars of the same chirality. Thus for the charged leptons we assume mixings between $\tilde \tau_L$ and $\tilde E_L$ and similarly mixings between $\tilde \tau_R$ and $\tilde E_R$, but no mixing between $\tilde \tau_L, \tilde \tau_R$ and between $\tilde E_L$ and $\tilde E_R$. These are obviously approximations to the more general analysis given in Sec.(2). 
Under the above approximations the diagonalizing matrices $\tilde{D}^{\tau}$ and $\tilde{D}^{\nu}$ would have the following simple structures \begin{eqnarray} \tilde{D}^{\tau}={\left( \begin{array}{cccc} \cos\tilde{\theta}_1 & \sin\tilde{\theta}_1 &0&0 \cr -\sin\tilde{\theta}_1 & \cos\tilde{\theta}_1 &0&0\cr 0&0&\cos\tilde{\theta}_2 & \sin\tilde{\theta}_2\cr 0&0&-\sin\tilde{\theta}_2 & \cos\tilde{\theta}_2 \end{array}\right)}, \end{eqnarray} and \begin{eqnarray} \tilde{D}^{\nu}={\left( \begin{array}{cccc} \cos\tilde{\phi}_1 & \sin\tilde{\phi}_1 &0&0 \cr -\sin\tilde{\phi}_1 & \cos\tilde{\phi}_1 &0&0\cr 0&0&\cos\tilde{\phi}_2 & \sin\tilde{\phi}_2\cr 0&0&-\sin\tilde{\phi}_2 & \cos\tilde{\phi}_2 \end{array}\right)}. \end{eqnarray} In the charged leptonic sector, assuming the independent set of parameters to be $\tilde{\theta}_1$, $\tilde{\theta}_2$, $M^2_{11}$, $M^2_{22}$, $M^2_{33}$ and $M^2_{44}$, one can determine the elements $|M^2_{12}|$ and $|M^2_{34}|$ through the relations \begin{eqnarray} \tan 2\tilde{\theta}_1=\frac{2|M^2_{12}|}{M^2_{11}-M^2_{22}},\nonumber\\ \tan 2\tilde{\theta}_2=\frac{2|M^2_{34}|}{M^2_{33}-M^2_{44}}. \end{eqnarray} The eigenvalues for the masses are then given by \begin{eqnarray} M^2_{{\tilde{\tau}}_1}=\frac{1}{2}(M^2_{11}+M^2_{22})+\frac{1}{2}\sqrt{(M^2_{11}-M^2_{22})^2+4|M^2_{12}|^2},\nonumber\\ M^2_{{\tilde{\tau}}_2}=\frac{1}{2}(M^2_{11}+M^2_{22})-\frac{1}{2}\sqrt{(M^2_{11}-M^2_{22})^2+4|M^2_{12}|^2},\nonumber\\ M^2_{{\tilde{\tau}}_3}=\frac{1}{2}(M^2_{33}+M^2_{44})+\frac{1}{2}\sqrt{(M^2_{33}-M^2_{44})^2+4|M^2_{34}|^2},\nonumber\\ M^2_{{\tilde{\tau}}_4}=\frac{1}{2}(M^2_{33}+M^2_{44})-\frac{1}{2}\sqrt{(M^2_{33}-M^2_{44})^2+4|M^2_{34}|^2}. \end{eqnarray} Similar relations hold for the scalar neutrino sector. 
\section{Appendix B: Decay of the heavy Higgs Bosons $H^0$ and $A^0$ into mirrors} The heavy Higgs decays into mirrors would produce some very characteristic signatures if the masses of the heavy Higgs bosons $H^0$ and $A^0$ are large enough to kinematically allow such decays. We give below the decay widths for the processes with charged mirrors \begin{eqnarray} H^0\to E\bar E, B\bar B, T\bar T,\nonumber\\ A^0\to E\bar E, B\bar B, T\bar T, \end{eqnarray} using the interactions of Eq.(\ref{h02}). For the decay of $H^0$ into charged mirrors we have \begin{eqnarray} \Gamma(H^0\to E\bar E)= \frac{g^2m_{H^0}}{32\pi} (\frac{\sin\alpha}{\sin\beta})^2 (\frac{M_E}{M_W})^2 (1-\frac{4M_E^2}{M_H^{02}})^{3/2}, \nonumber\\ \Gamma(H^0\to B\bar B)= \frac{3g^2m_{H^0}}{32\pi} (\frac{\sin\alpha}{\sin\beta})^2 (\frac{M_B}{M_W})^2 (1-\frac{4M_B^2}{M_H^{02}})^{3/2}, \nonumber\\ \Gamma(H^0\to T\bar T)= \frac{3g^2m_{H^0}}{32\pi} (\frac{\cos\alpha}{\cos\beta})^2 (\frac{M_T}{M_W})^2 (1-\frac{4M_T^2}{M_H^{02}})^{3/2}. \end{eqnarray} These may be compared with the decays of $H^0$ into a 4-th sequential generation which are \begin{eqnarray} \Gamma(H^0\to e_4\bar e_4)= \frac{g^2m_{H^0}}{32\pi} (\frac{\cos\alpha}{\cos\beta})^2 (\frac{m_{e_4}}{M_W})^2 (1-\frac{4m_{e_4}^2}{M_H^{02}})^{3/2}, \nonumber\\ \Gamma(H^0\to d_4\bar d_4)= \frac{3g^2m_{H^0}}{32\pi} (\frac{\cos\alpha}{\cos\beta})^2 (\frac{m_{d_4}}{M_W})^2 (1-\frac{4m_{d_4}^2}{M_H^{02}})^{3/2}, \nonumber\\ \Gamma(H^0\to u_4\bar u_4)= \frac{3g^2m_{H^0}}{32\pi} (\frac{\sin\alpha}{\sin\beta})^2 (\frac{m_{u_4}}{M_W})^2 (1-\frac{4m_{u_4}^2}{M_H^{02}})^{3/2}. 
\end{eqnarray} For the decay of $A^0$ into charged mirrors we have \begin{eqnarray} \Gamma(A^0\to E\bar E)= \frac{g^2m_{A^0}}{32\pi} \cot^2\beta (\frac{M_E}{M_W})^2 (1-\frac{4M_E^2}{M_A^{02}})^{1/2}, \nonumber\\ \Gamma(A^0\to B\bar B)= \frac{3g^2m_{A^0}}{32\pi} \cot^2\beta (\frac{M_B}{M_W})^2 (1-\frac{4M_B^2}{M_A^{02}})^{1/2}, \nonumber\\ \Gamma(A^0\to T\bar T)= \frac{3g^2m_{A^0}}{32\pi} \tan^2\beta (\frac{M_T}{M_W})^2 (1-\frac{4M_T^2}{M_A^{02}})^{1/2}. \end{eqnarray} These may be compared with the decays of $A^0$ into a 4-th sequential generation which are \begin{eqnarray} \Gamma(A^0\to e_4\bar e_4)= \frac{g^2m_{A^0}}{32\pi} \tan^2\beta (\frac{m_{e_4}}{M_W})^2 (1-\frac{4m_{e_4}^2}{M_A^{02}})^{1/2}, \nonumber\\ \Gamma(A^0\to d_4\bar d_4)= \frac{3g^2m_{A^0}}{32\pi} \tan^2\beta (\frac{m_{d_4}}{M_W})^2 (1-\frac{4m_{d_4}^2}{M_A^{02}})^{1/2}, \nonumber\\ \Gamma(A^0\to u_4\bar u_4)= \frac{3g^2m_{A^0}}{32\pi} \cot^2\beta (\frac{m_{u_4}}{M_W})^2 (1-\frac{4m_{u_4}^2}{M_A^{02}})^{1/2}. \end{eqnarray} A study of the branching ratios will differentiate between a sequential fourth generation and a mirror fourth generation.
1,108,101,565,857
arxiv
\section{Introduction}\label{sec:intro} Roughly 75\% of all known exoplanets have been discovered via transit surveys, giving transiting planets an outsized influence on our ability to constrain both exoplanet demographics and planet formation models. In most cases, our best answers remain data-limited: the precision of available transit measurements is insufficient to distinguish between theoretical models. Until recently, uncertainties on transit parameters were dominated by uncertainties on stellar quantities (especially stellar radii), but with recent advances in stellar characterization via \textit{Gaia} astrometry \citep{GaiaDR2, Berger2018} and via high-quality spectroscopy \citep[e.g.][]{Petigura2017, Johnson2017}, the achievable precision on planetary radii and orbital elements derived from transit surveys is now limited predominantly by the quality of the transit fits themselves \citep{Petigura2020}. Obtaining higher precision planet parameter estimates will allow us to answer several pressing open questions in exoplanet science. For example, is the so-called radius valley fully or only partially depleted of planets \citep{Fulton2017, FultonPetigura2018, VanEylen2018, HardegreeUllman2020, Petigura2020}? Are patterns in the mass-period-radius distribution sculpted primarily by photoevaporation \citep{Owen2017}, by core-powered mass loss \citep{Ginzburg2018, Gupta2019}, or by some combination of mechanisms \citep{NeilRogers2020}? What do correlations between radii, periods, inclinations, and eccentricities tell us about the processes by which planets form and evolve \citep{Hansen2012, Chiang2013}? 
Techniques for analyzing exoplanet demographics and exo-system architectures have already been extensively developed \citep{Howard2012, Fabrycky2014, Milholland2017, Weiss2018, He2019, Mills2019, Christiansen2020, GilbertFabrycky2020, HardegreeUllman2020, Zink2020}, so once sufficiently precise estimates of planet properties for a suitably large sample of objects become available, their astrophysical implications will become almost immediately apparent. The keystone transit parameter which must be accurately estimated in order to allow reliable measurements of all underlying planet properties is the impact parameter. Unfortunately, impact parameters are notoriously difficult to estimate and consequently have seldom been the focus of transit lightcurve analyses. Instead, most previous studies have focused on measuring transit depths and durations, both of which are usually well-constrained by observations \citep[e.g.][]{ Mullally2015, Thompson2018}. Still, the limiting factor when converting transit observables into planetary radii, inclinations, and eccentricities is more often than not the least well constrained variable, so we must address the challenge of measuring impact parameters head-on. This problem is vital because transit observables can rarely be translated into planet properties on a one-to-one basis. For planetary radii, this condition arises because transit depths contain information about stellar limb darkening in addition to information about planetary radius, while for inclinations and eccentricities the condition arises because transit durations contain information about transit chord length as well as information about orbital velocity. In all cases, accurately deriving planet parameters from transit observations requires knowing how far from the stellar center the planet transits, which is precisely what the impact parameter measures. 
Even quantities which are usually considered to be well constrained - most notably the planet-to-star radius ratio - depend implicitly (and sometimes sensitively) on the assumed impact parameter. The methods developed in this paper thus first and foremost represent a means of obtaining the highest quality impact parameter measurements possible as a stepping stone toward obtaining correspondingly high quality estimates of planetary radii and other orbital elements. The degeneracies between transit parameters become most severe for planets on grazing or near-grazing trajectories. In particular, although the planet-to-star radius ratio, $r \equiv r_p/R_{\star}$, and the normalized impact parameter, $b$, are largely uncorrelated for non-grazing orbits, these two parameters become highly correlated for grazing geometries. Because grazing orbits are rare \citep{KippingSandford2016}, the standard approach has been to reject suspected grazing or near-grazing bodies from statistical studies altogether \citep[e.g.][]{Petigura2020}. However, for many low to moderate signal-to-noise cases, a significant fraction of the posterior distribution is nonetheless consistent with a grazing geometry, even for planets which most likely orbit on non-grazing trajectories. A failure to accurately model grazing transits can therefore lead to biased inferences even for cases where the planet is not actually on a grazing orbit. Although the $r-b$ grazing degeneracy has been known about for some time \citep{Rowe2014, Rowe2015}, the severity of the problem as it pertains to near-grazing orbits (i.e. orbits with $0.7 \lesssim b < 1-r$) has only recently begun to be appreciated. Because planetary orbits are (probably) isotropically oriented, the unavoidable conclusion is that as many as one third of all existing transit measurements may be corrupted by incomplete consideration of grazing transit geometries. 
The true situation is probably not so dire, but without a reliable way to distinguish grazing from non-grazing orbits, it is difficult to know which transit measurements should be trusted. The goal of this paper is twofold: first, to find a way to reliably identify grazing and near-grazing transits, and second, to accurately fit these transits using a method that produces robust estimates of exoplanet properties. The tool for the job is umbrella sampling \citep{TorrieValleau1977}, a statistical technique which is closely related to importance sampling. Although the application of umbrella sampling is standard practice in the field of molecular dynamics where it originated, umbrella sampling methods have only recently begun to be applied to astrophysical problems \citep{Matthews2018}. Umbrella sampling is a powerful tool for sampling multimodal and other complicated posterior distributions and is suitable for many astrophysical applications. So, in addition to addressing the specific problem of transit lightcurve fitting, we aim for this paper to serve as an accessible introduction to umbrella sampling for astronomers unfamiliar with this fruitful technique. Our present work was primarily inspired by \citet{Matthews2018}, which also stemmed from the twin motivations of applying umbrella sampling to an astrophysical problem (in their case, sampling the low-probability tails of distributions in order to compare cosmological models) and also introducing umbrella sampling to astronomers in a pedagogically accessible manner. This paper is organized as follows. In \S\ref{sec:transits} we review the geometry of transits and discuss the particular challenges of modeling lightcurves of exoplanets on grazing trajectories. In \S\ref{sec:new_basis} we introduce a new parameter basis designed to efficiently sample grazing transits models. In \S\ref{sec:umbrella} we introduce the concept of umbrella sampling and describe our new method for fitting exoplanet transits. 
In \S\ref{sec:sampler_comparison} we present several case studies illustrating the efficacy of our method in a variety of contexts and demonstrate that umbrella sampling outperforms standard sampling techniques. In \S\ref{sec:real_systems} we apply our method to several real {\sl Kepler}\ Objects of Interest with high impact parameters reported on the NASA exoplanet archive \citep{Akeson2013}.\footnote{\url{https://exoplanetarchive.ipac.caltech.edu}; all data for this work was downloaded 18 July 2021} In \S\ref{sec:summary} we summarize our main results and provide recommendations for future lightcurve modeling efforts. \section{The geometry of grazing transits}\label{sec:transits} In this section, we review a few salient aspects of transit lightcurve modeling relevant to the analysis of grazing transits. Exoplanet experts will likely be familiar with much of \S 2.1, which we include in order to aid other astronomers who may wish to adopt umbrella sampling for other problems. A full pedagogical introduction to the geometry of transit lightcurves is presented in \citet{Winn2010}. \subsection{The transit model} The observables which can be directly recovered from a single transit light curve are the mid-transit time, $t_0$, the transit depth, $\delta$, the transit duration, $T$, and the ingress/egress timescale, $\tau$ (Figure \ref{fig:transit_geometry}). When multiple transits are observed, the orbital period, $P$, can be inferred as well. If the planet is assumed to be on a circular orbit around an isolated, uniform surface brightness host star, one can immediately derive four physical quantities: the planet-to-star radius ratio, $r \equiv r_p/R_{\star}$, the scaled separation, $a/R_{\star}$, the mean stellar density, $\rho_{\star}$, and the normalized impact parameter, $b$. 
\begin{figure} \centering \includegraphics[width=0.45\textwidth]{transit_geometry.pdf} \caption{Schematic illustration of a non-grazing transit geometry with the corresponding lightcurve approximated as a trapezoid. The transit depth, $\delta$; transit duration, $T$; ingress/egress duration $\tau$; mid-transit time, $t_0$; impact parameter, $b$; and $1^{\rm st}$ through $4^{\rm th}$ contact points, $t_I$ - $t_{IV}$, are indicated. Note that for this study, the transit duration, $T$, will always refer to the full first-to-fourth contact duration, $T_{14}$, unless otherwise specified because this is the only duration which is defined for all grazing and non-grazing geometries. The approximation $\tau_{12} = \tau_{34}$ (i.e. ingress and egress timescales are equal) is valid as long as eccentricities are not very large. Figure adapted from \citet{Winn2010}.\smallskip} \label{fig:transit_geometry} \end{figure} The transit lightcurve (again, assuming a circular orbit and a solitary star) can be fully specified by any non-degenerate combination using five of the nine above parameters, or any derivable quantity thereof \citep{SeagerMallenOrnelas2003, Winn2010}. These five parameters constitute the model \textit{basis set}. In practice, $P$, $t_0$, and $\delta$, are usually well constrained by the data, so most reasonable basis sets will include $P$, $t_0$, and either $\delta$ or $r$ (or slight modifications of these quantities) as the first three free parameters. The transit duration, $T$, is usually well constrained by the data as well, but for even modestly noisy lightcurves, the ingress/egress timescale, $\tau$, is difficult to resolve, making the selection of the final basis parameter a non-trivial task. 
Moreover, because $\tau \approx 5$ min is shorter than the observing cadence in many cases ($\Delta t \approx 30$ min for Kepler long cadence data and TESS primary mission full frame images), data binning often precludes a precise ingress/egress characterization even for high signal-to-noise transits \citep{Kipping2010, PriceRogers2014}. The ratio $T/\tau$ is as important as the values of $T$ and $\tau$ in isolation, so the choices of these final two basis parameters must be considered in tandem. Throughout this study, $T$ will refer to the full first-to-fourth contact transit duration $T_{14}$ unless otherwise noted because this is the only transit duration which is readily defined for all grazing and non-grazing geometries. In \S\ref{sec:summary} we will discuss how to modify our model to incorporate different transit durations which may be better constrained by the data. For the time being, limiting consideration to $T_{14}$ simplifies our discussion considerably and allows us to focus on the ideas which are unique to this paper. A straightforward approach to selecting the final basis pair is to use $\{T, \tau\}$ directly, although one might just as easily choose $\{b, \rho_{\star}\}$, $\{T_{14}, T_{23}\}$ or even more exotic pairs such as $\{b^2, 1/T\}$. Numerous basis sets have been proposed in the literature \citep[e.g.][]{Bakos2007, Carter2008, Pal2008, Eastman2013}, although none has yet been adopted as the standard set applied in all cases. This lack of a standard basis set and the wide variety of different parameterizations in use speaks to the subtle challenge of transit model fitting. For the present work, we will primarily use the basis pair $\{T, b\}$ as our final two model parameters. To account for nonzero stellar limb darkening, we adopt the standard approach and employ a quadratic limb darkening profile \citep{Claret2000, MandelAgol2002} using the efficient $\{q_1, q_2\}$ parameterization introduced by \citet{Kipping2013}. 
To account for nonzero eccentricity, we employ the photoeccentric effect \citep{FordQuinnVeras2008, DawsonJohnson2012} to compare the stellar density implied by a circular transit model, $\rho_{\rm circ}$, to an independent measurement (e.g. from spectroscopy or asteroseismology) of the stellar density, $\rho_{\rm obs}$, via the relation \begin{equation}\label{eq:photoeccentric} \frac{\rho_{\rm circ}}{\rho_{\rm obs}} = \Big( \frac{1 + e\sin\omega}{\sqrt{1-e^2}} \Big)^3 \end{equation} where $e$ is the eccentricity and $\omega$ is the longitude of periastron. The main advantage of this indirect method over the straightforward approach - i.e. fitting $e$ and $\omega$ directly - is that circular orbits are both faster to compute and require two fewer variables to describe than do non-circular orbits. With all of this in mind, we will take as our fiducial basis set the parameters $\{P, t_0, \ln r, b, \ln T, q_1, q_2\}$, where the logarithms on $r$ and $T$ enforce positivity of the two scale parameters and facilitate sampling over multiple orders of magnitude. As we will see shortly, this basis set performs quite well for non-grazing transits but performs poorly for grazing transits due to an emergent degeneracy between $r$ and $b$ in the grazing regime. \subsection{Model degeneracy in the grazing regime}\label{subsec:degeneracy} We will now consider the specific challenges that arise when modeling transit lightcurves of exoplanets on grazing trajectories. In the discussion that follows, the term ``grazing'' refers to any transit geometry for which the planetary disk does not fully overlap the stellar disk at the mid-transit point. In other words, we consider a transit to be non-grazing if $b \leq 1 - r$ and grazing if $b > 1 - r$. As a planet's trajectory moves from low-$b$ to high-$b$, both the transit duration, $T$, and transit depth, $\delta$, are reduced (Figure \ref{fig:grazing_transit_shape}). 
The reduction in $T$ occurs because the transit chord is shortened, and the reduction in $\delta$ occurs because at high $b$ the planet crosses a dimmer region of the limb-darkened stellar disk. When the planet crosses the grazing boundary at $b = 1-r$, the transit switches from U-shaped to V-shaped. For high signal-to-noise cases, this change in morphology can be used to distinguish between grazing and non-grazing transits, but for lower signal-to-noise cases, there is enough model flexibility that the transit shape - and by extension the transit geometry - remains ambiguous. \begin{figure} \centering \includegraphics[width=0.45\textwidth]{transit_lightcurve_variable_b.pdf} \caption{Lightcurve model illustrating how the transit shape changes as a function of impact parameter, $b$. This model is for a warm Neptune orbiting a Sun-like star with model parameters $r=0.05$, $P=13.0$ days, $R_{\star} = R_{\odot}$, $M_{\star} = M_{\odot}$, $u=(0.4,0.25)$. Warm colored, solid lines indicate grazing or near-grazing transits (informally defined here as $b \gtrsim 0.8$), while cool-colored, dashed lines indicate non-grazing transits. As $b$ increases, transit duration decreases (due to the shorter transit chord) and transit depth decreases (due to stellar limb darkening). There is little change in the transit shape between $0 \leq b \lesssim 0.5$, making differences between low impact parameters difficult to resolve. The transit depth and duration both change more rapidly above $b \gtrsim 0.5$. At $b = 1 - r = 0.95$, the transit morphology switches from ``U-shaped'' to ``V-shaped'', providing a diagnostic avenue for distinguishing grazing from non-grazing transits, although the effects of limb darkening blur this transition somewhat.} \label{fig:grazing_transit_shape} \end{figure} This ambiguity means that it is often necessary to sample from the grazing regime even for planets which are not actually on grazing trajectories. 
Unfortunately, two complications arise when attempting to simultaneously sample from both the grazing and non-grazing regions of the posterior distribution. First, although our fiducial basis set performs well when $b < 1-r$, for grazing trajectories $r$ and $b$ become highly correlated, producing a narrow degeneracy ridge that is difficult to explore (more on this in a moment). Second, because the posterior topology is extremely different on either side of the grazing transition, a ``bottleneck'' or ``funnel'' arises and the sampler often struggles to cross this threshold. In the grazing regime, as a planet moves to higher $b$ with fixed $r$, the overlap area between the stellar and planetary disk will decrease, thereby reducing the transit depth. However, if $r$ is allowed to float as a free parameter, a large $b$ can be compensated for by a commensurate increase in $r$. Thus $r$ and $b$ become almost perfectly positively correlated and sometimes a sampler will find an extremely large radius ($r_p \gg R_{\star}$) and extremely high impact parameter ($b \gg 1$), which is obviously unphysical. This is a well known problem in transit fitting \citep{Rowe2014, Rowe2015} and is a clear case where common sense is in conflict with the analysis. The effects of the $r-b$ degeneracy can be readily seen upon inspection of real Kepler data (Figure \ref{fig:koi_supergiants}). Compared to isotropic expectations for the cumulative Kepler Object of Interest (KOI) catalog, there is an overabundance of super-giant planets ($r_p \gtrsim 2 R_J$) found on grazing trajectories, but for objects with astrophysically sensible radii, the fraction of planets inferred to be on grazing trajectories is roughly in line with expectations. 
Furthermore, nearly every implausibly large super-giant planet has a quoted impact parameter consistent with a grazing trajectory, and many of these suspicious objects cluster at the $b=1+r$ boundary that marks where planets are not only on grazing orbits, but on \textit{extremely} grazing orbits for which the planetary and stellar disks barely overlap at all. While it is possible there is some complicated selection effect at play wherein only super-giants on grazing orbits pass all vetting thresholds necessary to be included in the KOI database, the simpler explanation is that the majority of these supposed super-giants are actually super-Earths or mini-Neptunes on non-grazing orbits, with inferred $r$ and $b$ values that are artifacts of a transit fitting procedure gone awry. \begin{figure*} \centering \includegraphics[width=0.9\textwidth]{KOI_inclined_supergiants_overabundance.pdf} \caption{Distribution of $r_p/R_{\star}$ and $b$ for cumulative Kepler Object of Interest (KOI) planet candidates. \textit{Left panel}: joint 2D distribution of $r_p/R_{\star}$ and $b$. Each point represents an individual validated or candidate planet. The red shaded region highlights planets inferred to orbit on a grazing trajectory. There is a suspicious pile-up of planets near $b\approx1.25$ at the $b=1+r$ boundary, hinting that the radius and impact parameter measurements derived for these planets may not be reliable. Non-isotropic structure in the distribution among planets on non-grazing trajectories - particularly near $b=0$ - suggests that measurements for these planets should be approached with some skepticism as well. \textit{Right panel}: fraction of planets inferred to be on grazing trajectories as a function of radius ratio. The red line plots the relation $f=2r/(1+r)$, which is the geometric upper limit on how many planets are expected to be on grazing orbits, ignoring any reduced detection efficiencies for grazing transits. 
For planets with physically plausible radii ($r_p \lesssim 2 R_J$), the observed fraction of grazing transits is in line with expectations, but for super-giant planets an overabundance of KOIs are found on grazing trajectories, again suggesting that their radius and impact parameter measurements may be unreliable.} \label{fig:koi_supergiants} \end{figure*} The effects of the transition threshold bottleneck are more difficult to notice because the most common outcome is that the sampler fails to enter the grazing regime entirely. In this situation, an individual Markov chain may appear well mixed, even though the posterior distribution has not been fully explored. This scenario is arguably worse than when the sampler becomes stuck deep in the grazing regime at astrophysically implausible values of $r$ because we often will not recognize that anything has gone wrong, even after consulting the usual set of Markov chain diagnostics. If we cannot trust that our sampler is well-behaved near the grazing/non-grazing boundary, we cannot trust that our sampler is well-behaved anywhere. We must therefore be skeptical of any results obtained before we can be confident that the bottleneck has not biased our inferences. Comparing impact parameter measurements between different Kepler data releases bears out our exhortation toward caution. Each point in Figure \ref{fig:dr22_vs_dr25} marks an individual KOI reported in both DR22 \citep{Mullally2015} and DR25 \citep{Thompson2018}, so points should cluster around the line $b_{22} = b_{25}$. The high degree of scatter indicates that the actual results are inconsistent, despite the fact that they were obtained from nearly identical input observations and similar data processing pipelines. Although there is some evident correspondence between the catalogs for high impact parameters ($b \gtrsim 0.7$), there is nonetheless a substantial fraction of points which report $b \approx 0$ in one catalog and $b \approx 1$ in the other. 
Even the 1D single-catalog distributions exhibit inhomogeneity, with a pile-up of reported impact parameters near $b=0$ seen in both catalogs. Although some of these discrepancies can be mitigated by using posterior medians rather than the default maximum likelihood point estimates \citep{Petigura2020}, much of the error is endemic to the problem of impact parameter measurement. \begin{figure} \centering \includegraphics[width=0.45\textwidth]{dr22_vs_dr25.pdf} \caption{Impact parameters reported by Kepler DR22 \citep{Mullally2015} vs DR25 \citep{Thompson2018}. Plotted values correspond to best-fit point estimates. Each point indicates a single Kepler Object of Interest reported in both catalogs, so points should cluster around the line $b_{22}=b_{25}$. The high degree of scatter in the actual data indicates that results are inconsistent and therefore unreliable. While there is some correspondence of values where $b \gtrsim 0.7$, in a substantial fraction of cases one catalog reports $b\approx0$ while the other reports $b\approx1$. A pile-up of reported values near $b=0$ can be seen in both catalogs, indicating that results are inaccurate. Because $r$ and $b$ are correlated for stars with non-negligible limb darkening, any mismeasurement of $b$ will propagate through to a mismeasurement of $r$.} \label{fig:dr22_vs_dr25} \end{figure} \subsection{Experiments using simulated data}\label{subsec:simulation_description} In order to illuminate the origin of the skewed $r-b$ distribution seen in real data, we perform an experiment which applies a Markov Chain Monte Carlo (MCMC) model fitting routine to synthetic data. In the next several paragraphs, we describe our method for simulating data and for subsequently fitting a transit model to that data using Hamiltonian Monte Carlo \citep[HMC;][]{Neal2011}. The casual reader may wish to skim these paragraphs so as not to become bogged down in the details. 
The important point is that we simulate an ordinary transit of an unremarkable star-planet system and then model that transit using our fiducial basis set and standard Monte Carlo sampling techniques. The details of our data simulation procedure are as follows. First, we simulate a low signal-to-noise transit of a warm Jupiter on a near-grazing, $P=13$ day circular orbit around a Sun-like star. The period was calculated in order to create a 3 hr transit duration for an impact parameter $b=0.85$. We generated 500 data points between $t_0 \pm T$, each with an integrated exposure time of 14.4 minutes (0.01 days). The finite data points were spaced randomly over the interval in order to minimize aliasing artifacts that might arise from a uniform observing cadence. We then added $\sigma_F/F = 10^4$ ppm Gaussian noise to the data. We did not include any long term trends or correlated noise in our simulation. Ground-truth parameter values are collected as simulation J-85 in Table \ref{tab:sim_parameters}, and the simulated photometry is shown in the middle panel of Figure \ref{fig:simulated_photometry}. 
\begin{table*} \centering \begin{tabular}{c c | c c c | c c | c} Parameter & Unit & J-22 & J-85 & J-100 & SE & MN & MHZ\\ \hline \textbf{Star} \\ $R_{\star}$ & $R_{\odot}$ & 1.0 & 1.0 & 1.0 & 0.92 & 0.92 & 0.37 \\ $M_{\star}$ & $M_{\odot}$ & 1.0 & 1.0 & 1.0 & 0.86 & 0.86 & 0.38 \\ $u_1$ & - & 0.40 & 0.40 & 0.40 & 0.48 & 0.48 & 0.46 \\ $u_2$ & - & 0.25 & 0.25 & 0.25 & 0.22 & 0.22 & 0.28\\ $\sigma_F$ & ppm & $1\times10^4$ & $1\times10^4$ & $5\times10^3$ & 300 & 300 & 200\\ \textbf{Planet} \\ $P$ & days & 3.6 & 13.0 & 44.9 & 21.0 & 21.0 & 37.0 \\ $r_p$ & $R_{\oplus}$ & 11.2 & 11.2 & 11.2 & 1.3 & 2.2 & 0.38\\ $b$ & - & 0.22 & 0.85 & 1.00 & 0.70 & 0.98 & 0.70 \\ $T$ & hrs & 3.0 & 3.0 & 3.0 & 3.24 & 1.26 & 2.33 \\ \textbf{Derived} \\ $r$ & - & 0.103 & 0.103 & 0.103 & 0.012 & 0.020 & 0.009 \\ $\gamma$ & - & 7.57 & 1.36 & 0.0 & 25.2 & 2.48 & 32.7 \\ $\lambda$ & - & 0.091 & 0.026 & 0.011 & 0.004 & 0.001 & 0.003 \\ \end{tabular} \caption{Ground-truth parameter values for simulated lightcurves used to compare a standard sampling approach to our new method. The quantities $\lambda$ and $\gamma$ are defined in Equation \ref{eq:lam_gam}. All simulated planets were placed on circular orbits. The first set of simulations (J-22, J-85, \& J-100) placed a Jupiter-sized planet around a Sun-like star at three different impact parameters in order to produce a non-grazing ($b=0.22$), nearly grazing ($b=0.85$), and grazing ($b=1.00$) geometry; the orbital period was scaled to preserve a circular orbit for a consistent transit duration $T=3$ hrs. The second pair of simulations (SE, MN), placed a super-Earth ($r_p = 1.3 R_{\oplus}$) and a mini-Neptune ($r_p = 2.2 R_{\oplus}$) on a 21 day orbit around a K star; this experiment was designed to mimic the detection of a radius valley planet that will require a precise impact parameter measurement in order to determine its composition. The final simulation (MHZ) placed a small rocky planet in the habitable zone of an M-dwarf. 
The properties of the K star were chosen to be similar to Kepler 20 \citep{Mathur2017}, and the properties of the M star were chosen to be similar to GJ 876 \citep{vonBraun2014}. Limb darkening coefficients for all stars were calculated assuming solar metallicity and using the EXOFAST web applet \citep{Eastman2013}. Host star properties are meant to be illustrative of a few different common stellar types rather than an exact match to any particular real stars.} \label{tab:sim_parameters} \end{table*} \begin{figure} \centering \includegraphics[width=0.45\textwidth]{simulated_photometry.pdf} \caption{Simulated lightcurve photometry for a Jupiter-size planet on a circular orbit around a Sun-like star with various impact parameters. The orbital period was adjusted to maintain a consistent transit duration of $T=3$ hrs. The solid colored lines show the true underlying model while the grey points have additive Gaussian noise. The low signal-to-noise ratio of the transit makes the orbital trajectory (grazing vs. non-grazing) ambiguous. \textit{Top panel}: $b=0.22$, placing the planet on a non-grazing trajectory, corresponding to model J-22 in Table \ref{tab:sim_parameters}. \textit{Middle panel}: $b=0.85$, a near-grazing trajectory (model J-85). \textit{Bottom panel}: $b=1.0$, a grazing trajectory (model J-100).} \label{fig:simulated_photometry} \end{figure} We parameterized the model using our fiducial basis set - $\{P, t_0, \ln r, b, \ln T, q_1, q_2\}$ - plus a baseline flux offset, $F_0$, and a white noise jitter term, $\ln\sigma_F$. We held $P$ fixed at the true value and placed uninformative priors on all other variables, the mathematical details of which are collected in Table \ref{tab:priors}. Fixing $P$ is equivalent to assuming that the planet's ephemeris is tightly constrained, which is often the case even for noisy transits. 
Although in practice most applications will use the best available stellar characterization to place at least a modestly informative prior on $\rho_{\star}$ (and, indirectly, on $T$ and $e$ via the photoeccentric effect), for our present experiment we are more concerned with the sampler behavior (i.e. whether MCMC chains are well behaved) rather than the posterior inferences. Our philosophy is that the model should converge regardless of any particular choice of prior, so we adopt minimally restrictive priors wherever possible. \begin{table}[] \centering \begin{tabular}{l|l} Parameter & Prior \\ \hline $P$ & \textit{fixed} \\ $t_0$ & $\mathcal{N}(0.0, 0.1)$ \\ $\ln r$ & $\mathcal{U}(-9.2,-0.01)$ \\ $b$ & $\mathcal{U}(1-r,1+r)$ \\ $\ln T$ & $\mathcal{U}(-4.6,-1.4)$ \\ $q_1, q_2$ & $\mathcal{U}(0,1)$ \\ \hline $F_0$ & $\mathcal{N}(0,1)$ \\ $\ln\sigma_F$ & $\mathcal{N}(0,1)$ \\ \hline $\rho_{\star}$ & \textit{see text} \\ $e$ & \textit{see text} \end{tabular} \caption{Model priors for the grazing regime. All times are in units of days. $\mathcal{N} = \mathcal{N}(\mu,\sigma)$ denotes normal distributions and $\mathcal{U} = \mathcal{U}(x_{min},x_{max})$ denotes uniform distributions. All units of time are in days or log(days), where appropriate. Note that $b$ is defined as a conditional distribution predicated on $r$, i.e. $p(b) \equiv p(b|r)$; see Appendix \ref{appx:B} for details. Our limb darkening treatment follows \citet{Kipping2013} by placing uninformative priors on the two quadratic coefficients.} \label{tab:priors} \end{table} We sampled from the posterior distribution using HMC as implemented by \texttt{PyMC3} \citep{pymc3:2016} and the No U-Turn Sampler \citep[NUTS;][]{Hoffman2011}. Each sampling run consisted of two independent chains tuned for 5000 steps and sampled for 1000 draws, for a total of 2000 samples per run. 
We deliberately left the independent chains short in order to highlight the stochastic nature of the problem, but note that with HMC the autocorrelation length is typically much shorter than for standard random walk Metropolis-Hastings algorithms \citep{Metropolis1953, Hastings1970}, so that the number of effective samples is usually $\gtrsim 25\%$ and under ideal circumstances can approach $100\%$. This high effective sample rate is achievable with HMC because the algorithm adds a ``momentum'' term to the proposal generation process which enables much larger step sizes than a random walk. During the tuning phase (analogous to the burn-in phase of other MCMC routines), the sampler ``learns'' the posterior topology and adaptively selects an optimal step size for efficient exploration of the posterior. While the computational cost per step is higher for HMC compared to random walk Metropolis-Hastings, the cost per effective sample is usually considerably lower, especially for high dimensional problems. HMC has only recently begun to gain popularity among astrophysicists, so we direct the interested reader to the excellent review by \citet{Betancourt2017}, as well as tutorials for the Python software packages \texttt{PyMC3}\footnote{\url{https://docs.pymc.io}} \citep{pymc3:2016} and \texttt{exoplanet}\footnote{\url{https://docs.exoplanet.codes}} \citep{ForemanMackey2021}. Figure \ref{fig:rb_corner_degeneracy} illustrates results of four independent attempts to model simulated transit data using HMC. The only difference from run-to-run was the random seed for the sampler. Despite identical setups, each run produced a remarkably different posterior distribution, sometimes getting stuck in the grazing regime and sometimes failing to explore that regime altogether. The issue is not merely that the chains had not converged, and even increasing the length of the sampling and/or tuning phase by orders of magnitude did not reliably produce consistent results. 
Because standard sampling methods cannot be counted on to adequately explore both the grazing and non-grazing portions of the distribution, our inferences are unreliable, and we must find a new method for modeling exoplanet transit lightcurves. \begin{figure*} \centering \includegraphics[width=0.9\textwidth]{rb_corner_plot_degeneracy.pdf} \caption{Corner plots of the posteriors from four identical MCMC runs of model J-85 using our fiducial $\{\ln r, b\}$ basis. See \S\ref{subsec:basis_comparison} and Table \ref{tab:sim_parameters} for details of the model setup. The only difference between the runs was the random seed for the Markov chains. Despite their identical setups, each run produces a remarkably different posterior geometry. \textit{Panel A}: The sampler appears to fully explore the posterior region, with most samples consistent with a non-grazing geometry and a smaller fraction extending into the grazing regime. There is a ``dog leg'' feature at $b\approx1$ where the geometry transitions from non-grazing to grazing, and there is a strong degeneracy between $\ln r$ and $b$ for grazing transits. \textit{Panel B}: The sampler fails to explore the grazing regime entirely, giving the illusion of a well-behaved posterior. \textit{Panel C}: The sampler extends to high impact parameters, but catches at the boundary between grazing and non-grazing geometries, producing a sharp spike at $b \approx 1$. \textit{Panel D}: The samples pile up at $b \approx 1$, leading to a bimodal posterior distribution that barely explores the grazing regime at all. Increasing the length of the tuning phase and/or the sampling phase does not reliably fix these issues.} \label{fig:rb_corner_degeneracy} \end{figure*} The problem is two-fold. First, we need to use a different basis set for grazing vs. non-grazing geometries because the covariance properties of $r$ and $b$ are quite different between the two regimes. 
Second, we need to find a way to efficiently explore the full posterior space without getting stuck at the grazing transition boundary. The solutions to these problems are interrelated and are discussed in the next two sections of this paper. \section{A new basis for grazing transits}\label{sec:new_basis} Our solution to the grazing transit problem is to split the Monte Carlo sampling routine into separate runs for the grazing ($b > 1-r$) and non-grazing ($b < 1-r$) regimes. We then combine these independent runs into a single posterior distribution using umbrella sampling (see \S\ref{sec:umbrella}). Before we describe our full umbrella sampling procedure, we first present a new basis set which is designed for optimal performance in the grazing regime. \subsection{Specification of the model parameters}\label{subsec:basis_specification} Of the seven parameters in our fiducial basis set - $\{P, t_0, \ln r, b, \ln T, q_1, q_2\}$ - four can be carried over to our new grazing basis without modification: $P$, $t_0$, $q_1$, and $q_2$. Both $P$ and $t_0$ are generally tightly constrained by the data and are minimally covariant with the other parameters, and the two limb darkening coefficients, $q_1$ and $q_2$ \citep{Kipping2013}, perform well for both grazing and non-grazing orbits. Only $r$, $b$, and $T$ now remain. The transit duration, $T$, is usually well constrained by the data (albeit somewhat less so than $P$ and $t_0$) and is closely related to the eccentricity via the photoeccentric effect; we therefore maintain $\ln T$ as one of our seven basis parameters. With five parameters in common between the fiducial non-grazing basis and our new grazing basis, our reparameterization effort now hinges on a transformation of $r$ and $b$ (which are highly covariant for grazing transits) into a new parameter pair which is more nearly orthogonal for grazing geometries. 
Rather than producing new parameters wholesale, our strategy is to find some mapping of $\{r,b\} \rightarrow \{x_1,x_2\}$ with the desired orthogonality when $b > 1-r$. After some experimentation, we identified a suitable pair of quantities, which we define according to the non-linear combination \begin{equation}\label{eq:lam_gam} \begin{aligned} &\lambda = r^2 + \beta r\\ &\gamma = \frac{\beta}{r} \end{aligned} \end{equation} where $\beta \equiv 1-b$ is a convenience variable. Because both $r$ and $b$ are unitless, $\lambda$ and $\gamma$ are unitless as well. The first quantity, $\lambda$, is derived from a linear approximation to the area of partial overlap between two spheres \citep{MandelAgol2002}; see Appendix \ref{appx:A} for details. Thus, $\lambda$ is closely related to the transit depth in the grazing regime. But, note that because $\lambda$ ranges over (0, $2r^2$) for grazing transits, the relation is closer to $\lambda \approx 2\delta$ than to $\lambda \approx \delta$. We caution the reader \underline{not} to use $\lambda$ as a basis parameter outside of the grazing regime because it is explicitly tied to the geometry of grazing transits. Figure \ref{fig:mandel_agol} demonstrates that the exact \citet{MandelAgol2002} geometry is well matched by a simple linear function $\lambda(b)$ at fixed r as long as $r < 1$, which will virtually always be the case for exoplanets orbiting main sequence or giant branch stars. We have not rigorously checked how the validity of our assumptions break down when $r \geq 1$, and so the results in this paper will likely need to be adjusted if they are to be applied to substellar companions of brown dwarfs \citep{Jung2018} or white dwarfs \citep{Vanderburg2020}. \begin{figure} \centering \includegraphics[width=0.45\textwidth]{mandel_agol_approximation.pdf} \caption{Planet-star overlap area at mid-transit as a function of impact parameter, $b$, for planet-to-star radius ratio $r = 0.05$. 
The plot is restricted to show only the grazing regime, i.e. $1-r < b < 1+r$. The solid black line shows the exact geometric solution derived by \citet{MandelAgol2002} and presented in this paper as Equation \ref{eq:mandel_agol}. Even though the full geometry is quite complicated, the resultant curve is well approximated by a simple linear function (dashed red line).} \label{fig:mandel_agol} \end{figure} The second quantity, $\gamma$, indicates the extent to which a transit is grazing or non-grazing, with the transition occurring at $\gamma = 1$. When $\gamma \geq 1$, the transit is non-grazing; when $-1 < \gamma < 1$, the transit is grazing; when $\gamma \leq -1$ the planet does not transit at all. We refer to $\gamma$ as the \textbf{\textit{grazing coordinate}}, and in \S\ref{sec:umbrella} we will see that it plays a special role in our umbrella sampling routine. When converting from one basis to another, care must be taken in order to avoid inadvertently introducing unwanted priors. For a thorough discussion of the implicit priors introduced by our reparameterization and for a recipe to establish sensible prior distributions for $\lambda$ and $\gamma$, see Appendix \ref{appx:B}. The important point is that in addition to mapping $\{r,b\} \rightarrow \{\lambda,\gamma\}$, we add additional terms to the log-likelihood function as needed to ensure that our priors remain consistent between parameterizations. By construction, our new $\{\lambda,\gamma\}$ basis is far more orthogonal than the fiducial $\{r,b\}$ basis is for grazing transits. Conversely, $\{\lambda,\gamma\}$ is far \textit{less} orthogonal than $\{r,b\}$ is for non-grazing transits. To achieve good sampler performance, we must therefore make the restriction $\gamma < 1$ when using our new basis and the restriction $\gamma \geq 1$ when using the old basis. 
We stress that our new $\{\lambda, \gamma\}$ parameterization is specifically designed with grazing transits in mind and should not be applied to non-grazing geometries. For many transits, some fraction of the posterior distribution will be consistent with both grazing and non-grazing trajectories, so fitting a transit will require at least two independent sampling runs, one to sample the grazing regime using $\{\lambda,\gamma\}$ and the other to sample the non-grazing regime using $\{r,b\}$. Recombining independent posterior chains into a single posterior distribution can be performed using the statistical technique of umbrella sampling, which will be introduced in \S\ref{sec:umbrella}. For now, we will restrict our analysis to consideration of the grazing regime in order to compare the relative performance of the two basis sets. \subsection{Performance of the $\{r,b\}$ vs $\{\lambda,\gamma\}$ basis}\label{subsec:basis_comparison} The fiducial basis set we have used thus far is $\{P, t_0, \ln r, b, \ln T, q_1, q_2\}$, which we now compare to our new parameterization $\{P, t_0, \ln\lambda, \gamma, \ln T, q_1, q_2\}$. As a shorthand, we will continue to refer to these as the $\{r,b\}$ and $\{\lambda, \gamma\}$ bases, respectively, although any actual sampling will always be performed using $\ln r$ and $\ln \lambda$ in place of $r$ or $\lambda$. To compare the two basis sets, we simulate a low signal-to-noise transit of a warm Jupiter orbiting a Sun-like star and sample from the posteriors using Hamiltonian Monte Carlo. Simulated photometry is shown in Figure \ref{fig:simulated_photometry} and ground truth parameter values are presented in Table \ref{tab:sim_parameters}. Our model setup and sampling routine both follow the procedure described in \S\ref{subsec:simulation_description}, modified to restrict samples to grazing geometries. 
For each basis set, we perform 100 independent MCMC runs using two chains run for 5000 tuning steps and 1000 draws, generating 2000 samples per run. Rather than merely setting a hard boundary at the grazing transition, we added a biasing potential, $\psi$, to the likelihood such that \begin{equation}\label{eq:psi_bias} \psi(\gamma) = \begin{cases} 1 + \gamma & \text{$\gamma \leq 0$}\\ 1 - \gamma & \text{$0 < \gamma < 1$} \end{cases} \end{equation} which has the effect of preferentially biasing posterior samples toward the middle of the grazing regime. The term is related to umbrella sampling, and the motivation behind its inclusion will become apparent in \S\ref{sec:umbrella}. Our new $\{\lambda,\gamma\}$ basis performs more efficiently than the standard $\{r,b\}$ basis and produces consistent posterior distributions. For a simulated near-grazing transit (model J-85), the total runtime for a given run using $\{r,b\}$ was $389 \pm 34$ seconds, compared to $386 \pm 26$ seconds using $\{\lambda,\gamma\}$, a nearly identical wall clock time. On the balance, the autocorrelation length of the chains was a little shorter when using the new basis compared to the standard basis, resulting in a larger number of effective samples obtained using our $\{\lambda,\gamma\}$ basis compared to the standard $\{r,b\}$ basis. Evaluated using the autocorrelation length for $r$, the time per effective sample was 1.6 seconds using $\{\lambda,\gamma\}$ vs 2.3 seconds using $\{r,b\}$, a $29\%$ gain in efficiency. We repeated this autocorrelation analysis using posterior chains for $b$ and $T$, finding gains in efficiency of $28\%$ and $4\%$, respectively, when using our new basis. 
The relative performances of the two bases were comparable for various other simulated transit geometries (see Table \ref{tab:sim_parameters}), typically producing a $\sim20\%$ gain in efficiency for generating effective samples of $r$ and $b$ and roughly equivalent efficiency for generating effective samples of $T$. The two bases produce consistent posterior distributions (Figure \ref{fig:basis_corner_compare}). We conclude that our new $\{\lambda,\gamma\}$ basis will be preferred under most circumstances. \begin{figure*} \centering \includegraphics[width=0.90\textwidth]{basis_corner_compare.pdf} \caption{Posterior distributions of $\ln \lambda$, $\gamma$, and $\ln r$ for transit geometries restricted to the grazing regime for a near-grazing transit of a warm Jupiter orbiting a Sun-like star (model J-85). The effects of the biasing potential have been removed. See Table \ref{tab:sim_parameters} for ground truth simulation parameters and Figure \ref{fig:simulated_photometry} for the simulated photometry. The two parameterizations produce comparable posterior distributions, although the new $\lambda - \gamma$ basis performs $\sim20\%$ more efficiently. Note that for the left panel, $\lambda$ and $\gamma$ were not basis parameters, but were computed after the fact from samples of $r$ and $b$. A detailed discussion of the model parameterization is presented in \S\ref{sec:new_basis}.} \label{fig:basis_corner_compare} \end{figure*} \section{Umbrella sampling}\label{sec:umbrella} Umbrella sampling \citep{TorrieValleau1977} is a statistical tool designed for estimating complicated target distributions - e.g. multimodal distributions or degeneracy ridges - for which standard sampling techniques fail. Umbrella sampling does not replace existing sampling methods, but rather works in tandem with these methods to produce more robust posterior estimates. 
The basic idea is to split a complicated sampling problem into multiple smaller, more manageable problems, each restricted to a narrow region (or window, in the standard nomenclature) of parameter space. Samples are obtained separately from each window using whatever sampling technique the user prefers - e.g. Hamiltonian Monte Carlo \citep{Neal2011, pymc3:2016} or ensemble sampling \citep{GoodmanWeare2010, ForemanMackey2013} - after which the samples are recombined into a single joint posterior distribution. \begin{figure*} \centering \includegraphics[width=0.9\textwidth]{umbrella_tutorial.pdf} \caption{Schematic illustration of the umbrella sampling method, inspired by \citet{Smith2017}. The method is designed to facilitate sampling from multi-modal target distributions (shaded grey region in all panels). \textit{Top left}: The target distribution (emphasized with a thick black line) has a low-probability valley which will create a bottleneck for standard sampling techniques. In order to ensure sampling from the full posterior space, we split the problem into three windows, each assigned a bias function, $\psi_i$. \textit{Top right}: After sampling independently from each window, we have three biased sub-distributions $\pi_i$. \textit{Bottom left}: Removing the effect of the bias functions, $\psi_i$ produces three unbiased sub-distributions with unknown offsets between one another. \textit{Bottom right}: Calculating the window weights, $z_i$, and recombining all sub-distributions $\pi_i$ into a single joint posterior recovers the true target distribution. See \S\ref{subsec:umbrella} for a detailed discussion. 
A tutorial for reproducing this plot by implementing umbrella sampling can be found at \url{https://gjgilbert.github.io/tutorials/umbrella_sampling/}.} \label{fig:umbrella_tutorial} \end{figure*} Although umbrella sampling has rarely been applied to astrophysical problems, the technique is widely used in the field of molecular dynamics where it originated \citep{TorrieValleau1977}. The literature on umbrella sampling is extensive, but because most examples of its use are presented in the context of highly technical chemical analyses, there exists a precipitous barrier to entry for many astronomers (this paper's author included) who lack the domain expertise to easily comprehend the specialized scientific content surrounding the general statistical tool we wish to adopt. One goal of this paper is therefore to present an accessible, high-level introduction to umbrella sampling tailored toward the needs of astronomers in order to establish a gateway into the wider umbrella sampling literature. For a more rigorous introduction, we direct the interested reader to a recent review of umbrella sampling by \citet{Kastner2011}, as well as to the first astrophysical application of the technique by \citet{Matthews2018}. Indeed, much of the pedagogy in this section was borrowed from \citet{Matthews2018} - particularly their \S 2.1 - and any astronomer wishing to implement umbrella sampling themselves will likely benefit from reviewing that paper in tandem with the present manuscript. Because learning to use new mathematical tools is often best accomplished through a ``hands-on'' approach, we have also developed a Python tutorial for implementing umbrella sampling, available at \url{https://gjgilbert.github.io/tutorials/umbrella_sampling/}. \subsection{A brief overview of umbrella sampling}\label{subsec:umbrella} Let us begin by assuming that we wish to sample from some arbitrary target distribution which possesses a complicated geometry (Figure \ref{fig:umbrella_tutorial}). 
Standard sampling techniques will do a poor job at traversing the low probability ``valleys'' between high probability ``peaks,'' resulting in poorly mixed posterior chains and incomplete sampling of the target distribution. One way around this issue is to add an additional bias term to the likelihood in order to ``level out'' the peaks and valleys, thereby simplifying the geometry. If the target distribution were known \textit{a priori} (which of course it is not), we could add a single bias term to the entire distribution to make it flat throughout. In practice, however, the more viable approach is to break the complicated target distribution into several overlapping \textit{\textbf{windows}}, sample separately from each window, and then recombine the sub-samples into a joint posterior distribution. Each window will be assigned its own \textbf{\textit{bias function}}, sometimes called simply a \textit{\textbf{bias}} or \textit{\textbf{umbrella}}. The bias functions serve to restrict the sampler to a given window and ensure that a significant fraction of samples are drawn from the low probability valleys. Before defining our windows and biases, we must first identify a suitable variable, $x$, which we will use to construct a sampling framework. In the molecular dynamics literature, $x$ is usually called the \textit{reaction coordinate} because it corresponds to a real physical quantity related to chemical reactions such as free energy or molecular bond strength; in this manuscript we will refer to $x$ as the \textit{\textbf{umbrella coordinate}} (hence our earlier terminology for the grazing coordinate). The optimal choice of $x$ will be dictated by the geometry of the target distribution. For example, if the target distribution consists of several isolated peaks, $x$ could be defined along the line connecting those peaks. 
Such detailed advance knowledge is not strictly necessary, however, and in many cases it is possible to select a good (though perhaps sub-optimal) umbrella coordinate even for a blind search. In any case, the prior information needed to identify a suitable umbrella coordinate is comparable to the prior information needed to properly specify the model in the first place, and the choice of umbrella coordinate should follow from the structure of the problem. For a more in-depth discussion of strategies for choosing umbrella coordinates, particularly under information-limited circumstances, see \citet{Matthews2018}. Once we have selected our umbrella coordinate, $x$, our next task is to define our window bounds and a set of $N$ corresponding bias functions, $\psi_i(x)$ (Figure \ref{fig:umbrella_tutorial}, top left panel). Once again, the optimal choice of windows and biases depends on the geometry, so the more that can be learned via exploratory analysis, the better. Fortunately, however, the results of umbrella sampling are insensitive to the particular choice of window bounds and bias functions provided that two conditions are met: (1) each window is adequately sampled, and (2) there is sufficient overlap between windows in order to allow accurate determination of relative window weights. We are thus free to define $\psi_i$ in whatever manner is most convenient for the problem at hand. With windows and biases defined, we now sample from the target distribution, $\pi(x)$ separately from each of the $N$ windows, thereby producing $N$ biased posterior sub-distributions $\pi_i(x)$ (Figure \ref{fig:umbrella_tutorial}, top right panel). The sub-distributions relate to the (known) bias functions and to the (unknown) target distribution, via the equation \begin{equation}\label{eq:pi_i} \pi_i(x) = \frac{1}{z_i}\psi_i(x)\pi(x) \end{equation} where $z_i$ are the window weights quantifying the relative contribution of each $\pi_i$ to the combined target distribution, $\pi$. 
Because each $\pi_i$ is a probability distribution, $\int\pi_i(x)dx=1$, and the window weights $z_i$ can be calculated via integration of Equation \ref{eq:pi_i} as \begin{equation}\label{eq:zi_int} z_i = \int \psi_i(x)\pi(x)dx = \langle \psi_i \rangle_{\pi} \end{equation} where $\langle f \rangle_{\pi}$ denotes the average of some function $f$ with respect to $\pi$. In other words, to determine $z_i$, we take the average of each $\psi_i$ weighted by the empirically sampled target distribution, $\pi$. If the full target distribution $\pi$ were known, calculating the window weights $z_i$ would be trivial. But of course $\pi$ is not known - it is precisely the quantity we are trying to determine! Furthermore, we don't actually have samples of $\pi$ yet. Rather, we have $N$ sets of biased sub-samples, $\pi_i$, meaning we will need to compute $\langle \psi_i \rangle_{\pi_j}$ for each $(i,j)$ and then combine these to estimate $\langle \psi_i \rangle_{\pi}$. The challenge is that this final combination step depends on $z$, making the whole process a bit circular. Once the $z_i$ are known, however, the biased sub-distributions, $\pi_i$, can be easily combined into a single joint posterior distribution, $\pi$ (Figure \ref{fig:umbrella_tutorial}, bottom panels). Different methods for implementing umbrella sampling more or less come down to different strategies for solving the integral in Equation \ref{eq:zi_int}. The most popular method is the Weighted Histogram Analysis Method \citep[WHAM;][]{Kumar1992}, which works by binning the data and computing a histogram in the overlap region. Another popular method is the Multistate Bennett Acceptance Ratio \citep[MBAR;][]{ShirtsChodera2008}, which does not require discretization of the data. Both WHAM and MBAR can be derived from maximum likelihood or minimum asymptotic variance principles (see the references above for proofs). 
Recently, \citet{Thiede2016} and \citet{Dinner2017} demonstrated that the determination of umbrella weights $z_i$ can be recast as an eigenvector problem, a method which they term the Eigenvector Method for Umbrella Sampling (EMUS). Establishing umbrella sampling as an eigenvector problem has the twin advantages of being computationally efficient and facilitating accurate error analysis, and so we adopt EMUS as our method of choice here. Following \citet{Matthews2018}, we restate Equation \ref{eq:zi_int} as an explicit sum \begin{equation}\label{eq:zi_sum} z_j = \sum_{i=1}^N \Bigg\langle \frac{\psi_j(x)}{\sum_{k=1}^N \psi_k(x)/z_k} \Bigg\rangle_{\pi_i} \end{equation} where $\langle \rangle_{\pi_i}$ denotes an average with respect to $\pi_i$. Because the umbrella weights $z_i$ enter the equation both on the left-hand side of the equation and in the denominator sum on the right-hand side, Equation \ref{eq:zi_sum} must be solved iteratively. To do so using EMUS, we first define a square \textit{overlap matrix}, $F$, with each element $(i,j)$ defined as \begin{equation}\label{eq:Fij} F_{ij} = \Bigg\langle \frac{\psi_j/z_i}{\sum_{k=1}^N \psi_k/z_k} \Bigg\rangle_{\pi_i} \end{equation} As its name implies, $F$ tracks the extent to which samples drawn within one window fall under the umbrella of any other window. On-diagonal terms will usually have larger values (because all samples drawn from window $i$ by construction fall under umbrella $\psi_i$), and when windows $(i,j)$ do not overlap, $F_{ij} = F_{ji} = 0$. In order to calculate $z_i$ using linear algebra, we first define $z \equiv [z_1,z_2,...,z_N]$ as a vector. 
Taking the product of $z$ and the $j^{\rm th}$ column of $F$ yields \begin{equation}\label{eq:emus_multiplication} \sum_{i=1}^N z_i F_{ij} = \sum_{i=1}^N \Bigg\langle \frac{\psi_j}{\sum_{k=1}^N \psi_k/z_k} \Bigg\rangle_{\pi_i} = \langle \psi_j \rangle_{\pi} \end{equation} Recall from Equation \ref{eq:zi_int} that $z_j = \langle \psi_j \rangle_{\pi}$, so $\sum_i z_i F_{ij} = z_j$. Considering all columns in $F$ simultaneously yields the left eigenvalue problem \begin{equation}\label{eq:emus_eigenvector} zF = z \end{equation} which when solved provides an estimate of the window weights. If we knew $F$ \textit{a priori}, finding the eigenvalues and eigenvectors of Equation \ref{eq:emus_eigenvector} would be a straightforward application of linear algebra. But, in practice, we need to estimate both $z$ and $F$ from our empirical samples, $\pi_i$. As we noted earlier, this must be done iteratively. Our strategy will be to pick a starting guess for $z$ and calculate a first estimate of $F$ from Equation \ref{eq:Fij}. We'll then use our estimate of $F$ to calculate an updated value for $z$ using Equation \ref{eq:emus_eigenvector}, and then iterate between Equations \ref{eq:Fij} and \ref{eq:emus_eigenvector} until the result converges. In practice the problem is often nearly converged after just one or two iterations, and both the final result and convergence rate are insensitive to the particular starting estimate of $z$. In summary, the steps of umbrella sampling are: (1) choose a suitable umbrella coordinate $x$, (2) define windows and biases $\psi_i$, (3) sample from each window to produce biased sub-distributions $\pi_i$, (4) calculate window weights $z_i$ by iteratively solving Equations \ref{eq:Fij} and \ref{eq:emus_eigenvector}, and finally (5) recombine all sub-samples into the joint posterior estimate $\pi$ by inverting Equation \ref{eq:pi_i}. 
Note that unlike standard direct sampling methods which produce a single set of unweighted samples, umbrella sampling produces multiple sets of weighted samples (with weights given by $z_i$), and these weights must be taken into account when estimating posterior distributions or summary statistics. A Python tutorial for implementing EMUS can be found at \url{https://gjgilbert.github.io/tutorials/umbrella_sampling/}. \subsection{Applying umbrella sampling to the transit model}\label{subsec:full_model} We now introduce our full umbrella sampling routine as applied to the transit fitting problem. Properly implemented, our new method produces posterior estimates which are more accurate than estimates obtained using standard direct sampling techniques. The key components of our method are (1) splitting the transit fitting problem into separate windows for grazing vs non-grazing geometries and (2) adopting a unique parameter basis within each window tailored to the specific geometry at hand. For our umbrella coordinate we adopt the grazing coordinate, $\gamma \equiv (1-b)/r$, which was introduced in \S\ref{subsec:basis_specification}. Defining our windows in terms of $\gamma$ allows us to easily separate posterior sampling into grazing and non-grazing runs, with the cutoff occurring at $\gamma = 1$. While developing our method, we first attempted to implement a simple two-umbrella scheme wherein the non-grazing window extended slightly into the grazing regime and, conversely, the grazing window extended slightly into the non-grazing regime. However, we found that the sampler still often became stuck at the grazing to non-grazing transition, leading to poorly mixed chains and inaccurate results. We therefore found it necessary to restrict the grazing umbrella to strictly grazing geometries ($\gamma < 1$) and the non-grazing umbrella to strictly non-grazing geometries ($\gamma > 1$). 
Windows must have at least some overlap with their neighbors, so we introduced a third ``transition'' umbrella centered on the grazing to non-grazing boundary at $\gamma = 1$ and extending a little way into both the grazing and non-grazing regimes in order to bridge the gap. We found that this simple three-umbrella scheme performed well under a wide range of circumstances. We define our bias functions over the non-grazing (N), transition (T), and grazing (G) windows as \begin{equation}\label{eq:psi_N} \psi_N \simeq \begin{cases} \gamma - 1 & \text{$1 < \gamma < 2$} \\ 1 & \text{$\gamma \geq 2$} \end{cases} \end{equation} \begin{equation}\label{eq:psi_T} \psi_T \simeq \begin{cases} \gamma & \text{$0 \leq \gamma < 1$}\\ 2 - \gamma & \text{$1 \leq \gamma < 2$} \end{cases} \end{equation} \begin{equation}\label{eq:psi_G} \psi_G \simeq \begin{cases} 1 + \gamma & \text{$\gamma \leq 0$}\\ 1 - \gamma & \text{$0 < \gamma < 1$} \end{cases} \end{equation} where the symbol ``$\simeq$'' denotes that normalization constants have been omitted. These biases are shown graphically in Figure \ref{fig:umbrella_functions}. We have opted to use tent biases out of mathematical convenience, but as noted above, the results of umbrella sampling are in general insensitive to any particular choice of bias function. The reader is thus free to choose any other bias function if they so desire. However, we do caution that while the shape of the bias within each window is mostly unimportant, altering the window widths (i.e. the range of $\gamma$ spanned by each $\psi$) can have a significant effect. Indeed, while developing this method we undertook considerable effort to ensure that windows overlapped enough to facilitate calculation of the window weights without being so wide as to lead to geometric degeneracies. We therefore advise that anyone attempting to apply our method should only adjust the window bounds after careful consideration of the consequences. 
Unless one has a strongly motivated reason to alter the windows, the safest approach is to stick with the limits presented in Equations \ref{eq:psi_N} - \ref{eq:psi_G}. \begin{figure} \centering \includegraphics[width=0.45\textwidth]{psi_umbrellas.pdf} \caption{Our umbrella bias functions, $\psi(\gamma)$. The solid blue line is the non-grazing umbrella ($\psi_N$). The dashed orange line is the transition umbrella ($\psi_T$). The dotted red line is the grazing umbrella ($\psi_G$). We have opted to use tent biases because these are simple to perform calculations with, but because umbrella sampling is insensitive to the particular choice of bias functions \textemdash provided that windows overlap \textemdash many other functional forms would perform just as well.} \label{fig:umbrella_functions} \end{figure} Because each window will be sampled independently of the others, we are free to use a different parameter basis within each window. Motivated by the results of \S\ref{subsec:basis_comparison}, we adopt the standard $\{\ln r, b\}$ basis for the non-grazing umbrella, while for the grazing umbrella we adopt our new $\{\ln\lambda, \gamma\}$ basis. For sampling runs under the transition umbrella, we adopted the hybrid parameter basis $\{\ln r, \gamma\}$, which we found worked well over a range of conditions. Each basis set is tailored to the specific geometry within its corresponding window, and thus performs well under its own umbrella. Umbrellas are always defined in terms of $\gamma$, but $\gamma$ is not always a basis parameter. In practice, in order to define our window bounds and bias functions, we must first calculate $\gamma$ from any two out of three basis parameters $\{r, b, \lambda\}$ following Equation \ref{eq:lam_gam}. The stage is now set, and at this point one could in principle draw samples from each window and then recombine them into a final joint posterior following the weighting prescription described in \S\ref{subsec:umbrella}. 
However, there is one final complication that must be addressed first, namely that we do not know the transit geometry ahead of time and so we cannot be sure whether samples $\pi_T$ obtained under the T umbrella will overlap with both $\psi_G$ and $\psi_N$. Recall that umbrella sampling does not merely require that windows overlap, but instead imposes the more stringent requirement that at least some samples obtained within each window fall into the overlap region with their neighboring windows. This subtle condition demands careful attention, but turns out to be a blessing in disguise. In order to ensure that our window weights will be properly determined, we first sample from the transition umbrella, producing a (biased) posterior distribution $\pi_T(\gamma)$ with samples restricted by $\psi_T$ to lie between $0 < \gamma < 2$. Because $\gamma$ tells us how strongly grazing the transit is and because all samples $\pi_T(\gamma)$ must by construction fall near the grazing/non-grazing transition boundary, we can use $\pi_T(\gamma)$ to infer whether the transit geometry is grazing or not. If all samples $\pi_T(\gamma)$ have $\gamma < 1$, we can be confident that the planet is on a grazing trajectory; conversely if all samples $\pi_T(\gamma)$ have $\gamma > 1$, we can be confident that the planet is on a non-grazing trajectory. In the former case (all $\gamma < 1$) we then need only draw samples from the grazing window, whereas in the latter case (all $\gamma > 1$) we need only draw samples from the non-grazing window. In fact, under these circumstances umbrella sampling may no longer be needed, as the N or G windows will by themselves cover the full span of the relevant parameter space. However, at this point samples from the T umbrella have already been obtained, so one may as well proceed with a two-umbrella scheme. 
We recommend that all future transit modeling efforts \textemdash\ even those which do not intend to use umbrella sampling for their final analysis \textemdash\ first conduct an exploration of the grazing/non-grazing transition boundary, aided by $\psi_T$ to ensure adequate sampling of the region immediately surrounding $\gamma = 1$. Depending on the circumstances, one may wish to set a more or less lenient condition for categorizing a transit as grazing/non-grazing than we have proposed here (i.e. all $\gamma < 1$ vs all $\gamma > 1$), but the core strategy would remain the same. Conclusively ruling in/out grazing geometries will afford us greater confidence in results derived from transit modeling, and if widely adopted we anticipate our ``check the transition region first'' approach will reveal previously unnoticed inaccuracies or systematic offsets in transiting exoplanet catalogs. Samples may be drawn using any suitable sampling method, and provided that all posterior chains are well mixed and pass the necessary convergence checks, the choice of sampler will be inconsequential to the final results, save perhaps a difference in computational efficiency. Once we have drawn samples from all three windows (or perhaps only two, if $\pi_T(\gamma)$ rules out one geometry or another), calculation of the window weights, $z_i$, is a straightforward application of the EMUS algorithm presented in \S\ref{subsec:umbrella}; once $z_i$ have been calculated, we can then immediately estimate the posterior distributions and summary statistics. \section{Comparison of results from standard sampling techniques to umbrella sampling}\label{sec:sampler_comparison} We now test our proposed method by simulating transit lightcurve photometry for several prototypical star-planet configurations and then comparing posterior inferences obtained via umbrella sampling to inferences obtained using a standard direct sampling approach. 
Throughout these tests, we follow the same data simulation procedure and Hamiltonian Monte Carlo sampling routine described in \S\ref{subsec:simulation_description}, modified to incorporate a moderately informative prior on eccentricity. Rather than incorporating $e$ and $w$ as free parameters in our model, we instead inferred these quantities using the photoeccentric effect \citep{FordQuinnVeras2008, DawsonJohnson2012}, thus necessitating priors on both $e$ and $\rho_{\star}$. For $e$ we assumed a Rayleigh distribution with scale parameter $\sigma_e = 0.21$, corresponding to the single-planet value found by \citet{Mills2019}; for $\rho_{\star}$, we assumed a $10\%$ Gaussian measurement uncertainty. In practice, placing priors on $e$ and $\rho$ serves to place indirect priors on $T$ and $b$. We will address the role of eccentricity priors and describe the effects of several alternative prior distributions in greater detail in \S\ref{sss:eccentricity} below. We perform three tests of our method, each focused on a different star-planet architecture. In the first test (the ``J'' models; see Table \ref{tab:sim_parameters}), we place a warm Jupiter in orbit around a Sun-like star at various impact parameters in order to simulate grazing, near-grazing, and non-grazing trajectories. In the second test (models ``SE'' and ``MN'') we place a super-Earth and mini-Neptune on 21 day orbits around a star typical of the Kepler field, with inclinations scaled to produce comparable transit depths. In the third (model ``MHZ''), we place a rocky planet in the habitable zone of an M dwarf. Simulated photometry is shown in Figures \ref{fig:simulated_photometry}, \ref{fig:simulated_photometry_valley}, and \ref{fig:simulated_photometry_mhz}, and ground truth parameter values for each simulated lightcurve are collected in Table \ref{tab:sim_parameters}. 
As before, the important point throughout is that we have endeavored to simulate unremarkable transits, which we then model using techniques which are intended to be as uncontroversial as possible. \subsection{A giant planet orbiting a solar twin}\label{subsec:case_study_J} For our first test, we placed a warm Jupiter ($r=0.103$) on a circular orbit around a Sun-like star at three different impact parameters in order to create a grazing ($b=1.00$, model J-100), near-grazing ($b=0.85$, model J-85), and non-grazing ($b=0.22$, model J-22) trajectory. The transit duration for all three cases was set to $T=3.0$ hrs and then the orbital period was calculated in order to preserve $e=0$, resulting in orbital periods of 44.9, 13.0, and 3.6 days, respectively. In order to produce a comparable signal-to-noise, the simulated Gaussian noise for the grazing transit (J-100) was reduced by a factor of two relative to the non-grazing and near-grazing transits. The simulated photometry for all three configurations is shown in Figure \ref{fig:simulated_photometry}. \subsubsection{Simulation J-85: a near-grazing transit} We begin by placing our warm Jupiter on a $P=13$ day orbit around its host star with $b=0.85$, thereby producing a transit chord that is non-grazing yet close enough to the stellar edge that limb darkening becomes significant. In this near-grazing regime, the transit morphology begins to shift from U-shaped to V-shaped, so we expect that some fraction of the posterior distribution will be consistent with both a grazing and non-grazing trajectory. Examination of $\gamma$ samples obtained under the transition umbrella, $\psi_T$, confirms that this is indeed the case (Figure \ref{fig:transition_umbrella_J}), validating our assertion that umbrella sampling is warranted. 
\begin{figure} \centering \includegraphics[width=0.45\textwidth]{transition_umbrella_J_models.pdf} \caption{Distribution of the grazing coordinate, $\gamma$, for posterior MCMC samples obtained under the transition umbrella, $\psi_T$, for three simulated transits of a warm Jupiter orbiting a Sun-like star at various impact parameters. Simulated parameter values are collected in Table \ref{tab:sim_parameters} and corresponding simulated lightcurves are shown in Figure \ref{fig:simulated_photometry}. Unsurprisingly, the fraction of posterior samples consistent with a non-grazing geometry is highest for the simulated non-grazing transit (top), and vice-versa for a grazing geometry (bottom). The near-grazing transit (middle) reflects an intermediate state. In all three cases, at least some fraction of the posteriors are consistent with both a grazing and a non-grazing trajectory, indicating the transit geometry is ambiguous and the application of umbrella sampling is warranted.} \label{fig:transition_umbrella_J} \end{figure} Both direct sampling and umbrella sampling produce comparable distributions for $T$ and broadly similar estimates of $r$ and $b$ (Figure \ref{fig:posteriors_J85}). However, direct sampling does not fully explore the high-$b$, high-$r$ tail of the distribution. By eye, the differences appear slight, but the consequences of these skewed distributions become apparent when one calculates the marginalized $1\sigma$ uncertainties for $r$ and $b$. From direct sampling, we estimate $r = 0.098^{+0.042}_{-0.013}$, whereas from umbrella sampling, we estimate $r = 0.108^{+0.187}_{-0.021}$ (based on the $16^{\rm th}$, $50^{\rm th}$, and $84^{\rm th}$ percentiles). Although one might naively prefer the narrower posterior obtained via direct sampling, this misleadingly tight constraint on $r$ is predicated on the false assumption that the high-$b$, high-$r$ tail has been ruled out, when in fact it has simply not been explored. 
Umbrella sampling, on the other hand, ensures that the difficult to explore regions of the posterior have indeed been adequately sampled. \begin{figure*} \centering \includegraphics[width=0.9\textwidth]{posteriors_J85.pdf} \caption{Posterior distributions of $r$, $b$, and $T$ for a simulated near-grazing transit of a Jupiter-size planet on a 13 day orbit around a Sun-like star (simulation J-85). See Table \ref{tab:sim_parameters} for simulated model parameters and Figure \ref{fig:simulated_photometry} for the simulated photometry. Each thin line represents a 2000 sample chain from a single independent Monte Carlo run, while the thick lines give the combined results of 20 such runs. Vertical dashed lines represent ground-truth parameter values. Both methods produce posterior distributions consistent with the true value, but only umbrella sampling is able to fully explore the high-$b$, high-$r$ tail of the distribution.} \label{fig:posteriors_J85} \end{figure*} \subsubsection{Simulation J-22: a non-grazing transit} We next modify our simulated transit by changing the impact parameter to $b=0.22$ in order to place the planet on a non-grazing trajectory. In order to keep the transit duration consistent at $T=3$ hrs, we shifted the orbital period to $P=3.6$ days. In this case, the results of the two methods are entirely consistent with one another (Figure \ref{fig:posteriors_J22}), as expected for a planet with negligible posterior mass consistent with a grazing geometry. Because there is a small but non-zero fraction of samples with $b > 1-r$ (Figure \ref{fig:transition_umbrella_J}), trusting the results from direct sampling hinges on the implicit assumption that the sampler did not explore the grazing regime because the model and data are poorly matched there, rather than because the sampler encountered a bottleneck at the grazing/non-grazing boundary. 
The advantage of using umbrella sampling is that we can be more confident in our inferences because the sampler explores smoothly deep into the grazing regime, allowing us to be sure that the posterior likelihood there is indeed small. \begin{figure*} \centering \includegraphics[width=0.9\textwidth]{posteriors_J22.pdf} \caption{Posterior distributions of $r$, $b$, and $T$ for a simulated non-grazing transit of a Jupiter-size planet on a 3.6 day orbit around a Sun-like star (simulation J-22). See Table \ref{tab:sim_parameters} for simulated model parameters and Figure \ref{fig:simulated_photometry} for the simulated photometry. Each thin line represents a 2000 sample chain from a single independent Monte Carlo run, while the thick lines give the combined results of 100 such runs. Vertical dashed lines represent ground-truth parameter values. Both methods produce comparable results, however only umbrella sampling is able to smoothly explore deep into the grazing regime. This augmented exploration allows us to confidently rule out a grazing geometry by placing reliable upper limits on $r$ and $b$.} \label{fig:posteriors_J22} \end{figure*} \subsubsection{Simulation J-100: A grazing transit} For our last test we shift the transit to $b=1.0$ in order to create a grazing trajectory (model J-100). Once again, we preserve the transit duration at $T=3$ hrs by adjusting the orbital period, in this case to $P=45$ days. In order to compensate for the reduced transit depth of the grazing geometry, we reduce the photometric noise level by a factor of two, which gives this simulated transit (J-100) a similar signal-to-noise ratio compared to the first two simulations (J-85 \& J-22). The performance of the two methods for fitting a grazing transit is quite similar (Figure \ref{fig:posteriors_J100}). 
From direct sampling, we estimate $r = 0.148^{+0.262}_{-0.077}$, $b = 1.05^{+0.28}_{-0.11}$; whereas from umbrella sampling, we estimate $r = 0.130^{+0.265}_{-0.070}$, $b = 1.03^{+0.29}_{-0.13}$. As with the non-grazing case (simulation J-22), the main advantage of umbrella sampling is that we can be sure we have explored the full posterior geometry, lending us greater confidence in our results. \begin{figure*} \centering \includegraphics[width=0.9\textwidth]{posteriors_J100.pdf} \caption{Posterior distributions of $r$, $b$, and $T$ for a simulated grazing transit of a Jupiter-size planet on a 45 day orbit around a Sun-like star (simulation J-100). See Table \ref{tab:sim_parameters} for simulated model parameters and Figure \ref{fig:simulated_photometry} for the simulated photometry. Each thin line represents a 2000 sample chain from a single independent Monte Carlo run, while the thick lines give the combined results of 100 such runs. Vertical dashed lines represent ground-truth parameter values. In this case, both methods produce comparable results.} \label{fig:posteriors_J100} \end{figure*} \subsubsection{The role of eccentricity priors}\label{sss:eccentricity} In order to investigate the effect of eccentricity priors, we repeated the experiment for the near-grazing transit (simulation J-85) using three additional eccentricity prior distributions. For the first two, we again used a Rayleigh prior, but now with scale parameter $\sigma_e = 0.0355$ or $\sigma_e = 0.008$. The former corresponds to the value found by \citet{Mills2019} for multiplanet systems, while the latter corresponds to the value found by \citet{LithwickXieWu2012} for systems exhibiting large-amplitude transit timing variations. Recall that our original test used $\sigma_e = 0.21$, the \citet{Mills2019} single planet value. Our fourth and final test placed uniform (i.e. uninformative) priors on $e$. In all cases, we assumed a 10\% Gaussian measurement uncertainty on $\rho_{\star}$. 
Both methods show a similar sensitivity to choice of eccentricity prior (see Table \ref{tab:ecc_priors}). \begin{table}[] \renewcommand{\arraystretch}{2.0} \centering \begin{tabular}{l c c} Prior distribution & Direct Sampling & Umbrella Sampling \\ \hline Rayleigh, $\sigma_e=0.008$ & $0.108^{+0.037}_{-0.014}$ & $0.118^{+0.198}_{-0.021}$ \\ Rayleigh, $\sigma_e=0.0355$ & $0.108^{+0.038}_{-0.014}$ & $0.120^{+0.208}_{-0.022}$ \\ Rayleigh, $\sigma_e=0.21$ & $0.098^{+0.042}_{-0.013}$ & $0.108^{+0.187}_{-0.021}$ \\ Uniform, $e \sim (0,1)$ & $0.091^{+0.033}_{-0.010}$ & $0.097^{+0.136}_{-0.015}$ \\ \end{tabular} \caption{Marginalized MCMC posterior values for the planet-to-star radius ratio, $r$, of a simulated transit (simulation J-85), assuming four different eccentricity prior distributions. The true value is $r=0.103$. Posterior values quoted in this table correspond to the retrieved 16th, 50th, and 84th percentiles of $r$, with results arranged from most informative prior (top) to least informative (bottom). See text of \S\ref{sss:eccentricity} for discussion.} \label{tab:ecc_priors} \end{table} \subsection{A pair of planets straddling the radius valley}\label{subsec:case_study_radius_valley} A primary motivation for developing our umbrella sampling method is to accurately determine the radii of exoplanets in or near the radius valley \citep{Fulton2017}. More specifically, we would like to be able to measure the size of planets with $r_p \approx 1.6 R_{\oplus}$ and periods $P \lesssim 100$ days orbiting FGK stars, i.e. planets typical of the Kepler and \textit{K2} samples. For this case study, we simulate the transits of a pair of planets, each on a circular 21 day orbit around a K dwarf ($R_{\star}=0.92 R_{\odot})$. The first planet (simulation SE; a super-Earth) has $r_p = 1.3 R_{\oplus}$ and a non-grazing trajectory ($b=0.70)$. The second planet (simulation MN; a mini-Neptune) has $r_p = 2.2 R_{\oplus}$ and a barely grazing trajectory ($b=0.98)$. 
These setups produce a pair of transits with comparable transit depths, albeit distinct transit durations (Figure \ref{fig:simulated_photometry_valley}). For these test cases, an accurate estimate of $r$ thus hinges on accurate estimates of both $T$ and $b$. Our goal then is to investigate whether our competing sampling methods can constrain these three parameters with sufficient reliability to determine whether each planet exists on the rocky or gaseous edge of the radius valley. \begin{figure} \centering \includegraphics[width=0.45\textwidth]{simulated_photometry_radius_valley.pdf} \caption{\textit{Top panel}: Simulated photometry for a mini-Neptune (model MN) orbiting a K-dwarf star on a barely grazing orbit. \textit{Bottom panel}: Simulated photometry for a super-Earth (model SE) orbiting the same star on a non-grazing orbit. Ground truth simulation parameters are collected in Table \ref{tab:sim_parameters}. See \S\ref{subsec:case_study_radius_valley} for discussion.} \label{fig:simulated_photometry_valley} \end{figure} \subsubsection{Simulation SE: A non-grazing super-Earth} Posterior distributions for the super-Earth simulation produce consistent results regardless of which method is used (Figure \ref{fig:posteriors_SE}). Because the transit trajectory is far from grazing ($b=0.7, r=0.012$) this agreement is to be expected. The marginalized constraints for this case are $r_p = 1.06 \pm 0.17 R_{\oplus}$, $b = 0.51 \pm 0.30$, correctly identifying the planet as a rocky object with a non-zero, non-grazing impact parameter. Once again, the primary advantage of umbrella sampling is that it affords us confidence in our results. A small fraction of the posterior samples are consistent with $b > 1$, and by employing umbrella sampling we can be sure that we have correctly weighted the high-$b$ tail of the distribution, whereas with direct sampling alone there would be ambiguity as to whether the tail has been properly explored. 
In this case, direct sampling does manage to produce the correct result, but we only know this because we have also fit the transit using umbrella sampling. In this specific case, a larger fraction of samples consistent with a grazing trajectory would have made the radius uncertainty larger, which in turn would make the composition of the planet ambiguous, a major detriment for studies of planets near the radius valley. \begin{figure*} \centering \includegraphics[width=0.9\textwidth]{posteriors_SE.pdf} \caption{Posterior distributions of $r$, $b$, and $T$ for a simulated non-grazing transit of a super-Earth orbiting a K-dwarf star (simulation SE). See Table \ref{tab:sim_parameters} for simulated model parameters and Figure \ref{fig:simulated_photometry_valley} for the simulated photometry. Each thin line represents a 2000 sample chain from a single independent Monte Carlo run, while the thick lines give the combined results of 20 such runs. Vertical dashed lines represent ground-truth parameter values. The transit trajectory is far from grazing ($b=0.7, r=0.012$), and the two methods produce comparable results, as expected.} \label{fig:posteriors_SE} \end{figure*} \subsubsection{Simulation MN: A barely grazing mini-Neptune} For our mini-Neptune simulation, posterior inferences made via umbrella sampling are significantly better than those made via direct sampling (Figure \ref{fig:posteriors_MN}). Whereas umbrella sampling returns $r_p = 2.17^{+6.16}_{-0.55}\ R_{\oplus}$, $b = 0.96^{+0.10}_{-0.02}$, direct sampling returns $r_p = 5.3^{+19.4}_{-2.95}\ R_{\oplus}$, $b = 1.03^{+0.19}_{-0.04}$. The reduced precision in $r_p$ from direct sampling will have dramatic consequences for understanding the composition of the individual planet. Even though there is indeed a fairly large uncertainty on the planet radius no matter what method is used - which is to be expected for grazing transits - the implied planet composition is far more ambiguous using direct sampling. 
\begin{figure*} \centering \includegraphics[width=0.9\textwidth]{posteriors_MN.pdf} \caption{Posterior distributions of $r$, $b$, and $T$ for a simulated barely grazing transit of a mini-Neptune orbiting a K-dwarf star (simulation MN). See Table \ref{tab:sim_parameters} for simulated model parameters and Figure \ref{fig:simulated_photometry_valley} for the simulated photometry. Each thin line represents a 2000 sample chain from a single independent Monte Carlo run, while the thick lines give the combined results of 20 such runs. Vertical dashed lines represent ground-truth parameter values. In this case, umbrella sampling produces obviously improved results, as direct sampling struggles to smoothly explore the grazing regime.} \label{fig:posteriors_MN} \end{figure*} \subsection{A rocky planet in the M-dwarf habitable zone}\label{subsec:case_study_MHZ} For our final test (simulation MHZ), we place a Mercury-sized planet ($r_p = 0.38 R_{\oplus}$) on a $P=37$ day orbit around a $R_{\star} = 0.38 R_{\odot}$ M dwarf, which puts the planet squarely in that star's habitable zone. See Figure \ref{fig:simulated_photometry_mhz} for the simulated photometry and Table \ref{tab:sim_parameters} for the ground truth simulation parameters. \begin{figure} \centering \includegraphics[width=0.45\textwidth]{simulated_photometry_MHZ.pdf} \caption{Simulated photometry for a Mercury radius planet orbiting in the habitable zone of an M-dwarf host star (model MHZ). Ground truth simulation parameters are collected in Table \ref{tab:sim_parameters}. See \S\ref{subsec:case_study_MHZ} for discussion.} \label{fig:simulated_photometry_mhz} \end{figure} We find that direct sampling and umbrella sampling perform equally well for this test case, with both methods recovering the true values for $r$, $b$, and $T$ with nearly identical accuracy (Figure \ref{fig:posteriors_MHZ}). 
Specifically, both methods find $r_p = 0.32 \pm 0.20\ R_{\oplus}$ and a broad, predominantly non-grazing distribution for $b$. Yet even in this case where marginalized statistics are nearly identical, umbrella sampling still confers an advantage over direct sampling. Because the posterior distribution for impact parameter extends above $b = 1$ for both methods, with direct sampling we cannot be certain that the full posterior space has been adequately explored. Rather, it is possible we encountered the usual bottleneck at the grazing/non-grazing boundary, leaving the grazing regime undersampled. With umbrella sampling, however, we can be confident \textemdash without the need for follow-up observations \textemdash that the posterior geometry has been fully explored, meaning that the planet is indeed on a non-grazing orbit and therefore has an accurately measured radius. \begin{figure*} \centering \includegraphics[width=0.9\textwidth]{posteriors_MHZ.pdf} \caption{Posterior distributions of $r$, $b$, and $T$ for a simulated transit of a Mercury-sized planet orbiting in the habitable zone of an M-dwarf star (simulation MHZ). See Table \ref{tab:sim_parameters} for simulated model parameters and Figure \ref{fig:simulated_photometry_mhz} for the simulated photometry. Each thin line represents a 2000 sample chain from a single independent Monte Carlo run, while the thick lines give the combined results of 100 such runs. Vertical dashed lines represent ground-truth parameter values. For this test case, both direct sampling and umbrella sampling perform equally well.} \label{fig:posteriors_MHZ} \end{figure*} \section{Analysis of real targets}\label{sec:real_systems} We will now use umbrella sampling to estimate the impact parameters and radii of several KOI planet candidates with $b > 1$ reported by the NASA Exoplanet Archive cumulative KOI table. 
For the KOIs in multiplanet systems, we also fit the sibling planets (which are not on grazing trajectories) in order to verify that our results are consistent with previous measurements. For the KOIs in single planet systems, we select an unrelated planet with a non-grazing $b$ but otherwise similar properties to serve as a basis of comparison. Of the four $b>1$ candidates we investigate, we find that two are almost certainly on non-grazing trajectories (in conflict with reported values) and two are unambiguously on grazing trajectories (in agreement with reported values). For the two targets now found to be non-grazing, we report updated values on $r_p$ and $b$ which substantially revise our interpretation of these objects' properties. For the two grazing targets, our results are in agreement with previous measurements and thus place a broad upper limit on $r_p$ and $b$. The advantage of umbrella sampling - even when results are unchanged from literature values - is that because we have explored the full range of geometries, we can be confident that our results are reliable and not artifacts of sampler inefficiencies. Our data reduction and transit fitting pipeline is described below. Similarly to our experiments with simulated data, we have endeavored to use standard techniques wherever possible, except of course for the steps of the procedure which directly implement umbrella sampling. We begin by downloading the Pre-search Data Conditioning Simple Aperture Photometry (PDCSAP) flux from the Mikulski Archive for Space Telescopes (MAST). We then flag bad cadences and remove any large outliers with iterative sigma clipping at the $5\sigma$ level. We next remove long-term trends using a Gaussian Process (GP) implemented by \texttt{celerite} \citep{ForemanMackey2017}. 
For the GP kernel, we adopted a stochastically driven simple harmonic oscillator (SHOTerm)\footnote{\url{https://celerite.readthedocs.io/}}, which has been shown to produce good results for astronomical time series \citep{ForemanMackey2017}. In order to protect the transit shape during detrending, we mask all cadences within 1.5 transit durations of each expected mid-transit time and project our GP trend across the masked transit region. To account for possible transit timing variations (TTVs), we read in the transit time measurements of \citet{Holczer2016} and fit a smooth model to these using a GP regression and a Matern-3/2 kernel. We obtain a self-consistent starting estimate for the transit shape and transit times by first fitting $\{P, t_0, \ln r, b, T\}$ and holding transit times fixed, then reversing the procedure to hold transit shape parameters fixed and fitting independent transit times. Finally, we model the independent transit times using a 1st-3rd order polynomial and either zero or one single-frequency sinusoid. The complexity of the TTV model was selected based on the Akaike Information Criterion \citep[AIC;][]{Akaike1974}. For all steps of this TTV initialization procedure we hold limb darkening coefficients fixed to the theoretical values obtained from the NASA Exoplanet Archive. While sampling from the posterior, we hold transit times fixed at our low order polynomial + sinusoid model and sample each of the umbrellas independently following the prescription in \S\ref{sec:umbrella} and \S\ref{sec:sampler_comparison}. This means that the free parameters in the model are $\{\ln T, q_1, q_2, F, \ln\sigma_F\}$, plus either $\{\ln r,b\}$ for the N umbrella, $\{\ln r, \gamma\}$ for the T umbrella, or $\{\ln\lambda, \gamma\}$ for the G umbrella. As usual, we adopt a Rayleigh prior for the eccentricity, with scale parameter, $\sigma_e$, chosen to match the architecture of the system under consideration (see below). 
Stellar density priors were taken from the \textit{Gaia-Kepler} catalog \citep{GaiaDR2, Berger2018}. For simplicity, we only consider non-overlapping transits and fit planets one at a time for multiplanet systems. Overlapping transits were defined as any transit pair for which $|t_{0,b}-t_{0,c}| < (T_b + T_c)$ for any two planets b and c. Each HMC run consisted of two independent chains, with each chain run for a default length of 10,000 tuning steps and 5,000 draws, generating 10,000 samples total per run. In a few cases, the chains did not converge on our first attempt to fit the data, in which case extending the length of the tuning phase remedied the issue. After drawing samples from all three windows - N, T, and G - we check that the posterior samples of $r$, $b$, and $T$ are consistent between the sub-distributions $\pi_N$, $\pi_T$, and $\pi_G$ for each planet. This does not mean that the distributions must overlap completely (indeed, they are expected not to), but rather that they have at least some overlap, with perhaps some modest tension between umbrellas. In practice, we found that posterior sub-distributions were nearly always either obviously consistent or obviously inconsistent, with the latter case indicating that the algorithm had not been properly tuned prior to sampling. In some cases, even though sub-distributions were clearly inconsistent when considered simultaneously, results initially appeared reasonable when each umbrella was considered in isolation. Thus, our method provides a new avenue for verifying that the results of a transit fit are trustworthy: if Markov chains do not properly behave within all three windows and produce self-consistent results, we know to investigate further. Thus, the sub-distribution comparison step of our algorithm builds in an extra redundancy for checking convergence. 
\subsection{KOI-2068}\label{subsec:KOI-2068} KOI-2068 is a $0.91 R_{\odot}$ star hosting a single planet candidate at $P=42$ days with $1\sigma$ upper limits $b\leq58$ and $r_p\leq42 R_{\oplus}$. With signal-to-noise $S/N=21$ and a disposition score of $0.89$, the object is unlikely to be a false positive. This combination of degenerate, poorly constrained $r$ and $b$ values plus a low false positive probability makes this object an ideal test case for our umbrella sampling scheme. As a comparison target, we select KOI-2285, a $0.87 R_{\odot}$ star hosting a single confirmed planet at $P=38$ days, with $b=0.26\pm0.23$, $r_p=2.79\pm0.30$, and $S/N=24$. For both targets, we set the Rayleigh eccentricity prior scale at $\sigma_e = 0.21$, the \citet{Mills2019} single-planet value. After sampling, for KOI-2068.01 we recover $r_p = 17.7^{+35.9}_{-10.3} R_{\oplus}$, $b=1.13^{+0.37}_{-0.12}$, which is unfortunately not an appreciably different constraint on $r_p$ than the literature value. However, all is not lost. Visual inspection of the posterior transit model (Figure \ref{fig:KOI_2068_transit_posterior}, left panel) suggests that the transit shape is remarkably well constrained, even if the individual parameters are not. Moreover, posterior distributions of $r$ and $b$ (Figure \ref{fig:KOI_2068_transit_posterior}, right panel) are smooth and well behaved, showing a clear preference for grazing geometries. Because umbrella sampling ensures that we have explored the full parameter space, we can now be confident that the weak upper limits on $r$ and $b$ arise from inherent limitations of the data themselves, not from a failure of the sampler. Furthermore, we can now place a confident lower limit on the impact parameter, which will be useful for any follow-up work. 
A deeper investigation, which is beyond the scope of this work, might reveal transit depth variations or transit duration variations which could be able to place far more precise constraints on $b$ and, consequently, on $r$ \citep{Dawson2020}. \begin{figure*} \centering \includegraphics[width=0.75\textwidth]{K02068_poseterior_fit.pdf} \caption{Phase-folded transits of KOI-2068 with posterior model from umbrella sampling overplotted in red. The pink shaded region shows the $1\sigma$ credible interval. The V-shape of the transit is apparent, which is suggestive of a grazing transit. The posterior corner plot for $r$ and $b$ is shown on the right.} \label{fig:KOI_2068_transit_posterior} \end{figure*} For the comparison target, we recover $r_p = 2.88 \pm 0.15 R_{\oplus}$, $b=0.55 \pm 0.27$, $T = 4.17 \pm 0.16$ hrs, consistent with the literature values. We will not comment on the relative precision or accuracy of our results vs reliable literature results, as any differences in measured values are more likely to be driven by differences in data reduction techniques than by which sampling method was used. The important point is that our analysis was able to reproduce known reliable results, validating our pipeline and affording us confidence in any new measurements which improve upon the state of the art. \subsection{KOI-2150}\label{subsec:KOI-2150} KOI-2150 is a $0.94 R_{\odot}$ star hosting two planet candidates, both with impact parameters greater than unity reported on the NASA Exoplanet Archive. The inner planet ($P=19$ days) has $1\sigma$ upper limits $b\leq73$ and $r_p \leq 38 R_{\oplus}$, while the outer planet ($P=45$ days) has $b\leq72$ and $r_p \leq 94 R_{\oplus}$. Both candidates have a disposition score $>0.99$, indicating that neither is likely to be a false positive. We set the Rayleigh eccentricity prior scale at $\sigma_e = 0.0355$, the \citet{Mills2019} multi-planet value. 
After umbrella sampling, for the inner planet we recover $b=0.35\pm0.32$, $r_p=2.45\pm0.26 R_{\oplus}$ (11\% radius uncertainty), and for the outer planet we recover $b=0.64^{+0.39}_{-0.43}$, $r_p=2.01^{+5.40}_{-0.26} R_{\oplus}$. Thus, umbrella sampling places both objects on non-grazing trajectories - albeit with poorly constrained impact parameters - and finds a plausible radius for each. In this case, umbrella sampling has significantly outperformed the standard method. These candidates are probably mini-Neptunes, both possessing individual properties consistent with a depleted radius valley \citep{FultonPetigura2018, VanEylen2018} and relative sizes consistent with the ``peas in a pod'' hypothesis \citep{Weiss2018}, adding further credibility to our results. Posterior transit models are shown in Figure \ref{fig:KOI_2150_transit_posterior}. \begin{figure} \centering \includegraphics[width=0.45\textwidth]{K02150_poseterior_fit.pdf} \caption{Posterior transit models for the two candidate planets orbiting KOI-2150. Shaded regions (just barely visible on the plots) show $1\sigma$ credible intervals.} \label{fig:KOI_2150_transit_posterior} \end{figure} \subsection{KOI-1426}\label{subsec:KOI-1426} KOI-1426 is a $0.90 R_{\odot}$ star hosting two confirmed planets and one planet candidate. The two confirmed planets have well-constrained properties reported on the NASA Exoplanet Archive (KOI-1426.01: $P=39$ days, $r_p=2.81\pm0.04 \ R_{\oplus}$, $b=0.03^{+0.33}_{-0.03}$, KOI-1426.02: $P=75$ days, $r_p=6.39\pm0.10 R_{\oplus}$, $b=0.80^{+0.01}_{-0.06}$), but the candidate planet (KOI-1426.03: $P=150$ days, $r_p \leq 36 R_{\oplus}$, $b\leq68$) exhibits the $r-b$ degeneracy. Unlike either candidate in the KOI-2150 system, KOI-1426.03 possesses an impact parameter constraint $b=1.25^{+0.67}_{-0.17}$ that marks its orbit (if real) as unambiguously grazing. 
All three planets possess highly significant TTVs with similar periodicities and amplitudes \citep{Holczer2016}, indicating that they are unlikely to be false positives. Consequently, we set the Rayleigh eccentricity prior scale at $\sigma_e = 0.008$, the \citet{LithwickXieWu2012} value. Our umbrella sampling analysis confirms the grazing transit hypothesis for KOI-1426.03, finding $b=1.13^{+0.24}_{-0.12}, r=25.0^{+21.9}_{-9.9} R_{\oplus}$ and $<4\%$ of posterior samples drawn under the transition umbrella consistent with a non-grazing geometry. In contrast, none of the samples drawn from the transition window for either of the two confirmed planets were consistent with a grazing geometry (Figure \ref{fig:transition_K01426}), highlighting the utility of our approach for distinguishing grazing from non-grazing transits. As expected, our posterior results for the two confirmed planets (KOI-1426.01: $r_p=2.72\pm0.07 \ R_{\oplus}$, $b=0.26\pm0.15$, KOI-1426.02: $r_p=6.52\pm0.16 R_{\oplus}$, $b=0.84\pm0.02$) are consistent with the literature values. Although the uncertainty on the radius of the grazing candidate planet remains high at ${\sim}70\%$, the transit shape is extremely well constrained by the data, reminiscent of the results for KOI-2068. \begin{figure} \centering \includegraphics[width=0.45\textwidth]{K01426_posterior_fit.pdf} \caption{Posterior transit model for K01-1426.03. The $1\sigma$ credible interval is so narrow that it is not even visible on the plot.} \label{fig:KOI_1426_transit_posterior} \end{figure} Revealing the true properties of the KOI-1426 system will likely require a full photodynamical analysis. In retrospect, this is unsurprising for two reasons. First, all three planets exhibit large transit timing variations \citep{DiamondLowe2015, Holczer2016} which may be insufficiently characterized by our parametric model. 
Second, grazing transits often have a time-dependent transit shape \citep{Hamann2019, Dawson2020}, and thus our approximation of an invariant transit shape may yield biased inferences. These complications do not mean that our present efforts to model the system were a waste of time. On the contrary, the results obtained with umbrella sampling will serve as useful priors for setting up the computationally expensive photodynamical model. Informed priors (such as the fact that KOI-1426.02 is both real and on a grazing trajectory) can place meaningful limits on the system architecture and thereby greatly improve both the accuracy and efficiency of the full photodynamical treatment. Furthermore, the techniques of photodynamics and umbrella sampling are not mutually exclusive, and it may ultimately prove necessary to combine the two methods in order to achieve a definitive result for this or other dynamically active systems. \begin{figure} \centering \includegraphics[width=0.45\textwidth]{transition_umbrella_K01426.pdf} \caption{Samples of $\gamma$ obtained under the transition umbrella, $\psi_T$, for the three planets in the KOI-1426 system. Neither of the two confirmed objects (top two panels, blue) have any posterior samples $\gamma < 1$, indicating that both planets are almost certainly on non-grazing trajectories. For these two objects, we can comfortably ignore the grazing umbrella, $\psi_G$, and perform a two-window analysis. The candidate object (bottom panel, orange) has $97\%$ of samples with $\gamma < 1$, indicating that this planet is probably on a grazing trajectory. For this object, we might choose to ignore the non-grazing umbrella, $\psi_N$, but the more conservative approach would be to perform a three-window analysis as usual. 
The distributions seen here for KOI-1426 are typical of Kepler targets, in that samples of $\gamma$ from the transition umbrella can often be used to rule out/in certain transit geometries.} \label{fig:transition_K01426} \end{figure} \section{Summary and recommendations}\label{sec:summary} We have introduced a new method for modeling exoplanet transit lightcurves which explicitly accounts for the differences in transit geometry between grazing and non-grazing trajectories. Our technique employs the well-established framework of umbrella sampling \citep{TorrieValleau1977} by splitting the transit fitting problem into three sub-problems, each restricted to either the grazing, non-grazing, or transition regions of the parameter space. We draw samples independently from each window using an MCMC sampler to produce three posterior sub-distributions which we then recombine into a single joint posterior distribution using the Eigenvector Method of Umbrella Sampling \citep{Thiede2016, Dinner2017}. Although umbrella sampling is widely used by molecular dynamicists and biochemists, it has only recently begun to gain the attention of astronomers \citep{Matthews2018}. Yet umbrella sampling is itself a general statistical tool not tied to any particular content domain. At heart, umbrella sampling is designed to estimate complicated posterior geometries (e.g. isolated modes or degeneracy ridges) - geometries of the sort that arise frequently in astrophysical studies. By applying umbrella sampling to a familiar astronomical problem and illustrating its efficacy, we hope to raise awareness of this powerful statistical technique which is well suited to astronomical data analysis. To aid astronomers first learning to use umbrella sampling, we have provided an introductory Python tutorial at \url{https://gjgilbert.github.io/tutorials/umbrella_sampling/}. 
Our umbrella sampling routine reliably produces posterior estimates of planetary radii and impact parameters which are more accurate than estimates obtained using a standard approach. We tested our method under a wide range of conditions using both real and synthetic data, finding that umbrella sampling performed at least as well as \textemdash and usually better than \textemdash the standard direct sampling approach for every star-planet configuration we considered. Moreover, even in cases where umbrella sampling did not provide higher precision estimates than direct sampling, we were able to have greater confidence in the results of umbrella sampling because only this method is able to efficiently explore deep in the grazing regime. Throughout this paper, we have offered numerous suggestions for how to modify existing transit modeling procedures in order to produce more robust posterior estimates. We now summarize these recommendations here. \begin{enumerate} \item Before fitting any transit model, perform an exploratory analysis restricted to the region of parameter space immediately surrounding the grazing/non-grazing transition at $r=1-b$. This exploration can be efficiently executed using our $\{r,\gamma\}$ basis and the transition umbrella, $\psi_T$. \item If all samples $\pi_T(\gamma)$ are consistent with a non-grazing geometry (i.e. all $\gamma > 1$), one may proceed with a standard analysis, restricting the model to non-grazing geometries. Conversely, if all samples $\pi_T(\gamma)$ are consistent solely with a grazing geometry (all $\gamma < 1$), one may instead restrict the model to grazing geometries and sample using our new $\{\lambda,\gamma\}$ basis. \item If, however, samples $\pi_T(\gamma)$ are mixed between grazing and non-grazing geometries, the transit should be modeled using the scheme we have described in detail in \S\ref{subsec:basis_specification} and \S\ref{subsec:full_model}. 
\item After sampling from under the various umbrellas $\psi_i$, compare posterior sub-distributions $\pi_i$ for each transit parameter to ensure that inferences are consistent between samples drawn from different windows. If samples are in disagreement, closer investigation is needed. This comparison step provides an additional convergence check for the user. \item For planets inferred to orbit on a grazing trajectory, consider whether a fully photodynamical analysis is needed. If so, the results obtained via umbrella sampling will serve as useful priors for initializing the more computationally expensive photodynamical model, thereby improving efficiency. \end{enumerate} For simplicity throughout this work, we always used the full first-to-fourth contact transit duration $T_{14}$ as our transit duration because it is defined regardless of transit geometry. However, the center-to-center duration (1.5 to 3.5 contact), $T_{c-c}$ is often better constrained by the data and is therefore often preferred as a basis parameter as long as it is defined, which it will be as long as $\gamma > 0$. Given our window bounds (Equations \ref{eq:psi_N}-\ref{eq:psi_G}), this means that we are free to use $T_{c-c}$ in place of $T_{14}$ for the N and T umbrellas. Swapping one $T$ for another adds an additional step to the procedure, in that a consistent $T$ must be used to produce the final joint posterior distribution. Fortunately, once samples have been obtained for all parameters, determining $T_{14}$ from $T_{c-c}$ is a matter of straightforward arithmetic, and vice versa. One may alternatively use the full-width-half-max transit duration, $T_{\rm FWHM}$, which can be defined in relation to the transit depth, $\delta$, even for strongly grazing transits. The downside of using $T_{\rm FWHM}$ is that it is numerically more difficult to determine and introduces a new covariance between $T$ and $\delta$. 
When applying umbrella sampling in the future, $T_{c-c}$ should probably be adopted whenever possible. For most of the history of exoplanet science, uncertainties on planetary radii and orbital parameters have been dominated by uncertainties on stellar parameters. Now, however, with improved stellar radius estimates from Gaia \citep{GaiaDR2}, and high resolution spectroscopy \citep{Johnson2017, Petigura2017}, the details of the transit fitting problem have once again become relevant for obtaining state-of-the-art estimates of planet properties. Statistical studies of exoplanets will remain dominated by the population of transiting planets for at least the next decade, and so transit modeling will remain at the foundation of many astrophysical analyses. By adopting umbrella sampling as a new tool, we will ensure that our understanding of exoplanet demographics, architectures, and formation histories will reach as far as the data allow. \acknowledgements GJG is supported by a NASA Future Investigators in Earth and Space Sciences and Technology (FINESST) Fellowship, Grant Number 80NSSC20K1533. This study made use of data products from the {\sl Kepler}\ mission hosted on the NASA Exoplanet Archive. Some of the data were obtained from the Mikulski Archive for Space Telescopes (MAST) at the Space Telescope Science Institute. These data can be accessed via \dataset[10.17909/T98304]{\doi{10.17909/T98304}}. This study also made use of computational resources provided by the University of Chicago Research Computing Center. We thank the anonymous referee for their detailed feedback and many helpful comments which greatly improved the quality of this manuscript. We thank Andrey Kravtsov, Dan Fabrycky, Leslie Rogers, Fred Ciesla, Erik Petigura, Mason MacDougall, Dan Foreman-Mackey, and Louis Smith for thoughtful conversations which guided the direction of this study. 
Software: \texttt{astropy} \citep{astropy:2013, astropy:2018}, \texttt{celerite} \citep{ForemanMackey2017}, \texttt{exoplanet} \citep{ForemanMackey2021}, \texttt{numpy} \citep{numpy:2020}, \texttt{PyMC3} \citep{pymc3:2016}, \texttt{scikit-learn} \citep{scikit-learn:2011}, \texttt{scipy} \citep{scipy:2020}, \texttt{starry} \citep{Luger2019}, \texttt{usample} \citep{Matthews2018}
1,108,101,565,858
arxiv
\section{Introduction} We prove that the chain operad of small squares is formal. Together with the Deligne conjecture about the action of this operad on Hochschild cochains of an associative algebra, this implies existence of a structure of a homotopy Gerstenhaber algebra on Hochschild cochains. This fact clarifies the situation with the proof of M. Kontsevich formality theorem in the paper of the author \cite{Tam}. The formality of the operad follows quite easily from the existence of an associator. The author would like to thank Boris Tsygan, Paul Bressler, and Maxim Kontsevich for their help. {\bf Remark} Maxim Kontsevich found a proof of a more general result: chain operad of small balls is formal in all dimensions as an operad of coalgebras. Also, he has pointed out that the construction presented here allows one to show the formality on the level of coalgebras as well, since all the maps of operads involved are the maps of operads of coalgebras. \section{Small Square Operad (after \cite{F})} We reproduce the construction of the small square operad from \cite{F}. First, the symmetric groups in the definition of operad are replaced with the braid groups and we obtain the notion of braided operad. A {\em topological $B_\infty$-operad} $X$ is defined as a braided operad such that all its spaces $X(n)$ are contractible and the braid group $B_n$ acts freely on $X(n)$. If $X$ and $Y$ are topological $B_\infty$-operads, then so is $X\times Y$ and we have homotopy equivalences \begin{equation}\label{equiv} p_1:X\times Y\to X;\quad p_2:X\times Y\to Y, \end{equation} where $p_1,p_2$ are the projections. Let $PB_n$ be the group of pure braids with $n$ strands. Given a topological $B_\infty$-operad $X$, the corresponding {\em operad of small squares} is a symmetric operad $X'$ such that $X'(n)=X(n)/PB_n$ with the induced structure maps. The maps (\ref{equiv}) guarantee that any two operads of small squares are connected by a chain of homotopy equivalences. 
It is proven in \cite{F} that the classical operad of May (whose $n-th$ space is the configuration space of $n$ disjoint numbered squares inside the unit square such that the corresponding sides are parallel) is an operad of small squares in our sense. The functor of singular chains $C^{\rm sing}_\bullet:Top\to Complexes$ has a natural tensor structure given by the Eilenberg-Zilber map $EZ:C^{\rm sing}_\bullet(X)\otimes C^{\rm sing}_\bullet(Y)\to C^{\rm sing}_\bullet(X\times Y)$. Therefore, for a topological operad $O$, the collection $C^{\rm sing}_\bullet(O(\bullet))$ has a structure of a $dg$-operad. The structure map of the $i$-th insertion is $$ C^{\rm sing}_\bullet(O(n))\otimes C^{\rm sing}_\bullet(O(m))\stackrel{EZ}{\to} C^{\rm sing}_\bullet(O(n)\times O(m))\stackrel{o_{i*}^O}{\to}C^{\rm sing}_\bullet(O(n+m-1)), $$ where $o_i^O$ is the structure map of the $i$-th insertion in $O$. For a small square operad $X$ consider the operad $E_2(X)=C^{\rm sing}_\bullet(X)$. Any two such operads are quasi-isomorphic, where quasi-isomorphic means connected by a chain of quasi-isomorphisms. In particular, the homology operad of any of $E_2(X)$ is the operad $e_2$ controlling Gerstenhaber algebras (see section \ref{apbn} for the definition of $e_2$). Our goal is to show that \begin{Theorem} Any operad $E_2(X)$ is quasi-isomorphic to its homology operad $e_2$. \end{Theorem} For this it suffices to pick a particular small square operad $X$. \section{Realization of $E_2$} \subsection{Operad of categories $PaB_n$} (after \cite{BN}) First, let us reproduce from \cite{BN} the definition of the category $PaB_n$. Let $B_n$ $(PB_n)$ be the group of braids (pure braids) with $n$ strands, let $S_n$ be the symmetric group. Let $p:B_n\to S_n$ be the canonical projection with the kernel $PB_n$. We assume that the strands of any braid are numbered in the order determined by their origins. 
The objects of the category $PaB_n$ are parenthesized permutations of elements $1,2,\ldots,n$ (that is pairs $(\sigma, \pi)$, where $\sigma\in S_n$ and $\pi$ is a parenthesation of the non-associative product of $n$ elements). The morphisms between $(\sigma_1,\pi_1)$ and $(\sigma_2,\pi_2)$ are such braids from $B_n$ that any strand joins an element of $\sigma_1$ with the same element of $\sigma_2$, in other words, ${\rm Mor}((\sigma_1,\pi_1),(\sigma_2,\pi_2))=p^{-1}(\sigma_2^{-1}\sigma_1)$. The composition law is induced from the one on $B_n$. The symmetric group $S_n$ acts on $PaB_n$ via renumbering the objects $T_\sigma(\sigma_1,\pi_1)= (\sigma\sigma_1,\pi_1)$ and acts identically on morphisms. The collection of categories $PaB_n$ form an operad. Indeed, the collection ${\rm Ob} PaB_\bullet$ forms a free operad in the category of sets generated by one binary noncommutative operation. Let us describe the structure map $o_k$ of the insertion into the $k$-th position on the level of morphisms. Suppose we insert $y:(\sigma_1,\pi_1)\to (\sigma_2,\pi_2)$ into $x:(\sigma_3,\pi_3)\to (\sigma_4,\pi_4)$. We replace the strand number $\sigma^{-1}_3(k)$ of the braid $x$ by the braid $y$ made very narrow. 
We have a free left action of $B_n$ on $PaB_n'$: $(x,y)\to (gx,y)$ and a braided operad structure on $PaB_\bullet'$ (the structure maps are defined similarly to $PaB_n$). One checks that the corresponding operad of classifying spaces is a topological $B_\infty$-operad and that the corresponding small square operad is isomorphic to $X_\bullet$. Consider the corresponding chain operad. Let $C_\bullet(NPaB_n)$ be the chain complex over $\Bbb Q$ of $NPaB_n$ as a simplicial set. The collection $C_\bullet(NPaB_n)$ forms a $dg$-operad (via the Eilenberg-Zilber map). Since $C_\bullet(NPaB_n)$ is just a bar complex of the category $PaB_n$, this operad will be denoted by $C_\bullet(PaB_\bullet)$. We have a canonical quasi-isomorphism of operads $C_\bullet(PaB_\bullet)\to C^{\rm sing}_\bullet|NPaB_\bullet|$. Therefore, it suffices to construct a quasi-isomorphism of $C_\bullet(PaB_\bullet)$ and $e_2$. \section{Operad of algebras $A^{pb}_n$ and construction of quasi-isomorphism} \label{apbn} By definition \cite{Dr} $A^{pb}_n$ is the algebra over $\Bbb Q$ of power series in the noncommutative variables \begin{equation}\label{tij} t_{ij},1\leq i,j\leq n;\quad i\neq j; \quad t_{ij}=t_{ji} \end{equation} with relations \begin{equation}\label{rel} [t_{ij}+t_{ik},t_{jk}]=0. \end{equation} Let $I_n$ be the double-sided ideal generated by all $t_{ij}$. We have a canonical projection \begin{equation}\label{chi} \chi: A^{pb}_n\to A^{pb}_n/I_n\cong {\Bbb Q}. \end{equation} The symmetric group $S_n$ acts naturally on $A^{pb}_n$ so that $T_\sigma t_{ij}=t_{\sigma(i)\sigma(j)}$. The collection $A^{pb}_n$ forms an operad in the category of algebras in a well-known way. The map of the insertion into the $i$-th position $o_i:A^{pb}_n\otimes A^{pb}_m\to A^{pb}_{n+m-1}$ looks as follows. Let $$ \phi(k)=\left\{\begin{matrix} k, && k\leq i;\\ k+m-1,&& k>i. \end{matrix}\right. 
$$ Then $$ o_i(t_{pq}\otimes 1)=\left\{\begin{matrix} t_{\phi(p)\phi(q)}, && p,q\neq i;\\ \sum_{r=i}^{i+m-1} t_{r\phi(q)}, && p=i; \end{matrix}\right. $$ $$ o_i(1\otimes t_{pq})=t_{i+p-1,i+q-1}. $$ Any algebra with unit over $\Bbb Q$ gives rise to a $\Bbb Q$-additive category $C_A$ with one object. Denote by ${\Bbb Q} Cat$ the category of small ${\Bbb Q}$-additive categories, and by ${\Bbb Q} Cat'={\Bbb Q} Cat/C_{\Bbb Q}$ the over-category of ${\Bbb Q} Cat$ over $C_{\Bbb Q}$. Its objects are the elements of ${\rm Mor}_{{\Bbb Q} Cat}(x,C_{\Bbb Q})$, where $x\in {\Bbb Q} Cat$. A morphism between $\phi$ and $\psi$, where $\phi:x\to C_{\Bbb Q}$; $\psi:y\to C_{\Bbb Q}$, is a morphism $\sigma:x\to y$ in ${\Bbb Q} Cat$ such that $\sigma\psi=\phi$. This category has a clear symmetric monoidal structure. We have the functor of nerve $N^{{\Bbb Q}}:{\Bbb Q} Cat'\to \Delta^oVect$, which is the straight analogue of the nerve of an arbitrary category, and the functor $C_\bullet:\Delta^oVect\to Complexes$. Both of these functors have tensor structure (on the latter functor it is defined via the Eilenberg-Zilber map), therefore we have a through functor ${\Bbb Q} Cat'\to Complexes$ and the induced functor $${\Bbb Q} Cat'\mbox{-} Operads\to dg\mbox{-} Operads, $$ which will be denoted by $C_\bullet^{{\Bbb Q}}$. The map (\ref{chi}) produces a morphism $\chi_*:C_{A^{pb}_n}\to C_{\Bbb Q}$ and defines an object $O_A(n)\in {\Bbb Q} Cat'$. The operad structure on $A^{pb}_n$ defines an operad structure on the collection $O_A(n)$. The complex $C_\bullet^{{\Bbb Q}}(O_A(n)) $ looks as follows: $C_n^{{\Bbb Q}}(O_A(k))\cong A_k^{pb \otimes n}$; $$ d a_1\otimes\cdots\otimes a_n=\chi(a_1)a_2\otimes\ldots\otimes a_n- a_1a_2\otimes\cdots\otimes a_n+\ldots+(-1)^{n-1}a_1\otimes\ldots\otimes a_{n-1} \chi(a_n). $$ This is the bar complex for ${\rm Tor}^{A^{pb}_n}(A^{pb}_n/I_n,A^{pb}_n/I_n) $. Let $e_2$ be the operad of graded vector spaces governing the Gerstenhaber algebras. 
It is generated by two binary operations: the commutative associative multiplication of degree zero, which is denoted by $\cdot$, and the commutative bracket of degree -1 denoted by $\{,\}$. These operations satisfy the Leibniz identity $\{ab,c\}=a\{b,c\}+(-1)^{b(c+1)} \{a,c\}b$ and the Jacobi identity $$ (-1)^{|a|}\{a,\{b,c\}\}+(-1)^{|a||b|+|b|}\{b,\{a,c\}\}+ (-1)^{|a||c|+|b||c|+|c|}\{c,\{a,b\}\}=0. $$ We have a morphism of operads \begin{equation}\label{k} k:e_2\to C_\bullet^{{\Bbb Q}}(O_A), \end{equation} which is defined on $e_2(2)$ as follows: $k(\cdot)=1\in C_0^{{\Bbb Q}}(O_A)$; $k(\{,\})=t_{12}\in C_1^{{\Bbb Q}}(O_A)$. Direct check shows that this map respects the relations in $e_2$. \begin{Proposition}\label{qis} The map $k$ is a quasi-isomorphism of operads \end{Proposition} \noindent{\em Proof}. Let ${{\frak g}}_n$ be the graded Lie algebra generated by the elements (\ref{tij}) and relations (\ref{rel}), and the grading is defined by setting $|t_{ij}|=1$. Then the universal enveloping algebra $U{\frak g}_n$ is a graded associative algebra, and $A^{pb}_n$ is the completion of $U{\frak g}_n$ with respect to the grading. The algebras $U{\frak g}_\bullet$ form an operad with the same structure maps as in $A^{pb}_\bullet$. The inclusion \begin{equation}\label{incl} U{\frak g}_n\to A^{pb}_n \end{equation} is a morphism of operads. We have a canonical projection $\chi:U{\frak g}_n\to \Bbb Q$, therefore the collection $C_{U{\frak g}_\bullet}$ forms an operad in ${\Bbb Q} Cat'$ and we have a dg-operad $C_\bullet^{{\Bbb Q}} C_{U{\frak g}_\bullet}$ which will be denoted by $C_\bullet^{{\Bbb Q}} U{\frak g}_\bullet$. The injection (\ref{incl}) induces a morphism of operads \begin{equation}\label{incl*} i_*:C_\bullet^{{\Bbb Q}}(U{\frak g}_\bullet)\to C_\bullet^{{\Bbb Q}}(O_A). 
\end{equation} It is clear that ${\rm Tor}^{A^{pb}_n}_\bullet(A^{pb}_n/I_n,A^{pb}_n/I_n)$ is the same as the completion of $H_\bullet({\frak g}_n)\cong {\rm Tor}^{U{\frak g}_n}_\bullet({\Bbb Q},{\Bbb Q})\cong H_\bullet(C_\bullet^{{\Bbb Q}}(U{\frak g}_n))$ with respect to the grading induced from ${\frak g}_n$. We have a natural injection ${\frak g}_{n-1}\to {\frak g}_n$. One sees that the Lie subalgebra $\i_n\subset {\frak g}_n$ generated by $t_{nk}$, $k=1,\ldots n-1$ is free and is an ideal in ${\frak g}_n$. Also, we have ${\frak g}_n=\i_n\oplus {\frak g}_{n-1}$ in the category of vector spaces. The Serre-Hochschild spectral sequence $E^2_{\bullet,\bullet}=H_\bullet({\frak g}_{n-1},H_\bullet \i_n)\Rightarrow H_\bullet({\frak g}_n)$ collapses at $E^2$ and shows that \begin{equation}\label{decompose} H_\bullet({\frak g}_n)\cong H_\bullet({\frak g}_{n-1})\oplus (\oplus_{k=1}^{n-1}H_\bullet({\frak g}_{n-1}))[-1], \end{equation} where the first summand is the image of $H_\bullet({\frak g}_{n-1})$ under the injection ${\frak g}_{n-1}\to {\frak g}_n$. Let us describe the remaining $n-1$ summands. Note that ${\frak g}_2$ is one-dimensional, therefore $H_0({\frak g}_2)=\Bbb Q;\ H_1({\frak g}_2)=\Bbb Q[-1]$; $H_i({\frak g}_2)=0$ for $i>1$. The induction shows that the homology of ${\frak g}_n$ is finite dimensional, therefore {\em the map (\ref{incl*}) is a quasi-isomorphism.} The operadic maps of insertion into the $k$-th position $o_k:U{\frak g}_{n-1}\otimes U{\frak g}_2\to U{\frak g}_n$, where $k=1,2,\ldots,n-1$ induce maps $o_k^*:H_\bullet({\frak g}_{n-1})\otimes H_\bullet({\frak g}_2)\to H_\bullet({\frak g}_n)$, and the $(k+1)$-th summand in (\ref{decompose}) is equal to $o_k^*(H_\bullet({\frak g}_{n-1})\otimes H_1({\frak g}_2))$. The induction argument shows that \begin{enumerate} \item[1.] 
The homology operad $n\mapsto H_\bullet({\frak g}_n)\cong H_\bullet(C_\bullet^{{\Bbb Q}}(U{\frak g}_n))$ is generated by $H_\bullet({\frak g}_2)$, therefore the homology operad of $C_\bullet^{{\Bbb Q}}(O_A(n))$ is generated by the homology of $C_\bullet^{{\Bbb Q}}(O_A(2))$. \item[2.] The total dimension of $H_\bullet({\frak g}_n)$ and of the homology of $C_\bullet^{{\Bbb Q}}(O_A(n))$ is $n!$. \end{enumerate} The first statement implies that the map (\ref{k}) is surjective on the homology level, and the second statement means that the map (\ref{k}) is bijective since ${\rm dim}\; e_2(n)=n!$. $\bigtriangleup$ Let ${\Bbb Q}(PaB_n)$ be the ${\Bbb Q}$-additive category generated by $PaB_n$. We have a map ${\Bbb Q}(PaB_n)\to C_{\Bbb Q}$ sending all morphisms from $PaB_n$ to ${\rm Id}$. Thus, ${\Bbb Q}(PaB_n)\in {\Bbb Q} Cat'$. The operadic structure on $PaB_\bullet$ induces the one on ${\Bbb Q} PaB_\bullet$. Any associator $\Phi\in A^{pb}_3$ over $\Bbb Q$ produces a map of operads $\phi:{\Bbb Q}(PaB_\bullet)\to O_A(\bullet)$. Indeed, define $\phi$ on ${\rm Ob}\; PaB_n$ by sending any object to the only object of $O_A(n)$. There are only two objects in $PaB_2$, let us denote them $x_1x_2$ and $x_2x_1$. The morphisms between these two objects correspond to the non-pure braids. Let $x\in B_2$ be the generator. We define $\phi(x)=e^{t_{12}/2}$. Take the two objects $(x_1x_2)x_3$ and $x_1(x_2x_3)$ of $PaB_3$ corresponding to the identity permutation $e\in S_3$, and the morphism $i$ between them, corresponding to the identity braid $e_b\in B_3$. Define $\phi(i)=\Phi$. Since the operad $PaB_\bullet$ is generated by $x$ and $i$, these conditions define $\phi$ uniquely. The definition of the associator is equivalent to the fact that $\phi$ is well-defined. This construction is very similar to the one from \cite{BN}. The map $\phi$ produces a map of operads $C_\bullet^{{\Bbb Q}}{PaB_\bullet}\cong C_\bullet^{{\Bbb Q}}({\Bbb Q}(PaB_\bullet))\to C_\bullet(O_A). 
It is well known that the homology operad of $C_\bullet{PaB_\bullet}$ is $e_2$. It is easy to check that $\phi$ is a quasi-isomorphism for $\bullet=2$ and hence it is a quasi-isomorphism of operads (since $e_2$ is generated by $e_2(2)$). By Proposition \ref{qis}, $k$ is a quasi-isomorphism. Thus, the chain operad ${C_\bullet{PaB_\bullet}}$ is quasi-isomorphic to $e_2$.
1,108,101,565,859
arxiv
\section{Introduction} \subsection{Background and motivation} Magnetic reconnection in plasmas plays an important role in converting magnetic energy to particle kinetic energy \cite{Sonnerup.1979,BurchandDrake.2009}. At the heart of magnetic reconnection is an electron diffusion region (EDR), where inflowing sheared magnetic fields merge, changing their connectivity \cite{Vasilyunas.1975,Sonnerup.1979,Hesse.2011}. The reconnection electric field $E_R$ cycles magnetic flux through the EDR, thereby mediating the local reconnection rate, maintaining the out-of-plane current density $J_M$, and contributing to the energy conversion rate through $J_ME_R>0$\cite{Hesse.2018}. NASA's Magnetospheric Multiscale (MMS) mission investigates reconnection in Earth's magnetosphere \cite{Burch.2016a}. In one EDR observed by MMS, a clear steady reconnection electric field, $E_R$, showed remarkable agreement with both the reconnection rate \cite{Torbert.2018,Genestreti.2018c,NakamuraTKM.2018} and electron energization rate \cite{Bessho.2018} (e.g., Fig. \ref{example}a). MMS has observed other EDRs with electron-scale peaks in the energy conversion rates that can exceed what is expected from typical values of $E_R$, in some cases by several orders of magnitude \cite{Burch.2016b,Burch.2018a,Burch.2018b,Burch.2020,Cassak.2017a,Genestreti.2017,Genestreti.2018a} (e.g., Fig. \ref{example}b). These large-amplitude energy conversion rates often originate from spatially and/or temporally oscillatory electric fields, such that $\vec{J}\cdot\vec{E}'$ displays both positive and negative values (where $\vec{E}'\equiv\vec{E}+\vec{v}_e\times\vec{B}$ is the electric field in the electron frame). We refer to such events as having ``patchy'' energy conversion rates. Patchy EDR energy conversion has been observed by MMS more commonly at Earth's magnetopause than in the magnetotail – though far fewer MMS magnetotail EDRs have yet been identified. 
Magnetopause reconnection occurs between the highly variable shocked solar wind plasma and Earth's magnetospheric plasma. Magnetotail reconnection occurs within the magnetosphere between similar plasmas. Whereas magnetopause reconnection often has pronounced asymmetries between the two inflow regions and may occur for a wide range of magnetic shear angles \cite{Fuselier.2017}, magnetotail reconnection is often more symmetric with large magnetic shear angles \cite{Eastwood.2010}. While a wide range of conditions of a reconnecting plasma may plausibly influence the structure of an EDR and its energy conversion rate, the seven parameters investigated here focus on conditions that typically differ for magnetopause and magnetotail reconnection. \subsection{Potential causes of patchy energy conversion} Asymmetries of upstream densities and magnetic field strengths can displace the inflow stagnation line and X-line \cite{CassakandShay.2007,Burch.2016b}. When the momenta of the two inflowing plasmas are imbalanced a normal-directed current $J_N$ crosses the X-line, which is unique to asymmetric reconnection \cite{CassakandShay.2007,CassakandShay.2008,PritchettandMozer.2009,Burch.2016b}. Heavier ions penetrate farther past the X-line than lighter electrons. Negative charge accumulation occurs as electrons converge on the electron inflow stagnation point, or $\mathrm{S_e}$ point for brevity. As these bunched electrons are deflected into the outflow, they meander back and forth across the low-density-side separatrix. The resulting oscillatory $J_N$ and the strongly positive co-located $E_N$ lead to spatially oscillatory $J_NE_N$ \cite{Swisdak.2018,Burch.2018a,Pritchard.2019}. Separation between the X and $\mathrm{S_e}$ lines may therefore lead to spatially patchy $\vec{J}\cdot\vec{E}'$ in EDRs. Asymmetries of upstream densities and pressures can enable cross-field density and pressure gradients at the X-line \cite{Swisdak.2003}. 
Lower-hybrid or electron drift instabilities may promote the growth of waves and turbulence around the EDR \cite{Price.2016,Ergun.2017,Ergun.2019a,Ergun.2019b,Graham.2017,Le.2017,Wilder.2019}, which may alter the local energy conversion rate in and near the EDR \cite{Price.2016,Le.2017}. The most common form of drift wave found in/near MMS-observed asymmetric EDRs\cite{Wilder.2019} is a 3-d corrugation-like surface wave that originates near the separatrices and ultimately results from an ion pressure gradient \cite{Ergun.2019a,Ergun.2019b}. Alternatively, the corrugation-like surface waves may be a branch of the lower-hybrid drift instability, in which case they are expected to be driven by electron density or pressure gradients \cite{Graham.2017,Wilder.2019}. Thus the degree of asymmetry in the density, ion pressure and/or electron pressure may lead to spatially and temporally patchy $\vec{J}\cdot\vec{E}'$. During high-magnetic-shear reconnection, highly non-gyrotropic electron velocity distribution functions form as a result of cross-field meandering motions \cite{Hesse.2014,Burch.2016b,Torbert.2018} and the energy conversion is primarily from perpendicular-to-the-magnetic-field currents and electric fields \cite{Wilder.2017}. During low-shear reconnection, electrons are free to stream along a guide magnetic field \cite{Eriksson.2016,BurchandPhan.2016,Genestreti.2017,Genestreti.2018a} and the energy conversion is primarily from parallel currents and electric fields \cite{Wilder.2017}. These unstable velocity distribution functions in low and high magnetic shear EDRs have been shown to act as a free energy source for wave growth, which may modify the energy conversion rate within EDRs \cite{Burch.2018b,Burch.2019,Dokgo.2019,Khotyaintsev.2019}. Alternatively, the guide field may stabilize the EDR against the lower-hybrid drift instability \cite{Huba.1982,Price.2020}. 
Reconnection X-lines have preferred orientations, which optimize the reconnection rate \cite{Hesse.2013,Liu.2018b}. This optimum orientation, corresponding to the solid-line $M$ direction in Figure \ref{optimal_m_sketch}, bisects the upstream magnetic fields \cite{Hesse.2013}. If reconnection is forced to occur in an orientation that is not able to efficiently reconnect the inflowing magnetic energy (dashed $M$ direction, Fig. \ref{optimal_m_sketch}), then secondary reconnection lines may develop along the optimal orientation \cite{Liu.2018b}. This can occur when the reconnecting magnetic field is time-varying or has turbulent fluctuations, which will lead to flux pileup and flux rope generation in the outflow and modulations of the reconnection and flux transport rates \cite{NakamuraTKM.2021,Spinnangr.2021}. In 3-d kinetic simulations, flux ropes often become entangled \cite{Daughton.2011,Lapenta.2015}; it has been proposed that reconnection between entangled flux ropes may be the origin of patchy parallel electric fields observed by MMS\cite{Ergun.2016c}. Therefore, the time-varying upstream magnetic field could result in patchy EDR $\vec{J}\cdot\vec{E}'$. \begin{figure} \noindent\includegraphics[width=35pc]{optimal_v_suboptimal_convert.pdf} \caption{Left: a reconnection X-line with the optimal orientation (solid-line $M$ direction) that maximizes the upstream free magnetic energy and the reconnection rate. Right: reconnection with a suboptimal orientation (dashed-line $M$ direction) reconnects the free magnetic energy inefficiently, leading to the growth of secondary reconnection lines that form with optimal orientations\cite{Liu.2018b} (i.e., solid-line $M$ direction).} \label{optimal_m_sketch} \end{figure} \subsection{Outline of this study} To identify conditions in which patchy EDR energy conversion is most likely to be driven, we perform a multi-event study of 22 MMS-observed EDRs and correlate upstream parameters with the patchiness of the energy conversion. 
We find that the upstream parameter best-correlated with the patchiness of the energy conversion is the time variability of the upstream magnetic field direction. We then perform a large, 3-d, and fully-kinetic particle-in-cell (PIC) simulation of reconnection with a time-varying upstream field. We find that the current sheet develops secondary tearing lines that have orientations that maximize the reconnection rate of varying inflow fields. Parameter definitions, methodologies for their identification, and a description of the relevant capabilities of MMS are found in section II. In section III.A we present results of the multi-event study, finding that the strongest correlation is between the patchiness of the EDR energy conversion rate and time variability of the upstream magnetic field direction. In section III.B we analyze a three-dimensional fully-kinetic particle-in-cell (PIC) simulation of reconnection with an unsteady upstream magnetic field. Finally, in section IV, we summarize and interpret these results. \section{Methodology and MMS dataset} \subsection{Overview of methodology} We seek to understand whether one or more of several of the following descriptors of the upstream plasma conditions, enumerated below, may play a predominant role in controlling the patchiness of the EDR energy conversion rate $\sigma_{J\cdot E'}$. \begin{enumerate} \item Distance along the normal direction between the X and $\mathrm{S_e}$ lines normalized by the thickness of the EDR, which is estimated as \\ \begin{equation} \delta_{XSe}/2\delta_e=\frac{n_1B_{L2}^2-n_2B_{L1}^2}{\left(B_{L1}+B_{L2}\right)\left(n_1B_{L2}+n_2B_{L1}\right)} \label{eq:dxse} \end{equation} \\ \noindent where $n$ is the number density, $B_L$ is the reconnecting component of the magnetic field, and subscripts 1 and 2 indicate the parameter is associated with one or the other inflow region\cite{Cassak.2017a}. 
\item Ion thermal pressure asymmetry $(\left<P_{i1}\right>-\left<P_{i2}\right>)/P_{i0}$, where subscripts 1 and 2 denote the asymptotic pressures in the two inflow regions and the normalization parameter $P_{i0}$ is hybrid asymptotic scalar ion pressure, assumed to follow \begin{equation} P_{i0}=n_{0}T_{i0}=\left(\frac{n_1B_2+n_2B_1}{B_1+B_2}\right)\left(\frac{n_1T_{i1}B_2+n_2T_{i2}B_1}{n_1B_1+n_2B_2}\right), \label{eq:pi0} \end{equation} \\ \noindent based on previously derived expressions for the hybrid asymptotic number density \cite{CassakandShay.2007} and temperature \cite{Cassak.2017a}. \item Electron thermal pressure asymmetry $(\left<P_{e1}\right>-\left<P_{e2}\right>)/P_{e0}$, where $P_{e0}$ follows the form of equation \ref{eq:pi0}, where angular brackets indicate time averages \item Number density asymmetry $(\left<n_{1}\right>-\left<n_{2}\right>)/n_0$, where $n_0$ is given by the left-most parenthetical term in equation \ref{eq:pi0}. \item Normalized guide magnetic field strength $B_G/B_{L0}$, $B_{L0}$ is the hybrid reconnecting magnetic field component, which follows\cite{CassakandShay.2007} \\ \begin{equation} B_{L0}=\frac{2B_{L1}B_{L2}}{B_{L1}+B_{L2}} \label{eq:bl0} \end{equation} \\ \noindent and the hybrid asymptotic guide field $B_{G}$ is assumed to follow the same form. \item Angle between the actual and optimal ($M_{opt}$) X-line orientations in the $L$-$M$ plane, where $M_{opt}$ bisects the time-averaged inflow magnetic fields \cite{Hesse.2013}. \item Angular variability in the upstream magnetic fields $\delta\theta=\left<\mathrm{acos}(\hat{B}\cdot\left<\hat{B}\right>)\right>$. 
\end{enumerate} The ``patchiness'' of the EDR energy conversion rate $\sigma_{J\cdot E'}$ is quantified as the standard deviation of the red and black curves in Figure \ref{example}, normalized by the maximum value of the red curve, i.e., \begin{equation} \sigma_{J\cdot E'}=\frac{\sqrt{\left<\left|\vec{J}\cdot\vec{E}'-J_ME_R\right|^2\right>-\left<\left|\vec{J}\cdot\vec{E}'-J_ME_R\right|\right>^2}}{\mathrm{max}\left(J_ME_R\right)}, \label{eq:sjep} \end{equation} \noindent where $\vec{E}'\equiv\vec{E}+\vec{v}_e\times\vec{B}$ is the electric field in the electron rest frame and the normalization quantity is the maximum value of $J_ME_R$ in the EDR. $E_R$ is a constant value determined as $E_R=R\left<V_{Ai0}B_0\right>$, where $R$ is the normalized reconnection rate and the theoretical maximum $R\simeq0.2$ value\cite{Liu.2017,Liu.2018a} is assumed, $V_{Ai0}$ is the hybrid asymptotic upstream ion Alfv\'en speed, and $B_0$ is the hybrid asymptotic upstream reconnecting magnetic field $B_L$. With the exception of $E_R$, all other parameters in equation (\ref{eq:sjep}) are evaluated in the EDR. Figure \ref{example}a shows an extremely laminar EDR energy conversion case while Figure \ref{example}b shows an extremely patchy event. \begin{figure} \noindent\includegraphics[width=25pc]{example_events.pdf} \caption{A comparison of the observed non-ideal energy conversion rate $\vec{J}\cdot\vec{E}'$ and the rate expected based on a uniform and constant reconnection electric field $J_ME_R$. Two events are shown, which were identified in Earth's magnetotail \cite{Torbert.2017} (left) and at the magnetopause \cite{BurchandPhan.2016} (right).} \label{example} \end{figure} If patchy EDR energy conversion results from charge accumulation at the $\mathrm{S_e}$ line then large-amplitude and spatially-oscillatory $J_NE_N'$ should contribute predominantly to the overall product $\vec{J}\cdot\vec{E}'$. 
We also define and calculate separate ``patchiness'' terms for $J_LE_L'$, $J_ME_M'$, and $J_NE_N'$, \begin{equation} \sigma_{J_i\cdot E_i'}=\frac{\sqrt{\left<\left|J_iE_i'-\delta_{iM}J_ME_R\right|^2\right>-\left<\left|J_iE_i'-\delta_{iM}J_ME_R\right|\right>^2}}{\mathrm{max}\left(J_ME_R\right)}, \label{eq:sjeplmn} \end{equation} \noindent where $i$ is $L$, $M$, or $N$ and $\delta_{iM}=1$ for $i=M$ and zero otherwise. \subsection{MMS dataset} MMS consists of four identically-equipped spacecraft that, during the periods studied here, flew in an electron-scale tetrahedral formation \cite{Burch.2016a,Fuselier.2016}. MMS science data are available in two principal modes, burst and survey, which describe the resolution of the data returned to ground. High-resolution burst-mode data are typically only available during current sheet crossings and are required for analyzing EDRs. Lower-resolution survey mode data are used when analyzing the asymptotic inflow regions. The fast plasma investigation obtains 3-d velocity distribution functions and moments of ions and electrons once per 150-ms and 30-ms, respectively, in burst mode (4.5-second cadences for both species in survey mode)\cite{Pollock.2016}. For magnetopause EDRs, mass-per-charge-separated ion composition data from the hot plasma composition analyzer \cite{Young.2016} are used to help distinguish the magnetosheath, magnetosphere, and mixed boundary layer plasmas. Comparatively high He$^{++}$ and negligible O$^+$ concentrations are expected in the asymptotic upstream magnetosheath, while the opposite is expected in the magnetosphere inflow region. 3-d electric and magnetic field measurements are obtained by the electric field double probes\cite{Lindqvist.2016,Ergun.2016a} and fluxgate magnetometers\cite{Russell.2016}, respectively. Burst-mode electric field data are available at 8,192 Hz. Survey-mode magnetometer data are available at 8 Hz. 
The particle moments from the fast plasma investigation are used to calculate current densities uniquely at each of the four spacecraft\cite{Phan.2016a}. $\vec{J}\cdot\vec{E}'$ is also calculated uniquely at each spacecraft and is smoothed to remove sub-$d_e$-scale oscillations. \subsection{Analysis methods and event selection criteria} First we identify EDR events. Throughout this paper EDR refers to the ``central EDR'', which is distinguished from the extended electron jet region often referred to as the ``outer EDR'' \cite{Phan.2007,Chen.2008}. Generally speaking, the central EDR is where field lines merge \cite{Hesse.2011,Zenitani.2011,Burch.2016a,Torbert.2018}. We started with 36 EDR events, 34 of which were identified at the dayside magnetopause\cite{Chen.2016,Chen.2017,Lavraud.2016,Burch.2016b,BurchandPhan.2016,Webster.2018,Ergun.2017,Torbert.2017,Genestreti.2018a,Pritchard.2019,Li.2020,Burch.2020} and 2 in the magnetotail\cite{Torbert.2018,Zhou.2019}. Next, we require that MMS observed both asymptotic inflow regions for several minutes. The trajectory MMS takes through an EDR depends almost entirely on the time-dependent motion of the EDR, which varies from event to event. In some cases, MMS does not fully cross the EDR into one inflow region; these events are discarded, leaving 27 EDRs. Three events for which plasma parameters during an inflow interval could not clearly be associated with the EDR interval (e.g., when large rotations in the upstream magnetic field were observed during the crossing) were discarded, leaving 24 EDRs. Average $LMN$ coordinates were determined for these 24 EDRs. Here, average specifies that a single coordinate system is used to define an EDR interval, whereas the axes may vary during the crossing \cite{Denton.2018}. The maximum directional derivative of $\vec{B}$ (MDD-B) technique\cite{Shi.2005} was used to identify the EDR current sheet normal $N$. 
For some events, MDD-B did not find a stable normal direction; in these cases, maximum variance of the electric field\cite{Paschmann.1986,Sonnerup.1987} (MVA-E) was used to identify $N$. Maximum variance of the magnetic field\cite{SonnerupandCahill.1967} (MVA-B) was then used to determine a direction $L^*$. $M$ was then evaluated as $N\times L^*/\left|N\times L^*\right|$ and $L=M\times N$. Similar hybrid techniques for finding LMN coordinates have been used previously \cite{Genestreti.2018c,Denton.2018}. Two events were discarded because EDR coordinates could not be confidently established, leaving 22 total EDR events for this study (20 magnetopause and 2 magnetotail events). We use Spearman's $\rho$ coefficient to evaluate the strength of the correlations between the patchiness of the energy conversion in our 22 EDRs with the seven parameters enumerated in the list of section II.A. $\rho$ is a non-parametric measure of the strength with which two variables are associated \cite{myers2013research}. This approach was chosen because (1) the magnitude of $\rho$ is not strongly influenced by outlying data points and (2) we do not have to assume any particular functional form describing the relationships between the patchiness and the seven parameters; rather, only a monotonic relationship is assumed. We refer to correlations with $|\rho|\leq1/3$ as weak, $1/3\leq|\rho|\leq2/3$ as moderate, and $|\rho|\geq2/3$ as strong. We also evaluate a confidence interval for each correlation, i.e., the probability that a non-zero correlation is not the result of random chance, which is based on the sample size (22 EDRs) and the strength of the correlation ($=1-\rho\sqrt{2}$). We adopt a ``95$\%$ rule'', meaning that only correlations with $\geq$95$\%$ confidence (2$\sigma$) are deemed significant. \section{Results} \subsection{Multi-event study} Figure \ref{scatter1}a-e show the first five parameters in the enumerated list in section II.A. 
Of these five parameters the separation between the X and $\mathrm{S_e}$ lines (Fig. \ref{scatter1}a), as defined in equation \ref{eq:dxse} is the only parameter moderately and significantly correlated with the patchiness of the energy conversion. A note of caution is required, however, regarding the clustering of data points in the parameter space of figure \ref{scatter1}a. Since we do not have enough EDRs to control for all parameters simultaneously, it is not possible to discern whether the separation of magnetotail (two bottom/left-most data points in Fig. \ref{scatter1}a) and magnetopause (twenty right-most data points in Fig. \ref{scatter1}a) EDRs are due to unique aspects of reconnection caused by X and $\mathrm{S_e}$ line separations or due to other differences between the magnetopause and magnetotail current sheets. However, when the two outlying magnetotail data points are excluded, the correlation coefficient and confidence drop only slightly to 0.5 and 97$\%$, respectively, meaning that the correlation is still moderate and significant. Figure \ref{scatter1}f shows the component-specific patchiness parameter of equation \ref{eq:sjeplmn}. If charge accumulation at the $\mathrm{S_e}$ line was the predominant cause of patchy energy conversion, then the energy conversion rates of patchier events is expected to be dominated by $J_NE_N$. However, there is no clear dominance of the patchiness of $J_LE_L'$ (blue), $J_ME_M'$ (green), and $J_NE_N'$ (red) to the overall patchiness of $\vec{J}\cdot\vec{E}'$. \begin{figure} \noindent\includegraphics[width=30pc]{internal_source_scatter_plots_v4.pdf} \caption{Vertical axes are $\sigma_{J\cdot E'}$ defined in equation \ref{eq:sjep} for 22 EDRs. 
Horizontal axes are the normalized (a) separation between the X and electron stagnation ($\mathrm{S_e}$) lines, a derived quantity based on the inflow magnetic field and density asymmetries, (b) the patchiness of $J_LE_L'$ (blue), $J_ME_M'$ (green), and $J_NE_N'$ (red), as defined in equation \ref{eq:sjeplmn}, (c) scalar ion thermal pressure asymmetry, (d) scalar electron thermal pressure asymmetry, (e) density asymmetry, and (f) guide field strength, the definitions of which are found in the numbered list in section II.A. (a) and (c)-(f): Spearman correlation coefficients for and their confidence values are in the upper left of each panel, magnetopause EDRs are colored orange and magnetotail EDRs are purple.} \label{scatter1} \end{figure} Weak correlations were found between the patchiness of the energy conversion and the ion (Fig. \ref{scatter1}b) and electron (Fig. \ref{scatter1}c) thermal pressure asymmetries, the density asymmetry (Fig. \ref{scatter1}d), and the guide field strength (Fig. \ref{scatter1}e); all correlations were below our $95\%$ confidence threshold for significance. Observations and simulations suggest that these parameters may play a role in modulating the energy conversion rate at or very near the EDR, at least in some limiting circumstances. Since we are unable to control for all parameters simultaneously, the results of Fig. \ref{scatter1} may only be interpreted as evidence that these parameters do not exert a singular or predominant influence on the patchiness of the EDR energy conversion, over the parameters' ranges typically found in the magnetosphere. The final two parameters from section II.A are shown in figure \ref{scatter2}a and \ref{scatter2}b: the angle between the actual EDR $M$ and optimum $M_{opt}$ directions and the angular variability of the upstream magnetic field, respectively. 
Errors in the EDR coordinate axes determined with the hybrid MDD-B/MVA technique may be $\sim$4$^\circ$-to-10$^\circ$ based on previous MMS case analyses \cite{Denton.2018,Genestreti.2018c}. We find that most of the EDRs are separated from the optimum $M_{opt}$ direction by angles less than our assumed $10^\circ$ of uncertainty. \begin{figure} \noindent\includegraphics[width=25pc]{external_source_scatter_plots_v3.pdf} \caption{Vertical axes are $\sigma_{J\cdot E'}$ defined in equation \ref{eq:sjep} for 22 EDRs. Horizontal axes are: (a) the angular difference in the $L-M$ plane between the EDR $M$ direction and the optimum $M$ direction, which bisects the time-averaged upstream magnetic fields and (b) the angular variability of the upstream magnetic field direction. Spearman correlation coefficients and confidence values are in the upper left of each panel. Magnetopause EDRs are colored orange and magnetotail EDRs are purple.} \label{scatter2} \end{figure} The parameter most strongly and significantly correlated with $\sigma_{J\cdot E'}$ is the time variability of the upstream magnetic field direction (Figure \ref{scatter2}b). This correlation may indicate that, while the EDR may be fairly well aligned with the time-averaged optimum $M_{opt}$ direction, time variations in $M_{opt}$ may also lead to secondary tearing growth. This result is in good agreement with recent 2-dimensional particle-in-cell simulations\cite{NakamuraTKM.2021} of reconnection with fluctuating magnetic fields. In the following section we investigate this result further by analyzing a 3-dimensional simulation of reconnection with a non-uniform inflow magnetic field. \subsection{Simulation of reconnection with varying inflow conditions} A three-dimensional fully-kinetic simulation was performed to investigate the behavior of reconnection under non-uniform inflow conditions. The simulation was run using the electromagnetic particle-in-cell code {\it VPIC} \cite{Bowers.2008}. 
The initial magnetic field profile of the primary asymmetric current sheet was taken from a previous work\cite{Liu.2018b}; however, a tangential discontinuity (TD) was added in the upstream magnetosheath (see Figure \ref{picsetup}b). The TD convects with the inflow toward the X-line, meaning the spatial variations in the inflow field translate to time-varying boundary conditions for the diffusion region. The upstream TD was an ion-scale rotation of the inflow magnetic field by $45^\circ$, which was chosen to loosely match the largest variations in the upstream field direction for the event of Figure \ref{example}b. To reduce turbulence resulting from periodic conditions at the $M$ boundaries, the simulation box was oriented such that the optimal $M_{opt}$ direction of the primary reconnecting current sheet was aligned with the simulation $M$ coordinate\cite{Liu.2018b}. A full description of the simulation set-up is provided in Appendix A. We limit our investigation to a single time of the simulation, $t=128/\Omega_{ci}$ (where $\Omega_{ci}$ is the ion cyclotron period), which is roughly 50$\Omega_{ci}$ after the TD convected into the diffusion region. At this time, strong $\vec{J}\cdot\vec{E}$ resulting from the initial conditions were no longer apparent. At $t=128/\Omega_{ci}$ some readily identifiable impacts of the time-varying inflow appear in the simulation. Figure \ref{sim1}a shows a cut through the $L-M$ plane at $N/d_e=1$, the approximate location of the primary X-line. In the $L-M$ plane, reconnection lines are identified as dividing lines that separate oppositely-directed $B_N$. Secondary tearing lines, shown in Figure \ref{sim1}a, are likely a result of the TD impact. 
The axes of the secondary tearing lines $M'$ are tilted by roughly $45^\circ$ relative to the primary X-line $M$, which is consistent with the expected optimal $M$ direction (along the line bisecting the upstream fields) after the $45^\circ$ rotation of the upstream $\vec{B}$ associated with the TD. Persistent features associated with the primary X-line, which was oriented in the optimal direction under the initial upstream conditions, appear simultaneously with the secondary tearing modes. \begin{figure} \noindent\includegraphics[width=25pc]{pic_results_v2.pdf} \caption{The (a) reconnected component of the magnetic field and (b) electron-frame energy conversion rate in the simulation $L-M$ plane.} \label{sim1} \end{figure} \section{Summary, interpretation of results, and future work} The overarching goal of this study was to determine the origin of patchy non-ideal energy conversion rates $\vec{J}\cdot\vec{E}'$ commonly found in MMS-observed electron diffusion regions (EDRs). We examined 36 EDRs, finding 22 that were suitable for a multi-event study. The patchiness of the energy conversion rate was quantified by $\sigma_{J\cdot E'}$, as defined in equation \ref{eq:sjep}, which is the difference between the MMS-observed energy conversion rate and the rate expected from a uniform, steady reconnection electric field with a normalized strength of 0.2. The patchiness of the energy conversion was then correlated with seven parameters describing the geometry of the diffusion region and its upstream conditions: the (1) separation between the X and electron stagnation ($\mathrm{S_e}$) lines, a function of the magnetic field and density asymmetry, (2) ion scalar pressure asymmetry, (3) electron scalar pressure, (4) density asymmetry, (5) guide field strength, (6) the angle between the average EDR and optimum $M$ directions, the latter being the line bisecting the time-averaged upstream magnetic fields, and (7) the time variability of the upstream field. 
The principal findings from the multi-event study are: \begin{enumerate} \item The patchiness of the energy conversion rates in our EDR events is not correlated with the density asymmetry, ion and electron pressure asymmetries, nor the guide field strength. \item A moderate and significant correlation is observed between the patchiness of the EDR energy conversion and the separation between the X and $\mathrm{S_e}$ lines, which is a function of the magnetic field and density asymmetry. There is no clear dominance of $J_LE_L'$, $J_ME_M'$ or $J_NE_N'$ in EDRs with patchy energy conversion. \item The majority of EDRs have an average $M$ direction within (10$^\circ$) uncertainty bars of being aligned with the optimum direction, which bisects the time-averaged upstream magnetic fields and maximizes the reconnection rate. \item The best correlation is observed between the patchiness of the EDR energy conversion and the time variability of the upstream magnetic field direction. \end{enumerate} A three-dimensional particle-in-cell (PIC) simulation was performed to investigate the behavior of reconnection with non-uniform inflow conditions. Reconnection began along a primary X-line, which had an optimum orientation that bisected the initial upstream field, thereby maximizing the initial reconnection rate. After a tangential discontinuity impacted the diffusion region and the immediately-upstream magnetic field rotated by 45$^\circ$, secondary tearing lines developed, which radiate from the primary X-line at an angle consistent with the change in the magnetic shear (by 45$^\circ$). Due to high noise levels, which may have been due in part to an initial state of disequilibrium, we were not able to quantify the patchiness of the energy conversion during the TD impact in the simulation studied here. 
We interpret the findings in the following way: of the sources studied here, the predominant source of patchiness in the EDR energy conversion rate is the time variability of the inflowing magnetic field directions. The causal relationship may be due to the formation of secondary tearing lines, which develop from a primary tearing line in unsteady inflow conditions, as was seen in the simulation. Whereas the direction of the primary reconnection line seems to be (at least, most commonly) set by the direction that bisects the time-averaged upstream fields, the growth of secondary tearing lines may be the mechanism that maximizes the reconnection rate under time-varying inflow fields. This is just one possible interpretation, since no clear enhancement in $\vec{J}\cdot\vec{E}'$ was observed at the simulated secondary tearing lines. It is possible the single clean variation in the simulated magnetic field was not complex enough in its structure to lead to entangled flux rope formation~\cite{Ergun.2016c,Daughton.2011,Lapenta.2015} and discernibly patchy $\vec{J}\cdot\vec{E}'$. Additionally, it is possible that the initially noisy $\vec{J}\cdot\vec{E}'$, found early in the simulation, did not provide an adequate benchmark with which patchy $\vec{J}\cdot\vec{E}'$ could be identified. This interpretation is comparable to findings from previous works, which used two-dimensional PIC simulations and found that the growth of secondary tearing lines and modulations in the reconnection rate result from time-varying inflow magnetic field configurations \cite{NakamuraTKM.2021,Spinnangr.2021}. In comparison to the aforementioned two-dimensional pictures, we suggest that the secondary tearing lines may form with oblique (3-d) geometries such that the reconnection rate is maximized for the time-varying field. Our interpretation and findings are also comparable with earlier MMS-based investigations. 
These studies suggested that patchiness in the reconnection rate may lead to the formation of tangled flux ropes, which, in turn, may reconnect with one another and generate patchy and large-amplitude electric fields \cite{Ergun.2016c}. Further simulation work is needed to develop a quantitative relationship between unsteady inflow magnetic fields and patchy reconnection. In addition to existing studies of two-dimensional simulations, three-dimensional simulations should be conducted to determine whether entanglement and reconnection of secondary flux ropes lead to enhanced energy conversion rates. One question that cannot be answered at present is whether or not patchy electron-scale reconnection has a discernible impact on reconnection at larger scales. It has recently been suggested that at/above ion scales, reconnection at Earth's magnetopause appears to have a continuous global-scale structure \cite{Fuselier.2021}. Reconciling the patchiness of reconnection at electron-scales with the apparent continuous and quasi-two-dimensional nature of reconnection at much larger scales may be possible in the near future as, in its current extended mission, the inter-spacecraft separations will be increased such that MMS will be able to resolve electron and ion-scales simultaneously. \begin{acknowledgments} We acknowledge the contributions made by many MMS team members to the success of the mission and the accessibility and high quality of the MMS data. This study has used several routines from the Space Physics Environment Data Analysis System \cite{spedas} and has benefited from conversations with Dr.~Michael Hesse, Dr.~Richard Denton, Dr.~Paul Cassak, and Dr.~Dominic Payne. The simulation is performed on Frontera at Texas Advanced Computer Center (TACC). KJG is supported by NASA grant 80NSSC20K0848. XL and YL are supported by MMS grant 80NSSC18K0289. 
\end{acknowledgments} \section{Data Availability} MMS data are publicly available at \url{https://lasp.colorado.edu/mms/sdc/public/}. The simulation is available upon request to Xiaocan Li (\url{[email protected]})
1,108,101,565,860
arxiv
\section{Hardware in context}\label{sec:intro} Radio-frequency (RF) signal generators are devices designed to produce continuous and pulsed signals in the RF and microwave domains with defined and adjustable frequency, phase and amplitude. Often they provide one or several methods to modulate these properties in order to create a continuous-wave (CW) output, single pulses, pulse trains or more complex waveforms. These devices have a wide-range of applications, both in industrial, commercial and laboratory settings, spanning from wireless communication, automated test equipment, imaging and spectroscopy for healthcare to experiments in neuroscience \cite{Juutilainen2011,Meneghetti2020,Yaghmazadeh2022}, biophysics \cite{DuemaniReddy2008,Corsetti2021}, microscopy \cite{Saggau1998,Duocastella2020}, particle and nuclear physics \cite{Demarteau2016}, atomic and molecular physics \cite{Pruttivarasin2015,Donnellan2019,Bertoldi2020}, and quantum simulation and computing \cite{Arute2019,Altman2021,Amico2021}. There is no single general purpose instrument that is able to cover such breadth of scopes and technical requirements, instead many open-source and commercial solutions have been developed to address one or several tasks, with their specific strengths, limitations and cost. Recently, RF equipment has found vast application in industrial manufacturing, atomic physics and microscopy to precisely steer laser beams using RF-driven acousto-optical devices \cite{Duocastella2020,Gavryusev2019,Ricci2020,Ricci2022} and to control processes or states of matter that are sensitive to this frequency domain \cite{Arute2019}, either directly through electromagnetic radiation emitted by an antenna or indirectly by driving electronic or opto-electronic equipment. 
Achieving a fine degree of control often requires generating complex sequences of single RF tones, multi-frequency waveforms and rapid frequency sweeps that interleave on microsecond timescales and span a wide band of several hundred $\si{\mega\hertz}$. Most often a single RF output is not sufficient and typically four channels or more have to be used concurrently, while respecting stringent phase-coherence and sub-$\si{\micro\second}$ synchronization conditions. Arbitrary waveform generators (AWG) are a very flexible class of RF devices that can fulfill these demands. Commercial solutions \cite{Signatek,SpectrumInstrumentation,Allcock2021} tend to have a substantial Cost Per Channel (CPC) in the range of 2000-3000 \euro, while laboratory developed solutions \cite{Baig2013,Bowler2013,Govorkov2014} are more affordable. These projects are based on field-programmable gate arrays (FPGA) that are very flexible and adaptable to changing requirements, but often their frequency bandwidth is limited to a few tens of $\si{\mega\hertz}$. Besides, they present usability constraints because their software interfaces are often not trivial to program and integrate into existing laboratory control systems. RF devices that employ Direct Digital Synthesis (DDS) to produce high spectral purity single-tones and frequency sweeps are a second valid class of solutions. Current state-of-the-art technology allows the manufacture of DDS chips that can synthesize $\SI{1.4}{\giga\hertz}$ sine-waves, such as the Analog Devices (AD) AD9914 \cite{AD9914,AD9914PCBZ}, or even reach $\SI{4.2}{\giga\hertz}$ \cite{Zhang2014}, but their CPC is rather high in the range of 800-1000 \euro. More affordable generators are built using one or several DDS chips that cost approximately 50 \euro\space each and can provide a $\SIrange{200}{400}{\mega\hertz}$ output, such as the four channel AD9959 \cite{AD9959,AD9959PCBZ} and the single channel AD9910 \cite{AD9910,AD9910PCBZ} chips. 
Many commercial products have been developed using these or similar DDS technologies and the most accessible multi-channel equipment \cite{GraAndAfch,Novatech,ModularSystemControls,AAOptoElectronic} has a CPC within 50-200 \euro, but is severely limited in the Output Reprogramming Rate ($ORR \leq \SI{10}{\kilo\hertz}$). Higher speed RF drivers reach an $ORR\geq \SI{1}{\mega\hertz}$ by leveraging a built-in memory that can store thousands of consecutive settings received from a computer, allowing output stabilization and amplification, and complex modulation schemes, with the downside of a significantly increased CPC ranging from 500 \euro \cite{MoglabsQRF,SinaraUrukul,Kasprowicz2022} to 2000-3000 \euro \cite{SpinCoreDDS300,SpinCoreDDS1000,Wieserlabs,WieserlabsDual,MoglabsXRF}. Several research groups have developed in-house DDS-based signal generators to reach an affordable CPC of 250-400 \euro, while preserving specifications comparable to the best commercial products or even adding customized functionality, such as digital input-output channels or a complete experiment timing and control system \cite{Pruttivarasin2015,Perego2018,Prevedelli2019,Donnellan2019,Bertoldi2020,Allcock2021}. Like for AWGs, FPGAs have been used to program and control the DDS chips with $ORR\geq \SI{1}{\mega\hertz}$, sub-$\si{\micro\second}$ jitter triggering and providing a large command memory. Some designs have been presented without disclosing the implementation details \cite{Liang2009,Li2016,Perego2018,Donnellan2019}, while others have released all material as open-source \cite{Pruttivarasin2015,Bertoldi2020,Kasprowicz2022}. Whilst complying with the license terms, the latter choice enables any user to reproduce, enhance and adapt the equipment to their own specific requirements way beyond what a proprietary solution can allow, while substantial economic savings may be obtained \cite{Pearce2020}. 
Furthermore, the original developers receive recognition for their work and may benefit from improvements contributed by the wider community of users. Recently, the development of microcontroller units integrated into a development board (MCU) with ARM central processing units (CPU) running at clock rates of tens of $\si{\mega\hertz}$ has paved the way to an alternative to FPGAs for achieving negligible jitter, substantial command storage capacity and $ORR\geq \SI{1}{\mega\hertz}$. This approach presents several advantages: MCUs such as the broad Arduino-compatible family of devices \cite{Arduino} are programmed in C++, instead of requiring knowledge of the more specialized VHDL and VERILOG languages, which lowers the usage barrier and vastly broadens the potential user community. Next, MCUs have longer product lifetimes than FPGAs and provide notably greater software and hardware compatibility between different product generations, including application programming interfaces, communication buses and even dimensional blueprints which even enable drop-in replacements or upgrades. This feature stimulates the creation and continued support of extensive software libraries that ease the firmware development and of interoperable off-the-shelf hardware plugins and extensions. Additionally, MCUs provide several standardized communication interfaces that may allow interacting with arbitrary devices under control (DUC) that present the same protocol, such as the Inter-Integrated Circuit (I\textsuperscript{2}C) and Serial Peripheral Interface (SPI). Interestingly, this may obviate the need of realizing custom printed circuit boards (PCB) to pilot a DDS or another DUC. All together these aspects help to reduce development time and CPC expenditure. 
This paper presents an open-source low-cost Arduino-based control system that can store millions of commands received from a computer (PC) via Universal Serial Bus (USB) and perform reliable high-speed programming of an arbitrary device under its control through a single- or quad-wire SPI. We use a Teensy 4.1 development board \cite{Teensy41} as the MCU. The Teensy 4.1 is an Arduino-compatible development board with an ARM Cortex-M7 CPU running at $\SI{600}{\mega\hertz}$, which grants it several times more computing power than provided by the $\SI{84}{\mega\hertz}$ ARM Cortex-M3 CPU of an Arduino Due \cite{ArduinoDue}. The controller software architecture operates as a real-time state machine, making it easily extensible and adaptable to any DUC. Each configuration change can be triggered either externally or internally, with rates up to $\approx\SI{200}{\kilo\hertz}$ when using the standard SPI library. Outstandingly, if the standard single-wire SPI library is replaced by a custom implementation that benefits from port masking optimizations, the $ORR$ rate doubles in single-wire SPI mode and reaches $\approx\SI{1}{\mega\hertz}$ with quad-wire SPI communication, surpassing all prior MCU-based solutions \cite{ModularSystemControls,GraAndAfch}. Leveraging this flexible system, we developed a low-cost programmable four-channel RF signal generator, based on an Analog Devices 9959 evaluation board \cite{AD9959,AD9959PCBZ}, and demonstrated its capability, validating its performance for all use-cases that require $ORR\leq \SI{1}{\mega\hertz}$, low jitter and stand-alone operation with large command memory. The total system cost is currently 641.79 \euro, resulting in a notably low CPC of 160.45 \euro. First, we present an overview of the hardware and software that composes our system. Then, all design files and materials are provided and discussed, along with build and operation instructions. 
Finally, we measure and validate the performance of our open-source programmable four-channel RF signal generator controlled by an Arduino-based MCU. \section{Hardware description}\label{sec:hw_descr} \subsection{Overview}\label{sec:hw_descr:overview} \begin{figure}[!ht] \centering \includegraphics[width=1\textwidth]{MCU_DUC.pdf} \caption{Block diagram of the MCU-based system, designed to pilot an arbitrary device-under-control via single-wire or quad-wire SPI, while receiving commands from a computer via USB. The MCU is programmed to run a real-time state machine that reacts to external events and provides a fast command memory for the DUC.} \label{fig:scheme} \end{figure} The design of the programmable four-channel RF signal generator that we developed is based on a general purpose and flexible architecture made of four main elements: the user determines on a PC the set of commands and settings that the MCU has to program into the DUC with precise timing supplied either internally or externally through a timing system. The MCU software implements a real-time state machine that can receive new settings and commands during run-time, store them in a fast internal memory, control the DUC and react to external events. The block diagram of the system is shown in Fig.~\ref{fig:scheme} and its elements will be presented in detail in the following. \subsection{Hardware components}\label{sec:hw_descr:hw} The central hardware element of our architecture is the MCU hosted on a development board because it interfaces the PC with the DUC and acts as a hardware and software abstraction layer, lifting from the user the need of knowing the technical details of the device to be controlled. Thanks to its built-in memory, the MCU can work both in tandem with the PC, receiving new commands during run-time, or standalone after storing the sequence of commands and settings to be applied to the DUC. 
Furthermore, it can be either externally triggered or perform timing functions itself since almost all microcontroller development boards provide many digital and some analog input and output (IO) channels. We chose to use a Teensy 4.1 development board \cite{Teensy41} as the MCU because it is currently the most powerful Arduino-compatible solution \cite{Arduino}. Its software is programmed in C++ (described in detail in the following Sec.~\ref{sec:hw_descr:soft_mcu}), a language which many developers and researchers can work with, granting a large potential user community, instead of the more specialized VHDL and VERILOG languages that are required to operate most FPGAs. The Teensy has an ARM Cortex-M7 CPU with a floating-point math unit running at $\SI{600}{\mega\hertz}$ which provides several times more computing power than the $\SI{84}{\mega\hertz}$ ARM Cortex-M3 CPU of an Arduino Due \cite{ArduinoDue}. This enables processing of input data and IO communications with sub-$\si{\micro\second}$ timescale latency and jitter, both via USB 2.0 (with the PC) and SPI interfaces (with the DUC) or I\textsuperscript{2}C. It has a total of 55 IO pins with different characteristics, making it capable of reacting to external events and providing precise triggering to the DUC. Several sets of four IO pins can be toggled together very quickly using a hardware-based port mapping optimization, a feature that we leveraged to implement a custom single- and quad-wire SPI that significantly increased the data rate with the DUC, as demonstrated in Sec.~\ref{sec:val}. Furthermore, this MCU has a large memory subsystem consisting of a 1024K random-access-memory (RAM), 7936K Flash, 4K EEPROM and with the option of QSPI memory expansion up to 16~MByte by soldering two extra RAM chips. Notably, the interaction with the PC can be realized not only via USB, but also through the Ethernet 10/100 Mbit interface. 
This option has not been implemented in our project, but should be easy to add through an existing library. The computer has no special hardware requirements and can run any operating system that supports the Python Jupyter notebook software package and USB or Ethernet communication. The timing system has to provide five interrupts for the MCU and five triggers for the DUC (as shown in Fig.~\ref{fig:pinconn} and presented in detail in Sec.~\ref{sec:build_instr}), totaling a requirement of ten digital transistor-transistor logic (TTL) channels, and can be either a standalone device or a board integrated in the PC, such as the National Instruments (NI) board PCI-6251 \cite{NationalInstrumentsPCI6251} that we used in our implementation. This PCI Multifunction I/O Device has 16 analog inputs (16-Bit, 1.25 MegaSamples/s), 2 analog outputs and 24 digital IO, out of which 8 can be timed up to 10 MHz. One of its TTL lines is logically combined through an OR gate circuit (Fig.~\ref{fig:schematicaux}(B)) with a TTL line from the MCU to generate a logic signal that triggers the output update on the DUC. The MCU can be engineered and programmed to control any DUC that provides a supported interface, such as SPI and I\textsuperscript{2}C. Since we aimed to realize an open-source low-cost programmable four-channel RF signal generator, we leveraged the capabilities of the Arduino-based control system to drive an AD9959/PCBZ evaluation board \cite{AD9959PCBZ}. This off-the-shelf equipment uses an AD9959 DDS chip \cite{AD9959} which provides four synchronous RF outputs that can reach $\SI{200}{\mega\hertz}$, generate single tones and linear frequency/phase/amplitude sweeps, and has independent frequency/phase/amplitude control. This RF source presents a narrow output spectrum with low phase-noise and it has $\SI{12}{\hertz}$ or better frequency tuning resolution, 14-bit phase offset resolution and 10-bit output amplitude scaling resolution. 
To operate, it requires two supply voltages ($+\SI{1.8}{\volt}$ DDS core and $+\SI{3.3}{\volt}$ serial I/O) and a stable and spectrally narrow sinusoidal frequency reference that can be provided either by soldering on-board a crystal oscillator or supplying a clock signal externally. The clock signal is fed into a phase-locked loop (PLL) where a selectable $4\times$ to $20\times$ REF\_CLK multiplier is applied to generate the internal clock signal that provides timing to all internal components of the DDS chip and determines the maximum RF frequency that can be generated without incurring spectrum distortions, which is 40\% of this rate. As clock signal source, we used an external $\SI{25}{\mega\hertz}$ temperature compensated crystal oscillator with a frequency stability of $\pm 280$ parts per billion, assembled following the schematic presented in Fig.~\ref{fig:schematicaux}(A). The MCU is powered directly by the PC through the USB cable, while all other components of our programmable four-channel RF signal generator receive the required $+\SI{3.3}{\volt}$ and $+\SI{1.8}{\volt}$ supply voltages from the power supply circuit that consists of an AC/DC and a DC/DC converter, as depicted in Fig.~\ref{fig:schematicaux}(C). \subsection{MCU software}\label{sec:hw_descr:soft_mcu} The MCU software loaded into the Teensy 4.1 board implements a real-time state machine able to listen to any incoming serial USB communication from the PC, and to execute a set of predefined commands in response to external interrupt events. Here we will present its architecture in more detail. The \textit{setup()} function initializes the state machine when the software is first loaded into the MCU board and configures the digital pins needed to interact with the DUC and the external timing system. Moreover, upon setup a hard reset signal is issued to the DUC to force its internal registers to their default state. 
Next, depending on the selected reference clocking configuration, the internal PLL-based clock multiplier factor is optionally set by programming the Function Register 1 (FR1) of the DUC via SPI. Finally, the serial USB communication between the PC and the MCU board is initialized, and external interrupt requests (IRQs) are enabled. The I/O block handling the reception of subsequent DUC settings from the PC is realized by two functions that are executed consecutively within the \textit{loop()} function of the MCU sketch. Respectively, they read and parse incoming data strings, and push the received DUC configurations into the first in, first out (FIFO) buffers allocated for each of the four DDS channels. Similarly, there are separate sets of FIFO buffers designated for each specific channel register to be programmed into the DUC (refer to the AD9959 datasheet for a complete description of these registers). The size of the FIFO buffers was set to 4500 elements to maximize the usage of the dynamic memory normally accessible on the Teensy 4.1 board (RAM1). This size corresponds to the maximum number of output configurations that can be activated on the DUC without the need for a new USB communication when all four RF channels are simultaneously updated and operated in frequency sweep mode, which represents the most memory-consuming scenario. If such use-case would not be foreseen, the number of elements could be increased by adapting the software to the expected workload. Furthermore, this threshold may be heightened by exploiting the secondary RAM space (RAM2) through dynamic memory allocation via the \textit{malloc()} function and by QSPI memory expansion, as mentioned in the previous subsection. 
Before writing data to these memory buffers, the received floating point frequency values (namely single-tone frequencies, frequency sweep limit frequencies and step sizes) are converted to the tuning data words to be programmed into the respective internal registers of the DUC. In fact, performing this conversion directly when receiving the data strings from the PC enables a faster refresh of the DUC, increasing the maximum achievable $ORR$. External interrupt events are used to react asynchronously to specific user commands set on the PC and they are disabled during the USB and SPI communication sessions. They are activated on the rising edge of the toggling of the designated interrupt pins that are connected to the external timing lines, as presented in detail in Sec.~\ref{sec:build_instr}. The interrupt service routines (ISRs) are the following: \begin{itemize} \item \textit{initSingleSPI()}: enables the custom single-wire SPI communication mode (default), with the simultaneous control of the designated SDIO\_0 and SCLK pins implemented via direct digital port manipulation; \item \textit{initQuadSPI()}: enables the custom quad-wire SPI communication mode, which uses an efficient hardware implementation based on the fast simultaneous control of the designated SDIO pins via direct digital port manipulation; \item \textit{softResetMCU()}: resets all MCU state variables to their default values, and clears the content of the FIFO buffers that store DDS tuning words previously received from the PC and not yet activated on the DUC; \item \textit{hardResetDUC()}: issues a master reset pulse on the active-high reset pin of the DUC, reinitializing its internal registers to their default state. Afterwards, all channels are set to the single-tone mode of operation with their default 0x00 frequency tuning words, i.e. $\SI{0}{\mega\hertz}$. 
Then, if required, the routine programs the PLL multiplier factor back to the user-set value; \item \textit{updateDUC()}: this routine first retrieves and interprets a channel configuration byte header associated with each data string communicated to the MCU, properly adjusting the mode of operation (i.e., single-tone or linear frequency sweep) of the relevant channels which need to be updated. More specifically, the least significant nibble of this header points out the channels operating in frequency sweep mode, whereas the most significant one identifies the reprogrammed channels, in accordance with the structure of the DUC \textit{channel selection register}, for a more efficient bit manipulation. Next, the ISR reads the new single-tone or sweep tuning words from the FIFO buffers of the channels which need to be updated, and transfers them to the DUC via SPI. In order to activate the received tuning words and effectively change the signal generator output, a trigger must be issued on the I/O update pin of the DUC after the SPI communication is complete. We implemented two options to generate this TTL: either the NI provides it, which requires careful synchronization with the execution of the update ISR, or the MCU itself supplies a pulse at the end of the data transfer. This latter approach, which we termed ``auto update'' mode, requires activating a variable in the source code to be engaged, as described in item 7 of Sect.~\ref{sec:op_instr}. Both options can work alternatively without hardware modification by using the digital logic OR gate circuit. \end{itemize} \subsection{Computer software}\label{sec:hw_descr:soft_pc} The Jupyter notebook included in the software repository provides an easy-to-use user interface, written in Python 3.8, which allows communicating to the MCU board the frequency settings to be consecutively activated on the DUC. 
All the functions included in the notebook are exhaustively documented, with docstrings complying with the PEP 257 convention. As detailed in Section~\ref{sec:op_instr}, users only have to edit the configuration lists within the ``Input to AD9959 DDS'' cell at the top of the notebook to customize the desired sequence of DUC settings. Whereas the specified single-tone frequency values can be directly transferred to the board, the generation of linear frequency sweeps by the AD9959/PCBZ evaluation board requires a conversion step of the user input. The desired positive or negative frequency slopes have to be translated into discrete time steps and intermediate frequency step sizes that are respectively applied when sweeping up or down the output frequency. This is performed by a dedicated block of chirp configuration functions which minimize the residual between the input chirp parameter and the slope of the linear sweep actually produced by the encoded sweep ramp rate and delta-tuning words, by iteratively selecting the adopted time step among a predefined list of increasing programmable values related to the DUC internal clock rate. Furthermore, the transient phase leading to the maximum start frequency in the case of a falling frequency sweep, or back to the minimum frequency of a rising frequency sweep, must be similarly programmed. In the present implementation, this was accomplished so that the whole sweeping range was covered in the minimum time step allowed by the DUC, i.e. $\SI{8}{\nano\second}$ at the peak $\SI{500}{\mega\hertz}$ clock rate, thus producing a quasi-instantaneous recovery of the user defined starting value. The developed Jupyter notebook finally features a set of functions devoted to handling the USB communication with the MCU. 
These take the single-tone and frequency sweep data related to the user-defined consecutive configurations of the DUC output channels, generate the channel mode byte header described in Sect.~\ref{sec:hw_descr:soft_mcu}, and compose the data strings which are finally encoded and transferred via USB to the MCU. \subsection{Device usefulness potential}\label{sec:hw_descr:usefullness} \noindent In summary, the presented open-source programmable RF signal generator provides the following benefits: \begin{itemize} \item[$\bullet$]It has four outputs that can reach $\SI{200}{\mega\hertz}$ and operate in single-tone or frequency sweep modes. \item[$\bullet$]The generator output can be reprogrammed very quickly, up to $ORR\approx\SI{1}{\mega\hertz}$. \item[$\bullet$]The hardware design and software are completely open-source, allowing easy extension and customization. \item[$\bullet$]All hardware components are off-the-shelf, for a total system cost of 641.79 \euro~and a 160.45 \euro~CPC, very competitive with commercial and lab-built RF generators with similar specifications and applications. \item[$\bullet$]The MCU software realizes a full control system that is adaptable to drive many other devices. 
\end{itemize} \section{Design files summary}\label{sec:design} \noindent The design files are stored in the \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley repository} and grouped in three folders: \begin{itemize} \item[$\bullet$]{Software: it contains the MCU sketch and the PC to MCU communication code.} \item[$\bullet$]{Electronic Schematic: it contains the files describing the electronic design of the system.} \item[$\bullet$]{Documentation: it contains the datasheets for the components of the system.} \end{itemize} \noindent The key design files necessary to build and operate the system are the following: \vskip 0.2cm \tabulinesep=1ex \noindent \begin{tabu} to \linewidth {|X[1.35,1]|X[0.7,1]|X[0.6,1]|X[1.55,1]|} \hline \textbf{Design filename} & \textbf{File type} & \textbf{Open source license} & \textbf{Location of the file} \\\hline complete\_code\_pc\_mcu.zip & code archive from GitHub \cite{Github} & MIT & \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley}/Software/ \\\hline python\_pc.ipynb & Jupyter notebook & MIT & \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley}/Software/python\_pc/ \\\hline teensy\_mcu.ino & Arduino sketch & MIT & \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley}/Software/teensy\_mcu/ \\\hline CircularBuffer-1.3.3 & Arduino library & GNU GPL v3 & \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley}/Software/teensy\_mcu/lib/ \\\hline BOM.ods & OpenDocument spreadsheet & CC BY-SA 4.0 & \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley}/Electronic Schematic/ \\\hline Kicad\_Circuits.zip & Kicad project & CC BY-SA 4.0 & \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley}/Electronic Schematic/ \\\hline ElectronicSchematicsAll.pdf & PDF & CC BY-SA 4.0 & \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley}/Electronic Schematic/ \\\hline CircuitsClockORgatePWR.pdf & PDF & CC BY-SA 4.0 & \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley}/Electronic Schematic/ \\\hline 
PinConnections.pdf & PDF & CC BY-SA 4.0 & \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley}/Electronic Schematic/ \\\hline \end{tabu} \section{Bill of materials summary}\label{sec:bom} \vskip 0.2cm \tabulinesep=1ex \noindent \begin{tabu} to \linewidth {|X[2.0cm]|X[5cm]|X[1.5cm]|X[1.4cm]|X[1.4cm]|X[1.6cm]|X[2.5cm]|} \hline \textbf{Designator} & \textbf{Component} & \textbf{Number} & \textbf{Unit cost (\euro)} & \textbf{Total cost (\euro)} & \textbf{Source of materials} & \textbf{Material type} \\\hline MCU & Teensy 4.1 development board & 1 & 31.53 & 31.53 & \href{https://www.pjrc.com/store/teensy41.html}{PJRC} & Other \\\hline DUC & AD9959/PCBZ & 1 & 494.80 & 494.80 & \href{https://www.analog.com/en/design-center/evaluation-hardware-and-software/evaluation-boards-kits/eval-ad9959.html\#eb-buy}{Analog Devices} & Other \\\hline Clock & CTS 535L250X2GT5 TCXO $\SI{25}{\mega\hertz}$ CLP SNW & 1 & 19.69 & 19.69 & \href{https://www.digikey.it/en/products/detail/cts-frequency-controls/535L250X2GT5/10711662}{Digi-Key} & Other \\\hline OR & SN74LVC1G3208DBVR AND/OR Logic gate & 1 & 0.38 & 0.38 & \href{https://www.digikey.it/en/products/detail/texas-instruments/SN74LVC1G3208DBVR/863609}{Digi-Key} & Other \\\hline PWR1 & LRS-50-3.3 AC/DC Converter $\SI{3.3}{\volt}$ $\SI{33}{\watt}$ & 1 & 15.92 & 15.92 & \href{https://www.digikey.it/en/products/detail/mean-well-usa-inc/LRS-50-3-3/7705049}{Digi-Key} & Other \\\hline PWR2 & TPS82671EVM-646 DC/DC Converter $\SI{1.8}{\volt}$ & 1 & 23.96 & 23.96 & \href{https://www.digikey.it/en/products/detail/texas-instruments/TPS82671EVM-646/2441410}{Digi-Key} & Other \\\hline ProtBoard & SBBTH1506-1 prototype board & 3 & 1.09 & 3.27 & \href{https://www.digikey.it/en/products/detail/chip-quik-inc/SBBTH1506-1/5978222}{Digi-Key} & Other \\\hline J1, J2, J3, J4 & RF2-04A-T-00-50-G SMA female jack through hole & 4 & 1.84 & 7.36 & \href{https://www.digikey.it/en/products/detail/adam-tech/RF2-04A-T-00-50-G/9830588}{Digi-Key} & Other \\\hline 
CBsma & Cable SMA-SMA male RG-316 $\SI{0.5}{\meter}$ & 2 & 14.60 & 29.20 & \href{https://www.digikey.it/en/products/detail/cinch-connectivity-solutions-johnson/415-0029-MM500/6579658}{Digi-Key} & Other \\\hline CBusb & Cable USB 2.0 A male to micro B male $\SI{5}{\meter}$ & 1 & 7.99 & 7.99 & \href{https://www.digikey.it/en/products/detail/assmann-wsw-components/AK67421-5/2175143}{Digi-Key} & Other \\\hline C1 & Capacitor $\SI{10}{\nano\farad}$ $\SI{50}{\volt}$ & 1 & 0.19 & 0.19 & \href{https://www.digikey.it/en/products/detail/kemet/C410C103K5R5TA7200/818228}{Digi-Key} & Ceramic \\\hline C2, C4 & Capacitor $\SI{100}{\nano\farad}$ $\SI{50}{\volt}$ & 2 & 0.22 & 0.44 & \href{https://www.digikey.it/en/products/detail/kemet/C322C104K5R5TA7303/12701413}{Digi-Key} & Ceramic \\\hline C3 & Capacitor $\SI{1}{\nano\farad}$ $\SI{50}{\volt}$ & 1 & 0.26 & 0.26 & \href{https://www.digikey.it/en/products/detail/tdk-corporation/FG18C0G1H102JNT06/5802785}{Digi-Key} & Ceramic \\\hline C5 & Capacitor $\SI{47}{\micro\farad}$ $\SI{25}{\volt}$ & 1 & 0.22 & 0.22 & \href{https://www.digikey.it/en/products/detail/kemet/ESK476M035AC3EA/9448273}{Digi-Key} & Electrolytic \\\hline TVS & SA7.0A-E3/54 Zener diode & 1 & 0.48 & 0.48 & \href{https://www.digikey.it/en/products/detail/vishay-general-semiconductor-diodes-division/SA7-0A-E3-54/2146115}{Digi-Key} & Semiconductor \\\hline JP1, JP2 & SPC02SYAN jumper & 2 & 0.10 & 0.20 & \href{https://www.digikey.it/en/products/detail/sullins-connector-solutions/SPC02SYAN/76375}{Digi-Key} & Other \\\hline CNm & Header Connector 32 position 2.54mm through hole & 2 & 0.79 & 1.58 & \href{https://www.digikey.it/en/products/detail/sullins-connector-solutions/PRPC032SAAN-RC/2775222}{Digi-Key} & Other \\\hline CNf & Receptacle Connector 32 position 2.54mm through hole & 2 & 2.16 & 4.32 & \href{https://www.digikey.it/en/products/detail/mill-max-manufacturing-corp/310-47-132-41-001000/7364043}{Digi-Key} & Other \\\hline \end{tabu}\\ \vskip 0.2cm \noindent The total 
cost of the components is 641.79 \euro, which leads to a CPC of 160.45 \euro. \section{Build instructions}\label{sec:build_instr} In order to power the system we used the circuit depicted in Fig.~\ref{fig:schematicaux}(C), connected using common insulated wires. An AC/DC converter (PWR1) is directly wall powered and generates an output voltage of $+\SI{3.3}{\volt}$, which is filtered by capacitors C4 and C5 (electrolytic) and protected by the TVS Zener diode. For convenience, these three through hole components were soldered and linked on a ProtBoard perforated prototype board. A DC/DC converter (PWR2) is powered from the filtered $+\SI{3.3}{\volt}$ line and produces a $+\SI{1.8}{\volt}$ output when the jumpers JP1 and JP2 are set in the pull-up position. The two voltage levels are necessary to power the DUC (AD9959/PCBZ evaluation board) via the TB1 connector. Additionally, both the OR gate and the clock source circuits adopted in the present application require a $+\SI{3.3}{\volt}$ supply voltage. \begin{figure}[!ht] \centering \includegraphics[width=0.9\textwidth]{CircuitsClockORgatePWR.pdf} \caption{Schematics of (A) the clock, (B) logic OR gate and (C) power supply circuits (the blue dotted line represents the constituent elements of the PWR2 evaluation board).} \label{fig:schematicaux} \end{figure} The DUC requires an external $\SI{25}{\mega\hertz}$ stable clock reference to operate and reach its peak specifications. We selected a temperature compensated crystal oscillator (Clock) with a frequency stability of $\pm 280$ parts per billion and we assembled the schematic presented in Fig.~\ref{fig:schematicaux}(A) on a ProtBoard, soldering the through hole filtering capacitors (C1, C2, C3) and output SMA jack (J1). An SMA cable (CBsma) delivers the clock signal to the DUC via the J9 connector (REF CLK). In parallel, the W9 jumper had to be set to the REF CLK position. 
This reference clock was then brought to $\SI{500}{\mega\hertz}$ by employing the internal phase-locked loop-based reference clock multiplier of the DUC, as described in Section~\ref{sec:op_instr}. The logical OR gate circuit was soldered on the third ProtBoard, following the schematic presented in Fig.~\ref{fig:schematicaux}(B). The OR chip has its two inputs and single output wired to three SMA jacks (J2, J3, J4). The second SMA cable (CBsma) was cut in half and a two position connector (split-off either from the multi-position male header CNm or female receptacle CNf) was soldered on the loose end, with one pin attached to the inner signal line and the other to the outer shielding. The connector type (male or female) should be selected depending on the header type present on the MCU and DUC. To power the MCU we opted to rely on the USB connector used for programming the board via the cable CBusb, instead of using an external power supply. Alternatively, $\SI{5}{\volt}$ may be supplied via the $\rm{V_{IN}}$ pin. However, for using the USB connection while employing an external power supply, the power provided by the USB cable should be properly isolated, so as to prevent the possibility of power flowing back to the PC. This can be accomplished by cutting apart the $\SI{5}{\volt}$ pads on the bottom side of the MCU board. \begin{figure}[!ht] \centering \includegraphics[width=0.8\textwidth]{pin_connections.pdf} \caption{System assembly diagram. The positive power supply at $\SI{3.3}{\volt}$ and $\SI{1.8}{\volt}$ is shown in red, with the common ground of all devices shown in black. Digital inputs from the NI board are shown in orange, with dark orange indicating the IRQ lines connected to the MCU and light orange indicating the digital input lines to the DUC. Blue denotes the digital connections between MCU and DUC, with light blue indicating the shielded high-frequency transmission lines employed by the custom SPI developed in this work. 
The $\SI{25}{\mega\hertz}$ reference clock is shown in green, whereas the jumpers to be disconnected for enabling the manual DUC control and the external clock input are respectively highlighted in blue and green.} \label{fig:pinconn} \end{figure} The MCU can control the DUC by establishing the connections illustrated in Fig.~\ref{fig:pinconn} to the I/O control headers. By default, the AD9959/PCBZ evaluation board is supplied with the USB communication enabled, a setting that disables these headers. To enact the external control of the DUC via the MCU, it is necessary to set the jumper W7 (PC\_CTRL) to manual and remove those on W1, W2, W3 and W10 (highlighted with the blue dotted line in Fig.~\ref{fig:pinconn}). Now, the MCU board must be connected to the header row (U2, U13) of the DUC, using, in particular, the PWR\_DWN, RESET, IO\_UPDATE and P0-3 (profile) pins, besides the pins related to the single/quad-wire SPI, i.e. chip select (CSB), serial clock (SCLK), SDIO\_0, SDIO\_1, SDIO\_2 and SDIO\_3. Since in the present application the DUC does not need to send a response back to the MCU board, a MISO (Master Input Slave Output) line was not implemented. The ground pins next to these connections on the DUC header row must be coupled to the ground references on the MCU for avoiding ground loops. Similarly, the ground of the trigger generator used to control the operation timing must also be connected to the same ground reference. At both ends of each link, one or two position connectors split-off either from the multi-position male header CNm or female receptacle CNf and soldered to an insulated cable can be used to realize mechanically and electrically stable wiring. The SPI communication between MCU and DUC was implemented in two alternative ways: via the standard single-bit SPI library or using custom functions performing single- or four-bit serial input operations. 
These functions use direct digital port manipulation for the fast simultaneous control of the designated SDIO and SCLK pins, in order to achieve improved serial transfer times with respect to the standard SPI library available in the Arduino platform. Specifically, all conventional SPI pins belong to the GPIO6 port of the MCU board; conversely, all the remaining pins indicated so far belong to different digital ports. Using these functions, we have verified the possibility of generating a SCLK having a frequency up to $\SI{120}{\mega\hertz}$. However, clocking the SPI chip at this maximum rate could not guarantee a reliable communication, in view of the parasitic elements affecting our system. To mitigate this disturbance, SPI connections were made using shielded cables, grounding the shield at both ends. Adopting a working SCLK rate of $\SI{60}{\mega\hertz}$ by setting a clock divider equal to 2 in the custom SPI functions enabled a reliable communication between the MCU and DUC. The digital pin connections between them are summarized in Table~\ref{tab:pins}. \begin{table*}[h!] 
\centering \caption{Digital pin connections between MCU and DUC.} \label{tab:pins} \setlength{\extrarowheight}{2pt} \begin{tabular}{lcc} \hline \multicolumn{1}{l}{\multirow{2}{*}{\textbf{Function}}} & \multicolumn{2}{c}{\textbf{Digital pins}}\\ \multicolumn{1}{c}{} & \textbf{MCU} & \textbf{DUC} \\ \hline \vspace{4pt} Power Control & 2 & PWR\_DWN \\ \vspace{4pt} Master Reset & 3 & RESET \\ \vspace{4pt} I/O Update & 5 & IO\_UPDATE \\ \vspace{4pt} Chip Select & 10 & CSB \\ \vspace{4pt} \begin{tabular}[c]{@{}l@{}}Single/Quad-wire SPI\\ Serial Clock\end{tabular} & 40 & SCLK \\ \vspace{4pt} \begin{tabular}[c]{@{}l@{}}Single/Quad-wire SPI\\ SDIO\end{tabular} & 19 & SDIO\_0 \\ \vspace{4pt} \begin{tabular}[c]{@{}l@{}}Quad-wire SPI\\ SDIO (second line)\end{tabular} & 18 & SDIO\_1 \\ \vspace{4pt} \begin{tabular}[c]{@{}l@{}}Quad-wire SPI\\ SDIO (third line)\end{tabular} & 14 & SDIO\_2 \\ \vspace{4pt} \begin{tabular}[c]{@{}l@{}}Quad-wire SPI\\ SDIO (fourth line)\end{tabular} & 15 & SDIO\_3 \\ \hline \end{tabular} \end{table*} The last indispensable connections are related to the external timing system, which can be any device capable of producing 10 $+\SI{3.3}{\volt}$ TTL signals at rates $\geq\SI{1}{\mega\hertz}$. In our implementation, we used a National Instruments (NI) board (PCI-6251) to generate the interrupt trigger signals related to the commands described in Sect.~\ref{sec:hw_descr:soft_mcu}, in addition to the pulses sent to the IO\_UPDATE and P0-3 pins of the DUC, respectively required to activate the frequency configurations transferred via SPI, and to trigger the generation of the frequency ramps in frequency sweep mode. To meet the $\SI{3.3}{\volt}$ input voltage requirement of both the MCU and DUC boards, the $\SI{5}{\volt}$ output lines of the NI board were connected to a $\SI{5}{\volt}$ to $\SI{3.3}{\volt}$ voltage level translator (\href{https://www.digikey.it/en/products/detail/texas-instruments/SN74LVC4245ADWR/562892}{SN74LVC4245ADWR}). 
As detailed in Section~\ref{sec:hw_descr:soft_mcu}, we implemented two options to trigger the IO\_UPDATE pin on the DUC: either directly from the NI after the SPI communication is completed or from the MCU if the ``auto update'' mode is engaged. To allow both options to work alternatively without hardware modifications, we use the digital logic OR gate circuit whose output goes to the DUC IO\_UPDATE pin, while its inputs are connected to the I/O update pin of the MCU board (pin 5) and to the designated digital output line of the trigger generator. \section{Operation instructions}\label{sec:op_instr} To operate the Arduino-based control system we developed an open-source software solution comprising two elements: the Arduino C++ sketch that constitutes the MCU program and the user interface implemented as a Python Jupyter notebook that runs on the PC and communicates with the MCU via USB. This notebook allows the user to compose and send the frequency configurations to be sequentially generated by the four-channel RF signal generator that we implemented as a demonstration of the MCU control system capabilities. The following procedure should be followed to operate the open-source software controlling the DUC: \begin{enumerate} \item Download the complete software package, comprising the Python 3 notebook and the C++ sketch to be loaded on the MCU board, either from the \href{https://dx.doi.org/10.17632/hvwyz5yhh2.1}{Mendeley repository} or from the corresponding \href{https://github.com/lens-biophotonics/open-fast-buffered-4ch-rf-gen}{GitHub repository} \cite{Github} for the latest version. \item Download and install the Arduino integrated development environment (IDE) (\href{https://www.arduino.cc/en/software}{Arduino Downloads}). \item Connect the MCU board to the PC via USB: this will power up the board. \item Power up the DUC. 
\item Open the C++ sketch using the Arduino IDE and, if required, modify the reference clock configuration of the DUC which is preset for an external $\SI{25}{\mega\hertz}$ clock source. In detail, after setting the frequency of the external reference clock (REF\_CLK), set the global c\_ClkMultiplier variable to \textit{false} if clocking the DDS chip directly with a high frequency source. Otherwise, the internal phase-locked loop-based reference clock multiplier will be enabled; in this case, users must also specify the REF\_CLK multiplier factor (c\_PLLmul), that must be between 4 and 20. The internal clock rate of the DDS chip determines the maximum RF frequency that can be generated without incurring in spectrum distortions, which is 40\% of this rate. \item If desired, activate the sketch debug mode by setting g\_debug to \textit{true} and open the IDE serial monitor. When enabled, the MCU will output to this monitor several debugging messages that may help identifying issues in the supplied configurations or other problems. \item If desired, activate the optional ``auto update" mode (g\_autoUpdate = \textit{true}), discussed in Section~\ref{sec:hw_descr:soft_mcu}. \item Use the Arduino IDE to load the C++ sketch on the MCU board. \item Open the Jupyter notebook user interface. \item If required, adjust the value of the DUC system clock frequency (SYSCLK) within the ``Setup Constants" cell at the top of the notebook. \item Verify the number of the serial USB port used to communicate with the MCU board by selecting Tools $>$ Port in the Arduino IDE menu. \item In order to define the list of single-tone frequencies and/or linear frequency sweeps to be sequentially generated by the four DDS channels upon successive update interrupt events, edit the respective Python lists within the ``Input to AD9959 DDS" cell. 
Specifically, whereas a simple floating-point value can be provided for the latter mode of operation, frequency sweeps are configured via Python dictionaries having as keys the start frequency, the chirp parameter (the slope of the frequency ramp, either positive or negative) and the sweep duration. Alternatively, a None keyword may be filled in whenever a particular channel should not be updated upon a specific step of the configuration sequence. \item If desired, activate the notebook debug mode (debug = True) displaying the Unicode data strings encoded and sent to the board. Moreover, users may optionally disable the serial USB transfer (transfer = False) for validating the input to the MCU board without actually transferring the data strings. \item Press the notebook's ``Run all" button to transfer the frequency configurations to the MCU board. \item If the MCU debug mode is active, users may verify in the IDE serial monitor the frequency configurations received via USB and the programming progress of the DUC. Adopt long pulse periods ($\sim$ ms) to allow the PC enough time to receive and display the MCU feedback messages. \item Send a pulse train to the designated update interrupt pin of the MCU board for sequentially writing the transferred frequency configurations to the SPI buffers of the DUC. Once each setting is uploaded, the DUC waits for an I/O update pulse to reprogram and output the new set of RF signals. This pulse must be synchronized with the pulse train and delayed at least by the duration of the SPI transfer communication. It may originate either from another channel of the trigger source or from the MCU itself if the ``auto update" mode of the C++ sketch has been enabled (item 7). \item If linear frequency sweeps have been configured, following the I/O update pulse, these need to be externally triggered via transitions of the corresponding DUC profile pin logic state, i.e. 
from low to high for rising sweeps and vice versa, as detailed in the Linear Sweep Mode section of the AD9959 data sheet. Such logic state has to last for the entire duration of the frequency sweep (\textit{no-dwell} mode disabled). \item If required, users may toggle the designated external interrupt pins from low to high in order to trigger the execution of the commands implemented by the ISRs described in Sect.~\ref{sec:hw_descr:soft_mcu}. \item By repeating items 12-17, additional configuration lists can be defined, transferred to the MCU (appending them to the ones previously sent, if not yet executed) and activated on the DUC. \end{enumerate} \section{Validation and characterization}\label{sec:val} In order to validate the operation of the Arduino-based control system and of the four-channel RF signal generator that it powers, we first confirmed that the USB and SPI communication steps were error-free and we measured their data throughput rates. Then, we verified that the DUC was correctly programmed for all possible setting combinations and we characterized the RF output spectrum and verified that we could generate the desired combinations of RF single-tone signals and frequency sweeps. \begin{figure}[!ht] \centering \includegraphics[width=0.6\textwidth]{USB_Transfer.pdf} \caption{USB communication data rate between PC and MCU board, evaluated by linearly fitting the data reported in Table \ref{tab:usb}.} \label{fig:transferUSB} \end{figure} We characterized the performance of the serial USB communication between the PC and MCU by evaluating the time required to transfer byte strings related to varying modes of operation of the four DDS channels, along with the time required to parse them and push incoming data into the designated memory buffers (Table~\ref{tab:usb}). The data string byte size increases with the number of channels to be simultaneously activated, especially when operated in frequency sweep mode. 
We find that the relationship between the amount of transferred data and the incurred time is highly linear, as shown in Fig.~\ref{fig:transferUSB}, and from a linear fit we extract an USB transfer rate of $1.97\pm0.01$~MByte$\si{\per\second}$ and a data decoding rate of $4.29\pm0.01$~MByte$\si{\per\second}$, while the overheads are quite small, respectively $\SI{0.96\pm0.01}{\micro\second}$ and $\SI{0.20\pm0.01}{\micro\second}$. \begin{table}[!ht] \small \centering \caption{PC-MCU USB communication properties against different channel configurations.} \label{tab:usb} \setlength{\extrarowheight}{2pt} \begin{tabular}{ccrcc} \hline \multicolumn{2}{l}{\textbf{Channels operation}} & \multirow{2}{*}{\textbf{Bytes}} & \multirow{2}{*}{\textbf{USB Transfer Time [\textmu s]}} & \multirow{2}{*}{\textbf{Decoding Time [\textmu s]}} \\ Single-tone & Frequency Sweep & & & \\ \hline 1 & 0 & 19 & 8 & 4 \\ 2 & 0 & 29 & 13 & 6 \\ 3 & 0 & 39 & 18 & 8 \\ 4 & 0 & 49 & 23 & 10 \\ 0 & 1 & 51 & 24 & 11 \\ 1 & 1 & 61 & 29 & 13 \\ 2 & 1 & 71 & 34 & 15 \\ 3 & 1 & 81 & 39 & 17 \\ 0 & 2 & 93 & 45 & 21 \\ 1 & 2 & 103 & 50 & 23 \\ 2 & 2 & 113 & 55 & 25 \\ 0 & 3 & 135 & 67 & 31 \\ 1 & 3 & 145 & 72 & 33 \\ 0 & 4 & 177 & 88 & 40 \\ \hline \end{tabular} \end{table} Next, we similarly assessed the achievable communication rate between the MCU and the DUC against different channel operation configurations, comparing the transfer times allowed by the standard single-wire Arduino SPI library with the ones obtainable with our custom single- or four-wire SPI implementation, which exploits direct port manipulation on the Teensy 4.1, a hardware optimization. A serial clock rate of \SI{60}{\mega\hertz} was consistently adopted for the three SPI communication strategies, in order to establish a proper comparison. 
\begin{figure}[!ht] \centering \includegraphics[width=0.6\textwidth]{SPI_Transfer.pdf} \caption{SPI transfer data rate between MCU board and DUC, evaluated by linearly fitting the data reported in Table~\ref{tab:spi}, related to the three tested serial operation modes. Measures were performed using a consistent SCLK rate of \SI{60}{\mega\hertz}. A considerable throughput increase is noticeable for the custom quad-wire SPI developed in this work.} \label{fig:transferSPI} \end{figure} These measures involved a subset of all possible channel programming combinations (i.e., an increasing number of single-tone signals or frequency ramps), and considered also the channel operation mode pre-programming needed when switching at least one of the four DDS channels from single-tone to frequency sweep mode and vice versa, requiring six additional bytes to be transferred in advance to the DUC. The measured transfer times are summarized in Table~\ref{tab:spi}, and shown in Fig.~\ref{fig:transferSPI} (channel mode pre-programming excluded). The standard single-wire SPI library provided a data rate of $3.38\pm0.02$~MByte$\si{\per\second}$, while our custom hardware-optimized implementation reached $6.84\pm0.07$~MByte$\si{\per\second}$ in single-wire and $22.3\pm0.3$~MByte$\si{\per\second}$ in quad-wire modes. In all three cases the communication overhead and jitter were very small, respectively of $\SI{192\pm24}{\nano\second}$, $\SI{90\pm10}{\nano\second}$ and $\SI{20\pm2}{\nano\second}$. These results highlight that our custom hardware-optimized SPI implementation delivers a considerable improvement of the DDS input data rate with respect to the standard SPI library, up to $6.6$ times. 
As reported in Table~\ref{tab:spi}, this in turn leads to an up to 6 times increase in the maximum allowed $ORR$ of the four DDS channels when using quad-wire custom SPI, with a minimum $ORR = \SI{188}{\kilo\hertz}$ in the most data communication intensive case of four pre-programmed frequency sweeps, and a peak $ORR = \SI{1.33}{\mega\hertz}$ when only a single-tone setting is modified. This last result demonstrates that our four-channel RF signal generator reaches the desired performance specification and is competitive with other state-of-the-art solutions presented in Section~\ref{sec:intro}. \begin{table}[!ht] \small \caption{MCU-DUC SPI communication times against different output channel configurations with three different transmission modes.} \label{tab:spi} \setlength{\extrarowheight}{2pt} \begin{threeparttable} \begin{tabular}{lrrrrrrr} \hline \multirow{2}{*}{\textbf{Channels operation}} & \multirow{2}{*}{\textbf{Bytes}} & \multicolumn{2}{c}{\textbf{Custom Quad-wire SPI} $\dagger$} & \multicolumn{2}{c}{\textbf{Custom Single-wire SPI} $\dagger$} & \multicolumn{2}{c}{\textbf{Arduino SPI} $\dagger$} \\ & & \textbf{Time [\textmu s]} & \textbf{Rate [kHz]} & \textbf{Time [\textmu s]} & \textbf{Rate [kHz]} & \textbf{Time [\textmu s]} & \textbf{Rate [kHz]} \\ \hline 1 Single-Tone & 7 & 0.8 & 1333.3 & 1.5 & 649.4 & 2.6 & 380.2 \\ 1 Single-Tone $\ddagger$ & 13 & 1.1 & 892.9 & 2.6 & 386.1 & 4.7 & 213.7 \\ 2 Single-Tones & 14 & 1.1 & 917.4 & 2.7 & 374.5 & 4.8 & 207.9 \\ 2 Single-Tones $\ddagger$ & 20 & 1.5 & 684.9 & 3.7 & 268.1 & 6.9 & 145.1 \\ 3 Single-Tones & 21 & 1.4 & 694.4 & 3.8 & 263.9 & 7.0 & 142.9 \\ 3 Single-Tones $\ddagger$ & 27 & 1.8 & 552.5 & 4.8 & 206.6 & 9.1 & 110.4 \\ 4 Single-Tones & 28 & 1.8 & 565.0 & 4.9 & 203.7 & 9.2 & 108.9 \\ 4 Single-Tones $\ddagger$ & 34 & 2.2 & 465.1 & 6.0 & 167.8 & 11.2 & 89.0 \\ 1 Frequency Sweep & 25 & 1.5 & 675.7 & 4.1 & 243.3 & 7.9 & 126.7 \\ 1 Frequency Sweep $\ddagger$ & 31 & 1.9 & 526.3 & 5.2 & 193.8 & 10.0 & 100.2 \\ 2 
Frequency Sweeps & 50 & 2.7 & 374.5 & 7.8 & 127.7 & 15.4 & 65.1 \\ 2 Frequency Sweeps $\ddagger$ & 56 & 3.0 & 328.9 & 8.9 & 112.7 & 17.5 & 57.3 \\ 3 Frequency Sweeps & 75 & 3.8 & 263.2 & 11.5 & 86.7 & 22.8 & 43.8 \\ 3 Frequency Sweeps $\ddagger$ & 81 & 4.2 & 239.2 & 12.6 & 79.5 & 24.9 & 40.1 \\ 4 Frequency Sweeps & 100 & 4.9 & 202.4 & 15.3 & 65.5 & 30.3 & 33.0 \\ 4 Frequency Sweeps $\ddagger$ & 106 & 5.3 & 188.3 & 16.3 & 61.3 & 32.4 & 30.8 \\ \hline \end{tabular} \begin{tablenotes} \item \footnotesize $\dagger$ SCLK = $\SI{60}{\mega\hertz}$ \item \footnotesize $\ddagger$ channel operation mode pre-programming included \end{tablenotes} \end{threeparttable} \end{table} Furthermore, we verified the correct activation of the frequency tuning words transferred via SPI, in order to validate the reliability of our custom single- and quad-wire solutions based on fast direct port manipulation. Fig.~\ref{fig:spectrum} shows the spectrum of a $\SI{10}{\mega\hertz}$ RF signal generated by the DUC, as measured using the Fast Fourier Transform mathematical operation mode of a Rigol MSO5072 oscilloscope with a resolution bandwidth of $\SI{50}{\hertz}$. We found the measured spectrum to be consistent with the typical performance characteristics reported in the AD9959 data sheet \cite{AD9959}. The simultaneous refresh of two DDS channels is instead shown in Fig.~\ref{fig:scope}, as captured with a Keysight Infinivision MSOX2024A oscilloscope: three subsequent update interrupt requests detected on the designated digital pin (magenta) trigger the SPI transfer of the data words sequentially stored in the FIFO buffers of the MCU. Following the SPI communication (yellow), an I/O update pulse (not shown) activates the new frequency values which are then generated by the respective DDS output channels after a fixed data latency of a few tens of SYSCLK periods. 
At the maximum SYSCLK rate of $\SI{500}{\mega\hertz}$, these respectively correspond to $\sim \SI{50}{\nano\second}$ and $\sim \SI{80}{\nano\second}$ for DDS channels operated in single-tone and frequency sweep mode. \begin{figure}[!ht] \centering \includegraphics[width=0.8\textwidth]{spettro.pdf} \caption{Typical RF spectrum of a single $\SI{10}{\mega\hertz}$ output of the DUC, measured using the Fast Fourier Transform mathematical operation mode of a Rigol MSO5072 oscilloscope.} \label{fig:spectrum} \end{figure} \begin{figure}[!ht] \centering \includegraphics[width=0.8\textwidth]{segnali.pdf} \caption{Two-channel signal trace captured with a Keysight Infinivision MSOX2024A oscilloscope, displaying two sequences of three single-tone sinusoids of increasing frequency, activated in response to subsequent rising edges on the designated interrupt pin (UPDATE IRQ, shown in magenta) in ``auto update" mode. The SCLK signal corresponding to the SPI communication between MCU and DUC is shown in yellow.} \label{fig:scope} \end{figure} \section{Conclusion} The programmable Arduino-based four-channel RF signal generator that we have developed is an open-source and low cost solution that uses readily available components, like the $\SI{200}{\mega\hertz}$ AD9959/PCBZ \cite{AD9959PCBZ} as the RF DDS source and the Teensy 4.1 \cite{Teensy41} as the microcontroller unit. The total system cost is currently 641.79 \euro, resulting in a notably low cost per channel of 160.45 \euro, which makes it very competitive with respect to both commercial \cite{ModularSystemControls,GraAndAfch,MoglabsQRF,SinaraUrukul,Kasprowicz2022,SpinCoreDDS300,SpinCoreDDS1000,Wieserlabs,WieserlabsDual,MoglabsXRF} and lab-built RF generators \cite{Liang2009,Li2016,Pruttivarasin2015,Perego2018,Prevedelli2019,Donnellan2019,Bertoldi2020,Allcock2021} that have similar specifications and applications. 
The internal software architecture has been designed to operate as a real-time state machine, allowing it to receive new commands and frequency settings from the PC via USB, store up to millions of them in its internal memory and to almost concurrently reprogram via SPI and activate the DDS outputs with sub-$\si{\micro\second}$ latency and jitter. We have validated the performance of our device, which surpasses all prior MCU-based solutions \cite{ModularSystemControls,GraAndAfch}, and demonstrated that it can generate single-tones or frequency sweeps in an externally or internally triggered, arbitrarily programmed sequence. When using our custom quad-wire SPI implementation, which benefits from hardware optimizations, we have achieved high output change rates that depend only on the required amount of reconfiguration data, ranging from a minimum $ORR = \SI{188}{\kilo\hertz}$ for four pre-programmed frequency sweeps to a peak $ORR = \SI{1.33}{\mega\hertz}$ when changing only a single-tone output. These rates were achieved while adopting a SCLK of $\SI{60}{\mega\hertz}$ and may be more than doubled if serial port I/O operations are conducted at the maximum speed of $\SI{200}{\mega\hertz}$ supported by the DUC. These characteristics make our RF signal generator suitable for a broad range of applications in biophysics, microscopy, quantum, atomic and molecular physics and industrial manufacturing, such as driving acousto-optical devices or controlling the state of processes or matter sensitive to this frequency domain. By being open-source, state machine based and relying on standard interfaces for communication, the design of our RF generator is easily extensible and customizable for specific applications. Furthermore, the MCU architecture realizes a flexible and complete control system that is adaptable to many other devices. Likewise, the C++ software code presents a low skill barrier to be understood and modified using the Arduino IDE. 
Perspective improvements of our RF source would be the addition of amplitude and phase control which requires only additional software development, the enhancement of the SPI clock rate to further increase the $ORR$ and the augmentation of command memory by 16~MByte by soldering two extra RAM chips. The functionality of the Jupyter notebook can be expanded to provide an iterative MCU communication method to supply configuration sequences whose length surpasses the MCU memory, to receive status information from the MCU and to provide IRQ functionality via USB, lowering the amount of required TTL timing lines. Finally, it should be possible to design a more general purpose MCU software code, lacking RF generator-specific functionality, that could ease the adaptation to another DUC type interfaced via SPI. Such future improvements and bug corrections will be made available in our \href{https://github.com/lens-biophotonics/open-fast-buffered-4ch-rf-gen}{GitHub repository} \cite{Github}.\\ \noindent \textbf{CRediT author statement}\\ \noindent \textbf{Michele Sorelli}: Software, Investigation, Validation, Visualization, Writing - Original Draft, Writing- Reviewing and Editing. \textbf{Marco Marchetti}: Software, Investigation, Validation, Writing - Original Draft. \textbf{Pietro Ricci}: Investigation, Validation, Visualization. \textbf{Domenico Alfieri}: Conceptualization, Funding acquisition, Project administration, Writing- Reviewing and Editing. \textbf{Vladislav Gavryusev}: Conceptualization, Methodology, Supervision, Software, Investigation, Visualization, Writing - Original Draft, Writing- Reviewing and Editing. 
\textbf{Francesco Saverio Pavone}: Funding acquisition, Project administration, Writing- Reviewing and Editing.\\ \noindent \textbf{Declaration of Competing Interest}\\ \noindent The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.\\ \noindent \textbf{Acknowledgements}\\ \noindent The authors would like to acknowledge the help of Dr. Giuseppe Sancataldo with the development of the ATTRACT project \cite{Ricci2022}.\\ \noindent This project has received funding from the ATTRACT project funded by the EC under Grant Agreement 777222 and from the H2020 EXCELLENT SCIENCE - European Research Council (ERC) under grant agreement ID n. 692943 BrainBIT. This project has also received funding from the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Grant Agreement No. 871124 (Laserlab-Europe), and was supported by the EBRAINS research infrastructure, funded from the European Union’s Horizon 2020 Framework Programme for Research and Innovation under the Specific Grant Agreement No. 945539 (Human Brain Project SGA3). This research has also been supported by the Italian Ministry for Education, University, and Research in the framework of the Advance Lightsheet Microscopy Italian Mode of Euro-Bioimaging ERIC. Vladislav Gavryusev has been funded by a Marie Skłodowska-Curie Fellowship (MSCA-IF-EF-ST ``MesoBrainMicr" grant agreement No. 793849).\\ \bibliographystyle{plainurl} \noindent
1,108,101,565,861
arxiv
\section{Introduction} Computational materials and molecular modelling have proved to be an invaluable tool in predicting new materials and processes and interpreting experimental phenomena on the microscopic level. The predictive performance of atomistic simulations strongly depends on the accuracy of the employed atomic interaction model, of which those based on solving the Schr\"odinger equation are generally regarded as the most reliable. However, many problems of interest remain intractable, even when using approximate solutions of the quantum mechanical problem, such as \ac{DFT}, due to the high computational cost and its scaling with respect to system size. While interatomic potentials based on simple analytical forms open up access to significantly larger system sizes and longer simulation times, their parameterisation is often insufficiently accurate for predictive modelling. Surrogate models for quantum mechanical calculations, based on highly flexible functional forms provided by machine learning methods which are fitted using high-quality \textit{ab initio\xspace}{} reference data emerged in the last two decades~\cite{Behler.2007,Shapeev.2016,Thompson.2015,Drautz.2019,Artrith.2016,Bartok.2010,Schutt.2018}. These \acp{MLIP} reproduce the \textit{ab initio\xspace}{} potential energy surface to a high accuracy in a computationally efficient way, allowing access to large time and length scale simulations~\cite{Deringer.2020}. In this work, we focus on the fitting aspect of \acp{MLIP}, \emph{i.e.} the process that determines the model parameters based on a set of reference data points. Even though fitting is typically a one-off operation, and its computational cost leaves the cost of a subsequent simulation using the \ac{MLIP} largely or completely unaffected, it can use significant resources and can be a limiting factor in applying ever increasing data bases or exploring the space of model hyperparameters. 
Depending on the regression method, some \acp{MLIP} are based on solving a linear system to obtain the model weights. Here we present a set of application principles that can be used to distribute the workload among multiple processes when fitting such models, allowing efficient utilisation of massively parallel computer resources. We have implemented these in the \ac{GAP} framework and demonstrated excellent scaling in both memory and computational efficiency up to thousands of cores. We note that similar efforts have been made to parallelise the \code{fitSNAP} code used for fitting \ac{SNAP}~\cite{Thompson.2015} linear models~\cite{fitSnap}. Other \ac{MLIP} implementations which are based on kernel methods~\cite{Lilienfeld.2015} or linear regression, such as the \ac{LML}~\cite{Goryaeva.2019} and \ac{ACE}~\cite{Drautz.2019} approaches, would also benefit from similar developments. \section{Theory}\label{sec:theory} We provide a brief overview of the \ac{GAP} framework, and for more details we refer the reader to a more comprehensive review~\cite{Deringer.2021} of the method. \Ac{GAP} is a Bayesian regression method that aims to create surrogate models for the quantum mechanical interactions by using a reference database consisting of atomic configurations and associated microscopic properties obtained from \textit{ab initio\xspace}{} calculations. Strategies to create such databases are discussed elsewhere~\cite{Li2015,Vandermause2020}; here we start from a set of configurations, each consisting of Cartesian coordinates with the corresponding atomic species information, and for \textit{ab initio\xspace}{} data we use total energies, forces and virial stress components. 
As Cartesian coordinates do not transform in an invariant fashion when applying energy-conserving symmetry operations to the atomic positions, such as rotations, translations, inversion and permutation of the indices of identical atomic species, it is beneficial to first transform the Cartesian coordinates to descriptors, which form the input vectors $\mathbf{x}_i$ of length $d$ to the regression method. In general, \ac{GAP} approximates the total energy of a configuration $A$ in the form of a sparse \ac{GP}~\cite{QuinoneroCandela:2005wp,Snelson:2006vi} \begin{equation} E_A = \sum_{i \in A} \sum_j^M c_j k(\mathbf{x}_i,\mathbf{x}_j) \end{equation} where the first sum includes all descriptor vectors in configuration $A$, and the second sum is over a set of representative descriptor vectors, or sparse points $M$. The kernel function $k(\mathbf{x}_i,\mathbf{x}_j)$ evaluates the similarity of descriptors $\mathbf{x}_i$ and $\mathbf{x}_j$, and $c_j$ are the regression weights that need to be fitted such that predicted properties match the \textit{ab initio\xspace}{} values as closely as possible. Forces and virial stress components can be obtained by differentiating this expression with respect to atomic coordinates or lattice deformations, which is a trivial, but rather tedious operation and we omit it from here for brevity. Denoting the $N$ \textit{ab initio\xspace}{} reference properties by $\mathbf{y}$ and predicted properties by $\tilde{\mathbf{y}}$, we formulate the regression problem as minimising the loss function \begin{equation} \mathcal{L} = (\mathbf{y} - \tilde{\mathbf{y}})^T \boldsymbol{\Sigma}^{-1} (\mathbf{y} - \tilde{\mathbf{y}}) + \mathbf{\kc}^T \mathbf{K}_{MM} \mathbf{\kc} \label{eq:loss} \end{equation} with respect to the weights $\mathbf{\kc}$. The matrix $\boldsymbol{\Sigma}$ is diagonal and its elements are inversely related to the importance of each data point. 
While the first term is responsible for achieving a close fit to the data points, the second term is controlling overfitting via a Tikhonov regularising expression, which forces the elements of $\mathbf{\kc}$ to remain small. The elements of the matrix $\mathbf{K}_{MM}$ are built from the kernel function values $K_{ij} = k (\mathbf{x}_i, \mathbf{x}_j)$ between the sparse point set $\{\mathbf{x}_i\}_{i=1}^M$ where we typically use $M \ll N$. The minimum of the loss function in \cref{eq:loss} can be determined analytically, and the result is \begin{equation} \mathbf{c} = (\mathbf{K}_{MM} + \mathbf{K}_{MN} \boldsymbol{\Sigma}^{-1} \mathbf{K}_{NM})^{-1} \mathbf{K}_{MN} \boldsymbol{\Sigma}^{-1} \mathbf{y} \label{eq:GP_sparse} \end{equation} where the elements of $\mathbf{K}_{MN}$ are given by \begin{equation} K_{ij} = \sum_{\alpha \in j} k(\mathbf{x}_i,\mathbf{x}_\alpha) \end{equation} where $\mathbf{x}_i$ is a descriptor vector from the sparse set, and $j$ denotes a target total energy and the sum includes all descriptors that contribute to $y_j$. For convenience, we use the notation $\mathbf{K}_{MN}^T \equiv \mathbf{K}_{NM}$. Elements of $\mathbf{K}_{MN}$ corresponding to derivative observations are calculated similarly, using the appropriate gradients of the kernel function $k$, for which further details may be found in the review article by Deringer \textit{et al\xspace}{}~\cite{Deringer.2021}. The complexity of solving \cref{eq:GP_sparse} scales with $\mathcal{O}(M^2 N)$, which is significantly more favourable than the $\mathcal{O}(N^3)$ scaling of a full \ac{GP} implementation. However, Foster \textit{et al\xspace}{} have shown~\cite{Foster:2009wy} that the solution may lead to numerically unstable results at large data sets, i.e. uncertainties in the input lead to disproportionate errors in the output. 
Following their suggestions, we first define the $(N+M)\times M$ matrix \begin{equation} \mathbf{A} = \begin{bmatrix} \boldsymbol{\Sigma}^{-\sfrac{1}{2}}\mathbf{K}_{NM} \\ \mathbf{L}_{MM}^T \end{bmatrix} \end{equation} where the lower triangular matrix $\mathbf{L}_{MM}$ is the result of the Cholesky decomposition of $\mathbf{K}_{MM}$ such that $\mathbf{K}_{MM} = \mathbf{L}_{MM}\mathbf{L}_{MM}^T$. Introducing $\mathbf{b}$ by padding the vector of target properties $\mathbf{y}$ by an $M$-long vector of zeros \begin{equation} \mathbf{b} = \begin{bmatrix} \mathbf{y} \\ \mathbf{0} \end{bmatrix} \end{equation} we rewrite \cref{eq:GP_sparse} as the solution of the least-squares problem \begin{equation} \min_\mathbf{c} (\mathbf{A}\mathbf{\kc} - \mathbf{b})^T (\mathbf{A}\mathbf{\kc} - \mathbf{b}) \end{equation} that leads to the solution in the form of \begin{equation} \mathbf{\kc} = (\mathbf{A}^T \mathbf{A})^{-1} \mathbf{A}^T \mathbf{b} \textrm{.} \label{eq:sparseAsolution} \end{equation} A numerically stable solution can be obtained by first carrying out a QR factorisation of $\mathbf{A} = \mathbf{Q} \mathbf{R}$ where $\mathbf{Q}$ is orthogonal, namely, it is formed by \emph{orthonormal} column vectors: \begin{equation} \mathbf{Q}^T \mathbf{Q} = \mathbf{I} \textrm{,} \end{equation} while $\mathbf{R}$ is an upper triangular matrix. Substituting the factorised form of $\mathbf{A}$ into \cref{eq:sparseAsolution} results in \begin{equation} \mathbf{\kc} = (\mathbf{R}^T \mathbf{Q}^T \mathbf{Q} \mathbf{R})^{-1} \mathbf{R}^T \mathbf{Q}^T \mathbf{b} = \mathbf{R}^{-1} \mathbf{Q}^T \mathbf{b} \textrm{.} \end{equation} The computational complexity of creating $\mathbf{A}$ is determined by the cost of creating its two constituent blocks. 
The calculation of the upper block scales as $\mathcal{O}(MN)$, due to $\boldsymbol{\Sigma}$ being diagonal, while the Cholesky factorisation resulting in the lower block scales as $\mathcal{O}(M^3)$, resulting in an overall scaling $\mathcal{O}(MN)$, as $N \gg M$. The QR factorisation of $\mathbf{A}$ requires $\mathcal{O}(M^2N)$ floating point operations, hence dominating the overall cost of evaluating \cref{eq:sparseAsolution}. We note that multiplying by $\mathbf{R}^{-1}$ can be implemented as a series of back substitution operations, due to the upper triangular matrix form of $\mathbf{R}$. \section{Implementation} The workflow of obtaining the sparse or representative points and associated vector of weights $\mathbf{\kc}$ from a set of reference \textit{ab initio\xspace}{} configurations is implemented in the \texttt{gap\_fit}\xspace{}{} program, and distributed as part of the software package \Ac{QUIP}, which is a Fortran package implementing atomistic simulation tools, including low-level functions to manipulate atomic configurations, a selection of interatomic potentials, tight-binding models and the \ac{GAP} framework. The source code is publicly available on Github~\cite{QUIPGAPGithub}. \subsection{Program structure} \begin{figure} \includegraphics[width=0.7\columnwidth]{gapfit_schema.pdf} \centering \caption{Schema of \texttt{gap\_fit}\xspace{}{} using serial/thread-parallel (black arrows) and data-parallel (blue arrows) execution code paths.} \label{fig:structure} \end{figure} The \texttt{gap\_fit}\xspace{}{} program is controlled via a set of command line arguments consisting of key-value pairs, which can also be passed as a configuration file. The program also requires a set of reference configurations in the extended XYZ format~\cite{XYZ}, containing any combination of total energies, forces and virial stresses, and optionally, the definition of a baseline potential model. The major steps of the fitting process are outlined in \cref{fig:structure}. 
After initialisation and reading of the command line arguments, the training structures are parsed for the number of target properties: total energies, forces and virial stress components, to determine the value of $N$. Based on the descriptors, the amount of storage space needed for the descriptor arrays $\mathbf{x}$ and their derivatives $\mathbf{x}'$ with respect to Cartesian coordinates is calculated and then allocated. From the descriptor vectors, $M$ are chosen as a representative (sparse) set. The procedure for choosing them can be controlled by command line arguments, including selecting a random subset, clustering and CUR-based approaches~\cite{Mahoney.2009}. It is also possible to provide the chosen representative points via files, an option we make use of for the parallel version (see \cref{sec:parallel}). After setting the sparse points, the covariance matrices $\mathbf{K}_{MN}$ and $\mathbf{K}_{MM}$ are calculated. From these, matrix $\mathbf{A}$ is constructed and \cref{eq:sparseAsolution} is solved via QR decomposition using linear algebra routines, with \Ac{LAPACK} for single node applications. The intermediate processing, such as the computation of the elements of covariance matrices, had already been augmented by \Ac{OpenMP} directives along the target data dimension $N$, which led to a thread-based parallelisation on a single process. This, however, restricts the program to the memory and processing resources of a single node, and performance is further limited by the fact that the speed at which a computational core can access an arbitrary memory region is inhomogeneous due to the physical layout of the memory. That results in a decrease of efficiency when additional threads are employed, leading to a degradation of performance which prevents full utilisation of all available cores in a node.
We present the parallel scalability of a test problem in \cref{fig:openmp_scaling}, where we varied the size of contiguous subsets of \ac{OpenMP} loops, referred to as \emph{chunks}. \begin{figure} \centering \includegraphics[width=\columnwidth]{img/chunksizes.png} \caption{Scaling of computing time of a non-MPI \texttt{gap\_fit}\xspace{}{} calculation with the number of \ac{OpenMP} threads for different chunk sizes. The reference time is \SI{653}{s}.} \label{fig:openmp_scaling} \end{figure} As an example of the limitations imposed by the OpenMP implementation of \texttt{gap\_fit}, the practical problem of fitting a \ac{GAP} for carbon~\cite{Rowe.2020} --- one of the largest single-element training datasets assembled to date --- took more than 6 days on a single node and required more than \SI{1}{TB} memory to accommodate the design and covariance matrices~\cite{gc:private}. This restricted the ability of practitioners to build complete training sets or to experiment with choices of hyperparameters. \subsection{Multi-process parallelisation} \label{sec:parallel} To go beyond the limitations posed by the shared memory requirement, poor parallel performance, and specialist hardware, we propose a multi-process framework with data distribution and inter-node communication. We have established in \cref{sec:theory} that both the memory requirement and the computational effort scale linearly with the number of target properties $N$, therefore it is convenient to distribute memory and tasks along this dimension of the problem. The two most memory intensive operations are the calculation of the descriptor vectors together with their gradients, and the covariance matrices. The ratio of these depends strongly on the particulars of the fitting problem, in particular the dimensionality $d$ of descriptor vectors and the number of sparse points $M$. 
In our parallel scheme, we distribute atomic configurations across independent processes, such that the number of target properties per process is as even as possible. We note that the size of individual atomic configurations may be highly inhomogeneous, therefore the number of forces per configuration can vary substantially across the database, necessitating an extra step that determines the optimal spread of data. We have employed a greedy algorithm that first collects configurations in a list and sorts them by descending number of target properties. We then assign the largest (by target property) unassigned configuration to the process which currently has the least total number of target properties. This process repeats until the list is exhausted. With the configurations allotted to \ac{MPI} processes, the descriptor calculations may proceed locally, and once completed, individual portions of $\mathbf{K}_{MN}$, denoted as $\mathbf{K}_{Mn}$, can be evaluated. For this, copies of the sparse set of $M$ descriptor values need to be present locally, the particulars of which we discuss later in \cref{sec:sparse}. The na\"ive solution of the linear system represented by \cref{eq:GP_sparse} may be adapted trivially to a distributed $\mathbf{K}_{MN}$: the terms $\mathbf{K}_{MN} \boldsymbol{\Sigma}^{-1} \mathbf{K}_{NM}$ and $\mathbf{K}_{MN} \boldsymbol{\Sigma}^{-1} \mathbf{y}$ can be calculated locally and reduced across processes as \begin{equation} \mathbf{K}_{MN} \boldsymbol{\Sigma}^{-1} \mathbf{K}_{NM} = \sum_{n \in N} \mathbf{K}_{Mn} \boldsymbol{\Sigma}_n^{-1} \mathbf{K}_{nM} \end{equation} and \begin{equation} \mathbf{K}_{MN} \boldsymbol{\Sigma}^{-1} \mathbf{y} = \sum_{n \in N} \mathbf{K}_{Mn} \boldsymbol{\Sigma}_n^{-1} \mathbf{y}_n \end{equation} where we denote distributed blocks of $\boldsymbol{\Sigma}$ and $\mathbf{y}$ by $\boldsymbol{\Sigma}_n$ and $\mathbf{y}_n$, respectively. The rest of the calculation only involves matrices up to the size of $M\times M$.
However, the direct solution, as described in \cref{sec:theory}, is numerically unstable, therefore we need to adapt the solution based on the QR-factorisation. The \Ac{ScaLAPACK} library provides some of the linear algebra features of the \Ac{LAPACK} library for distributed matrices, most commonly leveraging the \Ac{MPI} framework for communication between nodes, which is widely available on computing clusters. We chose to leverage the \ac{ScaLAPACK} implementation, therefore we need to take \ac{ScaLAPACK}'s data distributing principles into consideration. The procedure names are the same as for \Ac{LAPACK} but with a prefix \code{p}, e.g. the QR-factorisation subroutine is \code{pdgeqrf} instead of \code{dgeqrf}. For the rest of our discussion, we will use the prefixed names. \Ac{ScaLAPACK} requires that matrices are block-cyclically distributed on a 2D processor grid. This is a generalisation of cyclic and block distribution, both of which can be used as special cases. Considering a matrix $\mathbf{A}_{R \times C}$ with $R$ rows and $C$ columns, we can cut it into blocks $\mathbf{a}_{r \times c}$. The last blocks in each row or column may have fewer columns or rows. The blocks are then distributed in a round-robin fashion amongst the processors in the $p \times q$ processor grid, wrapping around the grid until all blocks have been assigned. For our use-case we start by considering a block distribution along the rows for a processor grid of $p \times 1$. This entails a row block size equal to the local number of rows for each process ($\mathbf{a}_{r \times C}$ and $\mathbf{b}_{r \times 1}$ with $r = \lceil R / p \rceil$). We fill these blocks by assigning each structure (i.e. several rows of $\mathbf{a}$) to a single process, so that each atomic configuration is local on exactly one process. The solution to $\mathbf{A} \mathbf{\kc} = \mathbf{b}$ is invariant to swapping rows of $\mathbf{A}$ as long as the corresponding entries in $\mathbf{b}$ are swapped accordingly.
This allows us to choose the row block size freely while arriving at the same result irrespective of the assignment of atomic configurations to processes. The column block size is unrestricted, since each row is fully assigned to a single process. Our greedy algorithm, as described above and presented in \cref{fig:distributeA}, distributes atomic configurations and rows of $\mathbf{L}_{MM}^T$ such that each local $\mathbf{A}_n$ block is as equal in size as possible. \Ac{ScaLAPACK} requires that all processes use a uniform block factor for all their blocks. To fill the gaps left by the distribution, a padding of zero rows (i.e. rows filled with zeroes) is inserted into both $\mathbf{A}$ and $\mathbf{b}$. The distribution strategy and the block size settings of \ac{ScaLAPACK} should ensure that the number of padding rows is kept to a minimum to avoid incurring memory and efficiency penalties. \begin{figure} \includegraphics[width=\columnwidth]{matrix_plain.svg.png} \centering \caption{Serial (left) and distributed (right) solution of $\mathbf{A} \mathbf{\kc} = \mathbf{b}$. The input training data is distributed across the \Ac{MPI} processes P1, P2, P3 to balance load (but the original order of rows is preserved on each). The $\mathbf{L}_{MM}^T$ matrix (yellow) is appended as a whole in the serial implementation, while its rows are distributed in the parallel implementation. Each local $\mathbf{A}_i$ is filled with zero rows (white) to adjust to uniform matrix size, which is a multiple of the block size.} \label{fig:distributeA} \end{figure} Solving the linear system via QR decomposition with \Ac{ScaLAPACK} is split into three steps. First, $\mathbf{A}$ is converted using \code{pdgeqrf} into the upper triangular matrix $\mathbf{R}$ and the Householder reflectors $\mathbf{H}$, which occupy the remaining lower triangular elements of $\mathbf{A}$. The latter is accompanied by an array $\mathbf{\tau}$ of weights.
Reflectors and weights are then used by \code{pdormqr} to perform the multiplication $\mathbf{Q}^T \mathbf{b}$. Finally, the linear system represented by the upper triangular matrix $\mathbf{R}$ is solved by \code{pdtrtrs}, utilising the back substitution algorithm, to give $\mathbf{\kc} = \mathbf{R}^{-1} \mathbf{Q}^T \mathbf{b}$. We note that there is a requirement in \code{pdtrtrs} that the row and column block sizes must be equal. Setting the column block size ($c$) to the generally much larger row block size ($r$) is formally possible, but this drastically increases the size of the working arrays the \ac{ScaLAPACK} routines require, which scale with the square of the column block size ($\propto c^2 + r c$). Setting instead the row block size ($r$) to the column block size ($c$) implies adding additional zero rows for padding the local matrices to maintain the divisibility by the block factor and thus the assignment of the configurations to the processes. Both of these approaches result in increased memory requirements and a deterioration of computational efficiency. However, it is possible to exploit the fact that, since our distribution strategy relies on a single processor column ($q = 1$), changing the column block size does not affect the distribution of the data. We can therefore use one column block size for the first two calls (\code{pdgeqrf}, \code{pdormqr}) and then change that value for the third call (\code{pdtrtrs}) to fulfill its requirement without decreasing the efficiency of the former calls. Being able to control both block sizes independently revealed that a moderate column block size of about 100 is optimal for both memory usage and efficiency. For such a setting, the row block size does not have a significant impact on parallel efficiency.
\subsection{Sparse point selection}\label{sec:sparse} In \texttt{gap\_fit}\xspace{}{}, the set of $M$ sparse points is typically determined as a subset of all descriptor values, although for low-dimensional descriptors such as bond length it is convenient to use a uniform grid. Depending on the method, the selection of sparse points may depend on the values of descriptor vectors calculated from the entire training data set. If the descriptors are distributed, clustering or CUR-based methods require fine-tuned communication between the processes, and for simplicity we suggest a two-step workflow. Since the calculation of descriptor vectors -- without their gradients -- is not computationally expensive and memory storage is not a concern, sparse point selection can be performed using serial execution. We first run \texttt{gap\_fit}\xspace{}{} on a single process, optionally using \ac{OpenMP} threading, to select sparse points, which are written into files, and then terminate the run, which can be achieved by the command line argument \code{sparsify\_only\_no\_fit=T}. The output is converted to input via a helper script for the subsequent run using \Ac{MPI} processes. This step can be skipped if the sparse points file has been provided by external means or can be reused from an earlier calculation. \subsection{Peak memory usage} One of the pitfalls of running a formerly serial program in parallel with distributed data is that duplicate data may accumulate unnecessarily, especially if multiple processes are run on the same node, and therefore shared memory can be utilised. For example, it is convenient to calculate the matrix $\mathbf{K}_{MM}$ on each process because it only depends on the sparse points, and its size does not depend on the training set. However, each process requires only a small part of the resulting matrix $\mathbf{L}_{MM}$, and storing multiple copies of $\mathbf{K}_{MM}$ adds an unnecessary constant overhead to the memory requirements.
To prevent the allocation of possibly several GB memory per process, $\mathbf{K}_{MM}$ is only calculated on a single process, then converted to $\mathbf{L}_{MM}$, and only the necessary rows are distributed via \code{mpi\_scatterv} calls to independent \ac{MPI} processes. It is also important to avoid inadvertent duplication of data when converting between data structures used by different parts of the program. This can be alleviated by performing calculations directly on the data memory as \Ac{LAPACK} does. For user-defined types we use pointers to the original matrices to prevent copying of data. Further, source data of relevant size is deallocated after final usage. This decreases the memory overhead per \Ac{MPI} process and therefore also the peak memory requirement of the program. \Cref{fig:memory_schema} shows schematically how the memory usage of \texttt{gap\_fit}\xspace{}{} run changes over time. For our program there are two parts of the execution which may lead to peak memory usage. The main one is after allocating the descriptors, especially the derivatives $\mathbf{x}'$. After the covariance calculation of each descriptor, we deallocate the corresponding source data. This is reflected by the step-wise decline of the memory. The other peak manifests towards the end of a program run when the matrices $\mathbf{K}_{MM}$ and then $\mathbf{A}$ are assembled and the linear system is subsequently solved. \Ac{ScaLAPACK} requires additional work arrays for some of its routines depending on the block sizes of the distributed matrices, especially the column block size. \begin{figure} \centering \includegraphics[width=0.9\columnwidth]{memory_schema.pdf} \caption{Schematic memory usage during \texttt{gap\_fit}\xspace{}{} run over time. Descriptors (5 shown) $\mathbf{x}$ and their derivatives $\mathbf{x}'$ constitute the majority of the first peak. The memory associated with them is released after each processing, leading to a step-wise decline. 
Matrices $\mathbf{K}_{MM}$ and $\mathbf{A}$ and working arrays for solving the latter make up the second peak, which can be shallower than depicted here.} \label{fig:memory_schema} \end{figure} \section{Practical Examples} \begin{figure*} \centering \begin{subfigure}[b]{0.48\textwidth}% \centering% \includegraphics[width=\textwidth]{img/hea4k_itime_mem.png}% \label{fig:hea4k_itime_mem}% \end{subfigure} \hfill \begin{subfigure}[b]{0.48\textwidth}% \centering% \includegraphics[width=\textwidth]{img/sic10k_itime_mem.png}% \label{fig:sic10k_itime_mem}% \end{subfigure} \caption{Adjusted speedup (reference time / current time) and total memory requirements (inset) of \texttt{gap\_fit}\xspace{}{} vs cores (72 per node) with different splits between \Ac{MPI} tasks per node vs \Ac{OpenMP} threads per task. Fitting times are shown for the HEA model (left, \num{396178} target properties) and the SiC model (right, \num{2482085} target properties), with reference times \SI{24449}{s} and \SI{31626}{s}, respectively. Both models used \num{20300} representative (sparse) points.} \label{fig:examples} \end{figure*} Initial proof-of-principle fitting runs of a silicon dataset~\cite{Bartok.2018} showed that the fitting weights from an \Ac{MPI} run are comparable to those from serial runs when using the same representative set. The difference can be attributed to numerical uncertainties, noting that even two runs initiated with identical parameters may differ to the same magnitude. Specifically, the order of numerical operations is non-deterministic and floating point arithmetic is neither associative nor distributive, leading to small differences in covariance matrices in different executions. Ill-conditioning of matrix $\mathbf{A}$ amplifies the noise due to the different order of operations, leading to only a few comparable significant digits in the resulting weights.
We have therefore tested the accuracy of the predictions with the resulting potential models using the $\Delta$ metric suggested to compare \ac{DFT} packages~\cite{Lejaeghere.2016}. We found that equivalent \ac{GAP} models differ only up to \SI{1}{\mu eV}, indicating excellent reproducibility despite the differences in the weights. We then applied the implementation with varying proportions of \Ac{MPI} processes and \Ac{OpenMP} threads on two example training sets, consisting of \Ac{HEA} and silicon carbide (SiC) datasets. These calculations were performed on the Raven cluster of the Max Planck Computing and Data Facility. Each node has 72 cores and either \SI{256}{GB} or \SI{512}{GB} of RAM. For single-node calculations we used high-memory nodes with \SI{2048}{GB} of RAM. In both cases we combined two-body descriptors with \ac{SOAP} descriptors~\cite{Bartok.2013}. We assign separate two-body descriptors for each pair of species, and separate \ac{SOAP} descriptors for each species. \Cref{fig:examples} depicts the inverse time relation with respect to the number of cores for different \Ac{MPI} to \Ac{OpenMP} ratios (T:C), e.g. ``36:2'' means that 36 \Ac{MPI} processes were used per node (with 72 cores), each with two \Ac{OpenMP} threads. This resembles Amdahl's law \begin{equation} S(n) = t(1) / t(n) = 1 / [(1 - p) + p / n], \end{equation} where the speedup $S$, i.e. the relative time for a serial run $t(1)$ versus a parallelised one $t(n)$, depends on the number of cores $n$ and the relative time $p$ spent in portions that benefit from multiple cores. Our training systems were too large to be run on a single core within the maximum wall-time limit, so we used the highest time available instead for our adjusted speedup ($S^*$). Because of this, the values are only comparable within the same training set.
Note that these timings may contain some random noise due to other calculations on the cluster (albeit not on the same nodes) and generally unpredictable behavior of interconnect, network, hardware or the operating system in general. The insets show the total memory required for these calculations across all nodes, estimated from the average resident set size (AveRSS) as queried from the queueing system. \subsection{High-Entropy alloy} The high-entropy MoNbTaVW alloy (\Ac{HEA}) training set~\cite{Byggmastar:2021} consists of 2329 configurations -- each containing an uneven number of atoms -- with 2329 total energies, $\num{383739}$ forces, and $\num{10110}$ virials for a total of $N = \num{396178}$ target properties. With 20 sparse points per two-body descriptor (15) and 4000 per SOAP descriptor (5) the total number of sparse points is $M = \num{20300}$. Thus, $\mathbf{A}$ consists of $n_\mathbf{A} = \num{8454503400}$ elements and occupies about \SI{67.6}{GB}. Looking at \cref{fig:examples}, using only a few nodes, it is advantageous to use as many cores for \Ac{MPI} as possible to reduce the total runtime. For 72:1 ratio this trend changes somewhere between 864 and 1152 cores where the time stays constant with increasing cores. The same happens for 36:2 ratio between 1728 and 2304 cores, and for 24:3 between 2304 and 3456 cores. This behaviour stems from the choice of our implementation, which splits the training set along the structures, which cannot be done arbitrarily for a finite set. For these 2329 configurations, the limit is at about two structures per \Ac{MPI} process. The reference time for this set is \SI{24449}{s} (6.8 hours), obtained from the single (high-memory) node calculation with 18:4 split of cores. The 72:1 run is about twice as fast (1.85) for a single node. Using six nodes (432 cores) adds another factor of 4.27 for a total of 7.91. 
From there the benefits dwindle: only another factor of 1.16 (10.23 in total) despite doubling to twelve nodes, down to the constant speedup of ca.\ 12.6 around 16 nodes. The 36:2 configuration starts slower at 1.57 for one node, achieves almost the same speedup as 72:1 for eight nodes (8.38) and is even faster for twelve (11.57 vs 10.23). It achieves a speedup of 18.59 at its best but decreases to 17.53 for 64 nodes. The same happens for 24:3, from a maximum of 20.55 down to 19.72. The 18:4 configuration achieves 20.88 at 64 nodes. This efficiency comes at the cost of a memory overhead, which increases approximately linearly with the number of cores. The higher the portion of \Ac{MPI} usage, the steeper this overhead is. In fact, the graphs coincide if they are plotted against the number of \Ac{MPI} processes (not shown): in that case the slope ranges between \num{1.4} and \SI{1.8}{GB} per \Ac{MPI} process. For 72:1 this results in \SI{2.60}{TB} on 16 nodes and \SI{9.42}{TB} on 64. The former is comparable to the \SI{2.66}{TB} 18:4 uses on 64 nodes, since both apply 1152 \Ac{MPI} processes. \subsection{Silicon carbide} The 4865 silicon carbide (SiC) systems of this training set contain 4865 energies, $\num{2448030}$ forces, $\num{29190}$ virials for a total of $N = \num{2482085}$ target properties. With 100 sparse points per two-body descriptor (3) and $\num{10000}$ per SOAP descriptor (2) the total number of sparse points is $M = \num{20300}$. Thus, $\mathbf{A}$ consists of $n_\mathbf{A} = \num{50798415500}$ elements and occupies \SI{406.4}{GB}. Due to the much larger training set, not all node configurations from the \Ac{HEA} set were viable, especially for lower node numbers. The reference time is \SI{31626}{s} for a 24:3 run on a single node. The trend that more \Ac{MPI} processes are more efficient holds up here as well but the processes are not saturated as rapidly as in the case of the \Ac{HEA} system. 
This effect may start between 3456 and 4608 cores for the full \Ac{MPI} run (72:1), which is later than in the \Ac{HEA} set even taking the structure numbers into account because of the proportionally lower number of small structures in this larger set of only two species. The memory overhead per \Ac{MPI} process is between \num{2.1} and \SI{2.2}{GB}. \section{Conclusion and Outlook} The recent addition of \Ac{MPI} parallelisation to our program \texttt{gap\_fit}\xspace{} by using the \Ac{ScaLAPACK} library makes it possible to split the training data evenly into batches to be processed independently up to the solving of the linear system. It alleviates the need for high-memory nodes, so commodity HPC nodes may be used for arbitrarily large fitting problems, while computation time has been significantly reduced due to the pleasingly parallel algorithm. Thus larger training sets do not impede the computation and more sparse points can be used, increasing the accuracy of the model. We showed the time scaling and memory requirements for varying proportions of \Ac{MPI} processes vs \Ac{OpenMP} threads in two example training sets, consisting of a high-entropy alloy (\Ac{HEA}) and silicon carbide (SiC). It is generally advisable to use most of the processors for \Ac{MPI} in terms of computational efficiency, so even a single-node run benefits from this new feature. It is especially effective for larger training sets while the sparse points are covered by the \Ac{OpenMP} threads. One should keep the total number of \Ac{MPI} processes below some fraction of the total number of structures, e.g.\ 0.5, so that an even distribution is still possible. The memory overhead due to the parallelisation has been reduced but is still significant. Depending on the available memory resources, a higher share of \Ac{OpenMP} threads is preferable. We are confident that an even smaller memory footprint will be achieved in further development. 
The highest impact on both \Ac{MPI} and non-\Ac{MPI} memory requirements would be to fully restructure the descriptor processing loop so that each descriptor is processed fully before the next one. In practical tests we have seen that the parallel \code{gap\_fit} code can decrease the time required to fit potentials from days to minutes. We anticipate this will be an important step to enable potential fitting to be embedded within other higher-level workflows such as active learning~\cite{Li2015,Vandermause2020} and committee models~\cite{Imbalzano2021}, as well as enabling model hyperparameters to be tuned or optimised, known to be important for improved uncertainty quantification~\cite{Vandermause2020}. \begin{acknowledgments} We thank Harry Tunstall for providing the SiC dataset and early testing as well as G\'abor Cs\'anyi and Miguel Caro for useful discussions. This work was financially supported by the NOMAD Centre of Excellence (European Commission grant agreement ID 951786) and the Leverhulme Trust Research Project Grant (RPG-2017-191). ABP acknowledges support from the CASTEP-USER project, funded by the Engineering and Physical Sciences Research Council under the grant agreement EP/W030438/1. We acknowledge computational resources provided by the Max Planck Computing and Data Facility through the NOMAD CoE, the Scientific Computing Research Technology Platform of the University of Warwick, the EPSRC-funded HPC Midlands+ consortium (EP/T022108/1) and ARCHER2 (\url{https://www.archer2.ac.uk/}) via the UK Car-Parrinello consortium (EP/P022065/1). We thank the technical staff at each of these HPC centres for their support. For the purpose of Open Access, the author has applied a CC-BY public copyright licence to any Author Accepted Manuscript (AAM) version arising from this submission. 
\end{acknowledgments} \bibliographystyle{apsrev4-2} \section{Dump} \subsection{Text} The memory requirement for a program run depends on the structures, target properties, sparse points, and GAP definitions. A single node usually does not have more than one terabyte of RAM, often much less than that. This restricted the broad application of QUIP/GAP to smaller models, training sets, and/or special high-memory nodes. To alleviate this restriction, we updated Gap Fit so it can be run with MPI in an SPMD\footnote{single program, multiple data} paradigm using the ScaLAPACK library for linear algebra computation on distributed data. Our goal is to provide our fitting capabilities inside a database GUI. Users will select a batch of data to fit their GAP on, get the result within a few minutes (or faster) and then be able to refine it by adding additional data. This will require a more modular fitting approach. \subsection{Structures} \begin{align} \mathbf{A} \mathbf{\kc} = \mathbf{b} \\ \mathbf{K}_{MM} = \mathbf{L}_{MM}\mathbf{L}_{MM}^T \\ \mathbf{Q}^T \mathbf{Q} = \mathbf{I} \end{align} The basic structure of the program is: \begin{itemize} \setlength\itemsep{0em} \item Initialise \item Read input \item Determine sizes \item Process input \item Sparsify \item Covariate \item Solve \item Finalise \end{itemize} Parallelisation: \begin{itemize} \setlength\itemsep{0em} \item Map configurations to tasks \item Distribute tasks among processes \item Read data locally for distribution \item Factorise and solve $\mathbf{A}$ with ScaLAPACK \end{itemize} ScaLAPACK QR routines: \begin{arglist} \setlength\itemsep{0em} \item [pdgeqrf] splits $\mathbf{A}$ into $\mathbf{Q}$ (technically $\mathbf{H}$) and $\mathbf{R}$ \item [pdormqr] multiplies $\mathbf{Q}^\top$ with $\mathbf{b}$ \item [pdtrtrs] solves $\mathbf{R}$ and $\mathbf{Q}^\top\mathbf{b}$ for $\mathbf{\kc}$ \end{arglist} \subsection{QR sizes} \begin{align} \underset{m \times n}{A} \underset{n \times 1}{x} &= \underset{m
\times 1}{b} \\ \underset{m \times m}{Q} \underset{m \times n}{R} \underset{n \times 1}{x} &= \underset{m \times 1}{b} \end{align} If $m \ge n$ \begin{align} \begin{bmatrix}\underset{m \times n}{Q_1} & \underset{m \times m-n}{Q_2}\end{bmatrix} \begin{bmatrix}\underset{n \times n}{R_1} \\ \underset{m-n \times n}{R_2}\end{bmatrix} \underset{n \times 1}{x} &= \underset{m \times 1}{b} \\ \underset{m \times n}{Q_1} \underset{n \times n}{R_1} \underset{n \times 1}{x} &= \underset{m \times 1}{b} \\ \underset{n \times n}{R_1} \underset{n \times 1}{x} = \underset{n \times m}{Q_1^T} \underset{m \times 1}{b} &= \underset{n \times 1}{c} \end{align} If $m < n$ \begin{align} \underset{m \times m}{Q} \begin{bmatrix}\underset{m \times m}{R_1} && \underset{m \times n-m}{R_2}\end{bmatrix} \underset{n \times 1}{x} &= \underset{m \times 1}{b} \\ \underset{m \times m}{Q} \underset{m \times n}{R} \underset{n \times 1}{x} &= \underset{m \times 1}{b} \\ \underset{m \times n}{R} \underset{n \times 1}{x} = \underset{m \times m}{Q^T} \underset{m \times 1}{b} &= \underset{m \times 1}{c} \end{align} \begin{align} \underset{\downarrow \times n}{\tilde R} \underset{n \times 1}{x} = \underset{\downarrow \times m}{\tilde Q^T} \underset{m \times 1}{b} &= \underset{\downarrow \times 1}{\tilde c} \end{align}
1,108,101,565,862
arxiv
\section{Introduction} \label{sec:intro} \subsection{The Radius Gap} The \textit{Kepler} mission, the first dedicated space-based search for exoplanets, revolutionized our understanding of planet formation by detecting hundreds of super-Earth and sub-Neptune-sized planets \citep[e.g.,][]{batalha2014,thompson2017}. The \textit{Kepler} observations indicate that, for orbital periods $\lesssim$400 days, small planets (1-4~R$_\oplus$\xspace) are much more frequent in the Galaxy than larger, Saturn and Jupiter-sized planets \citep{howard2012,dressing&charbonneau2013,fressin2013,petigura2013,ciardi2015,burke2015}, which were previously the most commonly detected planets \citep[e.g.,][]{marcy2005,udry2007,wright2012}. Furthermore, \cite{fulton2017} (hereafter F17) recently showed that by incorporating more precise, uniformly-derived stellar parameters (and thus stellar radii estimates) than the original \textit{Kepler} Input Catalog (KIC) values, a bimodality in both the observed and intrinsic distributions of small planets, previously hidden by larger planet radii uncertainties, is exposed. The authors detect a gap in the radius distribution between 1.5 and 2~R$_\oplus$\xspace, and determine that planets above and below the gap have nearly equal completeness-corrected occurrence rates but those within the gap have an occurrence rate decreased by $\gtrsim 50$\%. The location of this gap in both the observed and intrinsic planet radius distributions is noteworthy because it occurs around the radius (1.6~R$_\oplus$\xspace) at which planets are thought to shift from being rocky to gaseous \citep{rogers2015,marcy2014}. A gap in the intrinsic planetary radius distribution between 1.5 and 2.5~R$_\oplus$\xspace was predicted by \cite{owen&wu2013} in their theoretical study of thermal contraction and hydrodynamic evaporation of volatile envelopes. 
Similar studies by \cite{lopez2012} and \cite{lopezfortney2013} examined the role that thermal evolution and mass loss play in individual exoplanetary systems -- Kepler-11 and Kepler-36, respectively -- and also generalized their results to predict the frequency of planets as a coupled function of orbital period and thus XUV radiation from the host star, and core composition. However, \cite{lopezfortney2013} find a less significant and also different location of the radius gap (around 2-2.5~R$_\oplus$\xspace) as compared to \cite{owen&wu2013}, due to the differences in parameter space exploration of Lopez \& Fortney, including many different combinations of core mass and initial composition. After F17 published observational evidence of a clear exoplanet radius gap, a new study by \cite{owen&wu2017} provided a simple analytical model predicting that photoevaporation of volatile envelopes naturally herds planets into two groups. The first group is comprised of planets where the hydrogen/helium envelope size is less than the core size (and less than a few percent mass) of the planet and is thus stripped away, leaving a bare core. The second group is comprised of planets where the hydrogen/helium envelope is roughly the same size as the core (and a few percent mass) of the planet and the timescale for mass loss is longest. By assuming a constant, Earth-like core composition and a distribution of core sizes centered at 3~M$_\oplus$\xspace, the Owen \& Wu model predicts two peaks in the planet radius distribution, coincident with those observed by F17. With a different core composition, the gap shifts, and with a range of core compositions, it is smeared out. The radius gap then appears to be a necessary outcome of both homogeneous core compositions of small planets and the photoevaporation of their volatile envelopes. 
\subsection{The Role of Stellar Multiplicity in Exoplanet Radius Derivations} \subsubsection{Detected Companions} In their analysis of the California Kepler Survey Sample \citep{petigura2017} of planet radii, F17 applied a series of filters, removing \textit{Kepler} Objects of Interest (KOIs) with orbital periods longer than 100 days, known false positives \citep{morton2011,morton2012,morton2016,kolbl2015}, impact parameters larger than 0.7, exoplanets around dim stars, exoplanets around giant stars, and planets orbiting stars with effective temperatures below 4700 K and above 6500 K. These filters resulted in a sample size decreased from 2025 KOIs with well-characterized parameters to 900 after the filtering process. The corresponding exoplanet radii were then re-derived using the light curve parameters of \cite{mullally2015}. In all of the \textit{Kepler} data releases (DRs) \citep{mullally2015,thompson2017,thompson2018}, all KOIs are assumed to be single unless an additional entry in the KIC appears within the pipeline aperture used for the photometry, in which case the light curve is adjusted for the excess flux of the KIC star (see more in \S2.1.1). However, we know \citep[e.g.][]{adams2012,adams2013,adams2014,horch2014,cartier2015,gilliland2015,torres2015,everett2015,barclay2015,hirsch2017} that unseen stellar companions can and have influenced the determination of transiting planet radii, and that $\sim$50\% of exoplanet host stars are in multiple star systems \citep{horch2014,furlan2017,matson2018}, similar to stars not known to host exoplanets \citep[e.g.,][]{raghavan2010,duchene&kraus2013}. If a KOI is assumed to be a single object, then any light emitted by stellar companion(s) in the same photometric aperture can contribute to the measured flux of the primary star. If a planet transits a star with an overestimated flux, the transit depth will appear shallower, and the derived planetary radius will be underestimated. 
This uncertainty in the measured planetary radius is augmented further by the uncertainty around which star the planet orbits (primary or secondary), especially if the distance to the secondary star, and thus whether it is actually bound to the primary star or not, is unknown. The ratio of the true planet radius to the observed radius is \begin{equation} \frac{R_{p}(true)}{R_{p}(observed)} = \frac{R_{t \star}}{R_{1 \star}} \sqrt{\frac{F_{total}}{F_t}} \equiv X_R \end{equation} \noindent where $R_{1 \star}$ is the radius of the primary star, and $R_{t \star}$ and $F_t$ are the radius and brightness of the star the planet is actually transiting \citep{ciardi2015}. F17 looked into removing from their sample the KOIs with known companions or large dilution corrections, but found no significant difference in the resulting observed exoplanet radius distribution and chose not to filter their catalog based on high-resolution imaging. The compilation of high-resolution imaging the F17 authors referenced is \cite{furlan2017}, which consists of 1903 primary KOIs and their 2297 known companions observed by various sources, including the \textit{Kepler} Follow-Up Observation Program \citep{lillo-box2012,lillo-box2014,horch2012,horch2014,everett2015,gilliland2015,cartier2015,wang2015a,want2015b,kraus2016,baranec2016,ziegler2017a,ziegler2017b,ziegler2018a,adams2012,adams2013,dressing2014,law2014,howell2011,horch2011}. These observations -- mostly from near-infrared AO and optical speckle -- of the separation, magnitude difference, and position angle between primary and companion stars were used by Furlan et al. to calculate correction factors ($X_R$, as defined in Eq. 1) for planet radii taking into account the ``third light'' contamination of the stellar companions. These factors were calculated under two separate assumptions -- the planets orbit the primary star (Furlan, Table 9) and the planets orbit the detected stellar companion (Furlan, Table 10). 
The sample in the Furlan catalog represents a biased group of the ``most interesting'' targets for planet confirmation, and is not complete. However, they find that $\sim$10\% of KOIs in their sample have a stellar companion within 1$\arcsec$ and $\sim 30$\% have a companion within 4$\arcsec$ (one \textit{Kepler} pixel). The observed fraction of stellar companions is expected to be \textit{lower} than the actual fraction due to sensitivity and completeness limitations \citep{furlan2017}. That is, based on sample selection, observing conditions, and the sensitivity and resolution of the available instruments, the true fraction of KOIs with companions is expected to be higher than these fractions, especially considering companions that are faint ($\Delta$mag$\gtrsim 6-8$) and/or very close ($\lesssim 0.1 \arcsec$ projected separation) to the primary star. This expectation motivates the work described in this paper to help quantify the effects of undetected companions. \subsubsection{Undetected Companions} \vspace{-5pt} What effect, then, do \textit{undetected} companions have on exoplanet radius estimates? \cite{ciardi2015} investigated this question for gravitationally bound companions, calculating probabilistic radius correction factors for planets based on expected stellar multiplicity rates and companion parameters from studies of field stellar populations. First Ciardi et al.~identified an appropriate isochrone for each KOI in the 23 October 2014 Kepler catalog, and then considered as viable companions all of the stars following the same isochrone with absolute Kepler magnitudes fainter than the target KOI. 
These potential fainter companions were used to derive the planetary radius corrections considering six multiplicity scenarios: a single star ($X_R = 1$), a binary system in which the planet orbits the primary star, a binary system in which the planet orbits the companion, a triple star system in which the planet orbits the primary star, a triple system in which the planet orbits the secondary star, and a triple star system in which the planet orbits the tertiary star. In cases in which the planet orbited the primary star, only the flux dilution factor (second term in Eq. 1) was relevant, since in this case $R_{1 \star} = R_{t \star}$. Second, \cite{ciardi2015} calculated the mean radius correction factor across the six multiplicity scenarios for each KOI by (1) fitting a third order polynomial to the radius correction factor versus mass ratio for each individual multiplicity scenario, (2) convolving each multiplicity scenario polynomial fit with the mass ratio distribution from \cite{raghavan2010}, (3) calculating a weighted mean for each multiplicity scenario for each KOI, and (4) convolving the six scenario corrections with the probability of the star being single (54\%), a binary (34\%), or a triple (12\%) star \citep{raghavan2010}. In multi-star systems, \cite{ciardi2015} assumed that the planet was equally likely to orbit any one of the stars. While the mean correction factor $\langle X_R \rangle$ depends on host star temperature, the authors estimate that, on average and \textit{assuming no ground-based follow-up}, the radii of KOIs are underestimated by an average factor of $\sim 1.5$ due to undetected companions. As described below, all of the KOIs in the F17 filtered observed sample have some form of ground-based follow-up observations to search for instances of ``third light'' in the \textit{Kepler} photometric aperture. These follow-up observations will decrease the number of undetected companions, and thus the predicted radius correction factors. 
\cite{ciardi2015} take this into account by assuming the following vetting observations: A few radial velocity (RV) observations over 6-9 months that are able to detect stellar companions with $\sim 2$ year orbital periods or less, and high-resolution imaging observations that are able to detect stellar companions with separations of $\gtrsim$0.1\arcsec. The authors then use the orbital period distribution of stellar companions from \cite{raghavan2010} combined with estimates of the distance to each KOI from the observed and absolute \textit{Kepler} magnitudes (the latter inferred from the isochrone fitting using the Dartmouth isochrones) to estimate the fraction of undetected companions for each KOI. The $X_R$ factors are then recalculated, assuming that detected companions have already been corrected for in the planet radius determination, by replacing the strict probability of a star being a multiple (46\%; from \citealt{raghavan2010} but see also \S3.3) with the probability that it is a multiple and the companion is undetected. The new $\langle X_R \rangle$, assuming the ground-based vetting observations described above, is $\sim$1.20, lower than the unvetted case but still significantly above unity. Our study builds on the framework of \cite{ciardi2015} to examine how undetected companions might affect the distribution of raw planet counts as a function of radius (the \textit{observed or raw count} versus completeness-corrected exoplanet radius distribution presented in F17). In \S2.1.1, we first apply radius correction factors, mostly from \cite{furlan2017}, to KOIs that are in the F17 sample. These correction factors are for detected stellar companions, and we assume that the KOIs with detected companions harbor no additional undetected companions. In \S2.1.2, we take the remaining KOIs without detected companions, apply a modified version of the Ciardi et al. radius correction factors, and show how this affects the observed exoplanet radius distribution. 
In \S2.2, we recalculate the modified radius correction factors assuming the KOIs are at a closer distance, more akin to the likely TESS sample of planet host stars, and show how these corrections have a smaller effect on the observed exoplanet radius distribution. Finally in \S3, we discuss how the application of the radius correction factors influences the robustness of the radius gap in the observed sample (versus completeness-corrected sample) and possible small planet formation scenarios, comment on our assumptions about multiplicity of planet host vs. non-host stars, and consider the broader implications for future high-resolution imaging follow-up observations of exoplanet host stars. We summarize our results in \S4. \section{Methods \& Results} \label{sec:methods} \subsection{California Kepler Survey Sample} F17 compared their sample to the \citet{furlan2017} high-resolution imaging catalog, and found no significant change to their observed planet radius distribution by removing KOI hosts with known companions or large dilution corrections. Ultimately they chose not to filter their catalog using high-resolution imaging results. In this section, we want to answer the question, how do stellar companions affect the bimodal observed exoplanet radius distribution found in F17? (As noted in F17, it is not straightforward to fold stellar multiplicity into occurrence rate calculations, and we do not take on that task here, focusing only on the observed, ``raw counts'' exoplanet radius distribution.) For completeness we investigate the effect of both detected and undetected companions. To account for the effect of \textit{detected companions}, we cross-matched the \textit{Kepler} host star sample with high-resolution imaging observations cataloged by \citet{furlan2017} and \citet{ziegler2018b} and applied average radius correction factors calculated as in Furlan et al.. 
To account for the effect of \textit{undetected companions} we used a prescription modified from \citet{ciardi2015} to calculate updated radius correction factors, and applied these values. \subsubsection{Detected Companions} Using an updated list of KOIs with high-resolution imaging \citep{furlan2017,ziegler2018b}, we verify that \textit{all} of the 900 KOIs in the F17 filtered sample have some kind of high-resolution imaging follow-up, and of those 321 have detected companions within 4\arcsec. As described above, \cite{furlan2017} calculated the average exoplanet radius correction for each KOI from various imaging observations, with the average weighted by the inverse of the square of the uncertainty of each observation (each bandpass). These average radius correction factors are calculated assuming the planet orbits the primary star (their Table 9), and assuming the planet orbits the brightest companion star (their Table 10). To accurately account for the stellar companions reported in \citet{furlan2017} and \cite{ziegler2018b}, we first checked whether any of these systems were ``unblended'' as defined by Furlan et al., that is, whether the detected companion star was a distinct source in the KIC. If a detected companion has a KIC value, this means that during the pre-search data conditioning (PDC) module of the \textit{Kepler} data processing pipeline, the excess flux from the companion is accounted for and the light curve adjusted before it is fit for a planet radius \citep[Susan Mullally, priv. comm.]{keplermanual}. If this were the case, we would not want to account for the diluted flux a second time. For the 321 KOIs with detected companions in the F17 sample, only two of the companions had different KIC identifications, meaning that for these KOIs, their companions are accounted for in the reported planet radii. 
These stars -- KOI 1901 and KOI 4792 -- have detected companions at 3.85$\arcsec$ and 3.81\arcsec, respectively, and are removed when we applied below a constraint on the separation of companions. Thus there is no ``double'' correction applied in these cases. We next checked the angular separation of the detected companions, and filtered out KOIs with companions outside of a specific separation, either 1$\arcsec$ or 2\arcsec. The probability of a companion star being bound decreases as its spatial separation from the primary star increases -- inside 0.25\arcsec, $\sim100$\% of companions are bound, inside 1\arcsec, up to 80\% of companions are likely to be bound, and within 2\arcsec, $\lesssim50$\% of companions are likely to be bound \citep{horch2014,hirsch2017,matson2018}. Whether a companion is bound or not matters for calculating the radius correction under the assumption that the planet orbits the companion star, since in this case the correction factor includes the ratio of the stellar radii of the secondary and primary stars (see Eq. 6 in \citealt{furlan2017}). The radius of the secondary star can be determined using multi-color observations and isochrone fitting to interpolate from the primary star's stellar parameters \citep[e.g.,][]{huber2014} to the companion's parameters \citep{everett2015,hirsch2017}. However, this analysis is only possible if the two stars are bound and assumed to fall on the same isochrone. For a background star with unknown distance and interstellar extinction, it is much more difficult to accurately assess the stellar parameters, including stellar radius. We therefore chose to apply detected companion radius correction factors from \cite{furlan2017} (and those calculated based on the updated observations of \cite{ziegler2018b}) to stars with companions within 1$\arcsec$ and 2\arcsec, respectively, as these companions are most likely to be bound. 
Our analysis thus does not account for background companions, which exist at all separations but especially larger ones, since the radius correction factors are more difficult or impossible to calculate for these companions. \textit{Gaia} \citep{gaia2016,gaia2018} will enable better characterization of these ($\sim 1\arcsec$) background stars and calculation of their radius correction factors, but that is outside the scope of this paper. Furlan et al. provide two sets of radius correction factors in their work, one that is calculated assuming the planet orbits the primary star, and another assuming the planet orbits the secondary star. In Table~\ref{tab:furlan_update}, we list the average radius correction factors, under these two assumptions, for the 156 KOIs in the F17 sample that have their brightest companion within 2$\arcsec$. As the correction factor does not depend on the planet properties, we do not repeat KOIs (e.g., K00041.01 has the same $X_R$ as K00041.02 and K00041.03). To make these radius correction factors more comparable to our modified $X_R$ values based on \cite{ciardi2015} (explained in the next section), we calculated a hybrid radius correction factor from Table~\ref{tab:furlan_update}, $X_{R}^{primary}\times 0.7 + X_{R}^{secondary}\times 0.3$. In this equation, $X_{R}^{primary}$ assumes the planet orbits the primary star, and $X_{R}^{secondary}$ assumes the planet orbits the secondary star. The factors of 0.7 and 0.3 represent the probability that the planet orbits the primary versus the secondary star. Using instead an equal weighting of 0.5 and 0.5 for primary and secondary star, respectively, results in a very similar (qualitatively identical) radius distribution corrected for detected companions. 
We also note that an almost identical exoplanet radius distribution is produced if, instead of just using the $X_{R}^{primary}$ and $X_{R}^{secondary}$ values themselves, we draw $X_{R}^{primary}$ and $X_{R}^{secondary}$ values from a normal distribution centered on the mean $X_{R}$ values with a standard deviation equal to the uncertainties reported in \cite{furlan2017}, repeating the draw 500 times, and averaging the 500 raw count histograms together. \begin{table*}[t] \centering \caption{Average Radius Correction Factors} \vspace{12pt} \footnotesize \begin{tabular}{|c |c |c |} \hline KOI & Avg Radius Correction Factor & Avg Radius Correction Factor \\ & (planet orbits primary) & (planet orbits secondary) \\ \hline 18 & 1.0047 & 0 \\ 41 & 1.0083 & 3.6043 \\ 42 & 1.0349 & 1.9988 \\ 72 & 1.0005 & 7.7181 \\ 97 & 1.0113 & 2.2684 \\ 105 & 1.0004 & 3.0528 \\ \hline \end{tabular} \tablecomments{This table is available in its entirety in a machine-readable form online. A portion is shown here for guidance regarding its form and content.} \label{tab:furlan_update} \end{table*} Of the 321 KOIs in F17 with detected companions \citep{furlan2017,ziegler2018b}, 88 have the brightest companion star within 1\arcsec, and 156 have the brightest companion within 2$\arcsec$ (the rest have companions beyond 2\arcsec). In \hyperref[fig:fig1]{Figure 1} we show the distribution of $X_R$ values in the two cases (the KOI has a detected companion star within 1$\arcsec$ or within 2\arcsec), here assuming a 70/30 probability that the planet orbits the primary versus the secondary star. In \hyperref[fig:fig2]{Figure 2} we show the resulting observed exoplanet radius distributions after applying these $X_R$ values. The colored histograms represent the corrected exoplanet radius distribution, accounting for the detected stellar companions; the original observed distribution from F17 is shown as an unfilled histogram outlined with a black dashed line. 
These plots show only the raw counts of planet radii, do not contain any completeness corrections, and do not represent occurrence rates. In both the 1$\arcsec$ and 2$\arcsec$ cases there is only a small change in the exoplanet radius distribution -- some $\sim$0.8-1.6~R$_\oplus$\xspace planets shift to $\geq 1.8$~R$_\oplus$\xspace -- and the gap does not change in position or change much in depth, as suggested by F17. As most of the larger spatially separated ($\geq$1\arcsec) stars are unbound background (distant) sources, they tend to be fainter than the KOI and as such their brightness has little effect on the transit depth. Close ($\sim$2-4\arcsec) stars with approximately comparable brightness to the KOI will have KIC numbers and as such will have already been accounted for by the \textit{Kepler} pipeline. True bound companions, those inside 1\arcsec \citep{horch2014,hirsch2017,matson2018}, tend to be closer in brightness to the primary and therefore usually cause more significant transit dilution. Thus, moving forward, we concentrate on correcting for the detected companions within 1\arcsec. 
\begin{figure}[htp] \centering \includegraphics[width=0.6\linewidth,clip]{figures/furlan_xr_count_1arcsec_7030_09062018.png} \includegraphics[width=0.6\linewidth,clip]{figures/furlan_xr_count_2arcsec_7030_09062018.png} \caption{Distributions of the $X_R$ values corresponding to the 88 (top) or 156 (bottom) KOIs in the F17 filtered observed sample that have detected companions within 1$\arcsec$ (top) or 2$\arcsec$ (bottom).} \label{fig:fig1} \end{figure} \begin{figure}[htp] \centering \includegraphics[width=0.7\linewidth,clip]{figures/08272018_Furlancor1arcsec.png} \includegraphics[width=0.7\linewidth,clip]{figures/08272018_Furlancor2arcsec.png} \caption{Histograms of observed exoplanet radii from the filtered F17 sample of 900 KOIs (black dashed lines; their Figure 2 panel g), with exoplanet radius corrections applied for the detected companions from \cite{furlan2017} (filled, green histograms). Radius corrections from \citet{furlan2017} were only applied in cases where the brightest companion detected was within 1$\arcsec$ (top, 88 KOIs) or 2$\arcsec$ (bottom, 156 KOIs), and we assumed a 70/30 ratio between primary and brightest companion radius correction factors from \cite{furlan2017}. A 50/50 ratio produced similar histograms.} \label{fig:fig2} \end{figure} \subsubsection{Undetected Companions} As discussed in detail in \cite{ciardi2015}, companions around \textit{Kepler} stars can remain undetected even after vetting with high-resolution imaging and radial velocity follow-up. Below we describe how we accounted for potential undetected companions around the 812 KOIs from the filtered observed F17 sample that do not have detected companions within 1\arcsec. We assume that the 88 KOIs with detected companions within 1\arcsec, already corrected above, do not have additional undetected companions. If they did, their $X_R$ values would increase, but perhaps not significantly if the additional companion(s) had large $\Delta$magnitudes. The Ciardi et al. 
$X_R$ values were calculated under the assumptions that: (1) companions across all spectral types are equally detected, (2) each KOI could be single (their first multiplicity scenario as outlined in \S1.2.2), and (3) in the case of more than one star in the system the planet is equally likely to orbit any of the stars (50/50 in the case of a binary or 33/33/33 in the case of a triple). Then, whether or not the $X_R$ value is applied in any given case depends on the probability of the star being in a multiple system, and whether any companion stars have been detected or not. At this point, in calculating the $X_R$ values, we are interested in only the cases where the KOI is part of a multi-star system; we do not want to include the assumption that the KOI could be single as we account for that in the next step of our method. We also adopt a different ratio $o_{prob}$ for the probability the planet orbits the primary versus a companion star. While we do not know the true $o_{prob}$ value, testing different ratios is motivated by results in the literature as well as a toy statistical logic argument, outlined below. As a first example from the literature, \citet{barclay2015} examined Kepler-296, a binary consisting of two M dwarfs separated by 0.2$\arcsec$ and containing five transiting planets. Using statistical and analytic arguments they found that the brighter component, Kepler-296A, is strongly preferred by the data as the exoplanet host. Kepler-13 serves as a second example -- it consists of two A-type stars, where the brighter primary (Kepler-13A) hosts a transiting planet (Kepler-13Ab), and the fainter secondary (Kepler-13B) is orbited by a third star (Kepler-13BB) of spectral type G or later \citep{shporer2014}. A substantial multi-wavelength observational effort along with detailed statistical analysis places the hot Jupiter in this system also in orbit around the primary star. 
Finally, \cite{fess2018} has examined 29 \textit{Kepler} multi-planet systems with high-resolution images and a detected companion, and used the transit light curves to calculate the mean density of the host star and thus assign host stars to each of the 64 planets \citep{seager2002}. Results of this study find that $\sim$90\% of the planets are statistically more likely to orbit the primary star. Taking a back-of-the-envelope statistical approach, we find that exoplanets, especially small planets, are far more likely to be detected orbiting a brighter star versus a fainter star -- the signal-to-noise is higher and the transit depth contrast is larger around the brighter star. Dilution of the fainter star's light by the primary will also make any small planet transits around a secondary star very shallow, again reducing their chance of detection. A hard case is nearly equal brightness (mass) stars whereby any of the above techniques would not be able to differentiate between the two stars. However, in this case the planet radii will not change, regardless of which of the nearly identical stars the planet orbits. Based on these examples and argument, we modify the original Ciardi et al. $X_R$ factors to reflect only the multi-star scenarios, and choose to test three different scenarios for the probability that the primary vs. a companion star hosts the planet, $o_{prob}$ -- 90/10, 70/30, and the original 50/50. For each $o_{prob}$ ratio, the $X_R$ mean value and spread are calculated for each KOI\footnote{We did not include the 88 KOIs with detected companions corrected in the previous section. 
We also did not include KOIs 163, 958, 1947, 2564, 2815, 3114, 3197, 3220, or 4457 as they were not originally in \cite{ciardi2015}.} by (1) considering all possible companions to the KOI that are fainter in absolute magnitude but could fall along the same isochrone, (2) calculating the $X_R$ factors for the possible companions assuming the planet orbits the primary, (3) calculating the $X_R$ values for the possible companions assuming the planet orbits the secondary, (4) convolving the fits of (2) and (3) vs. mass ratio with the companion-to-primary mass ratio distribution of \cite{raghavan2010} to reflect the likelihood that a companion has a particular mass and thus brightness contrast, and (5) taking the mean and spread of these distributions, and combining them in an average weighted by the $o_{prob}$ ratio. We choose to stay consistent with the work of \citet{ciardi2015} and use this weighted mean approach to account for the planet orbiting the primary vs. secondary star. These final $X_R$ mean and spread values are listed in \hyperref[tab:modxr]{Table 1} and shown in \hyperref[fig:fig3]{Figure 3}, where the blue (solid line), violet (dashed line), and orchid (dashed dotted line) histograms correspond to $o_{prob}$ of 90/10, 70/30, and 50/50, respectively. To capture the effect of scatter in the $X_R$ values calculated from this multi-step process, for each KOI we then create a 1000-element normal distribution with the corresponding $X_R$ mean and spread, referred to as dist$_{xr}$, which is always truncated at 1 to prevent any $X_R$ values $<1$. \begin{figure}[htp] \centering \includegraphics[width=0.68\linewidth,clip]{figures/09042018_new_correction_means.png} \includegraphics[width=0.68\linewidth,clip]{figures/09042018_new_correction_rms.png} \caption{Each new $X_R$ calculated for this study consists of a mean and a spread. In the top plot we show the distribution of mean values, in the bottom plot we show the distribution of RMS values. 
In blue (solid) lines are the $X_R$ values assuming a 90/10 probability ratio for the primary vs. a companion star hosting the planet, in violet (dashed) lines are the $X_R$ values assuming a 70/30 ratio, and in orchid (dash-dot) lines are the $X_R$ values assuming a 50/50 ratio.} \label{fig:fig3} \end{figure} \begin{table*}[t] \centering \caption{Modified Radius Correction Factors} \vspace{12pt} \footnotesize \begin{tabular}{|c |c |c |c |c |c |c |c |c |} \hline KOI & Fraction of multis & Fraction of multis &$X_R$ mean&$X_R$ rms&$X_R$ mean&$X_R$ rms&$X_R$ mean&$X_R$ rms\\ & not removed& not removed & 50/50 & 50/50 & 70/30 & 70/30 & 90/10 & 90/10 \\ & by vetting& by vetting (TESS) & & & & & & \\ \hline 2 & 0.290&0.054&1.459&0.438&1.312&0.287&1.204&0.175 \\ 3&0.083&0.000&1.874&0.508&1.538&0.322&1.2770&0.181 \\ 7&0.350&0.1026&1.989&0.957&1.584&0.600&1.278&0.259 \\ 10&0.423&0.166&1.540&0.493&1.352&0.319&1.209&0.182 \\ 17&0.378&0.126&1.581&0.513&1.372&0.328&1.213&0.184 \\ \hline \end{tabular} \tablecomments{Here we list (right-most columns) the different radius correction factors we calculated for each KOI, given different $o_{prob}$ values (50/50, 70/30, or 90/10). In the second and third columns, we list the fraction of companion stars not removed by the assumed vetted (ground-based RV and high-resolution imaging follow-up), in the case of typical \textit{Kepler} and TESS distances, respectively. This table is available in its entirety in a machine-readable form online. A portion is shown here for guidance regarding its form and content.} \label{tab:modxr} \end{table*} With the $X_R$ distributions in hand, we next determine the chance that a given star is in a multi-star system and thus when we need to multiply the planet radius by $X_R$. We choose to assume (and know in the high-resolution imaging case) that all of the KOIs have been vetted with ground-based follow-up, and that any companions that \textit{could} have been detected \textit{were} detected. 
If a star was not vetted, the probability of it being in a multi-system can be estimated at 46\%, based on both field stars and the observed binary fraction of \textit{Kepler} host stars \citep{raghavan2010,horch2014,matson2018}. In the case of vetting, this number has to be multiplied by the fraction of multiple stars that have \textit{not} already been detected/accounted. We adopt the fraction of multiples not removed for each KOI from \cite{ciardi2015}, where they assumed all companions with periods of $\lesssim 2$ years and separations of $\gtrsim 0.1 \arcsec$ were detected; these values are listed in \hyperref[tab:modxr]{Table 1}, second column, ``Fraction of multis not removed by vetting''. The fraction of multis not removed by vetting is then multiplied by 0.46 to represent the remaining probability that a KOI has an undetected companion in the vetted case we are considering. We refer to this final value as prob$_{multi}$. Finally, to calculate a probabilistic $X_R$ value for each KOI, we draw a random number $r$ out of 1000. If $r\leq 1000\times$prob$_{multi}$, we then draw a random value from dist$_{xr}$, which we call $X_R^{multi}$, and multiply the exoplanet radius by this value. If $r>1000\times$prob$_{multi}$, then we assign $X_R = 1$ and do not change the exoplanet radius. Applying the procedure above to the 812 KOIs without detected companions within 1\arcsec results in a new histogram of exoplanet radii. To this histogram, we add back in the planet radii that were already corrected for detected companions using the $X_R$ values from \cite{furlan2017}, from \S2.1.1. We then repeat the creation of the new histogram -- accounting for both possible undetected companions and adding back in the detected companions -- 1000 times, resulting in 1000 values for each bin in the exoplanet radii histogram. 
The mean and spread of each bin are represented as colored histograms in \hyperref[fig:fig5]{Figure 4}, \hyperref[fig:fig6]{Figure 5}, and \hyperref[fig:fig7]{Figure 6}, each representing a different $o_{prob}$ ratio (70/30, 90/10, 50/50). The original observed distribution from F17 is shown in each figure as an unfilled histogram outlined with gray dashed line. Note that we recalculated the $X_R$ values for the \textit{detected} companions in the previous section assuming the three different $o_{prob}$ values (different weightings of the Furlan et al. Tables 9 and 10). Again, these plots show only the raw counts of planet radii and do not contain any completeness corrections. In \hyperref[fig:fig8]{Figure 7} we also show all three distributions together for ease of comparison, along with the original F17 observed exoplanet distribution as a gray dashed line. \begin{comment} \begin{figure}[htp] \begin{center} \includegraphics[width=0.75\linewidth,clip]{figures/08282018_new_correction_draws.png} \caption{Here we show the distribution of $X_R^{multi}$ values in one representative case for each $o_{prob}$ value. The blue (solid) line represent the values assuming a 90/10 probability ratio for the primary vs. a companion star hosting the planet, the violet (dashed) lines represent a 70/30 ratio, and the orchid (dash-dot) lines represent a 50/50 ratio. 
Not included in these distributions are the instances where $X_R=1$; the plotted distributions only represent instances where $r\leq$1000$\times$prob$_{multi}$ (i.e., an unknown companion is assumed to exist in this simulation).} \end{center} \label{fig:fig4} \end{figure} \end{comment} \begin{figure}[htp] \begin{center} \includegraphics[width=1\linewidth,clip]{figures/09042018_version_7030_1000draws.png} \caption{Histogram of observed exoplanet radii from the filtered F17 sample of 900 KOIs (grey dashed line; their Figure 2 panel g), with exoplanet radius corrections applied for both detected and undetected companions (filled histogram). The height of each bar represents the average across 1000 repetitions of a different random draw of $r$, and then, if appropriate, from dist$_{xr}$, calculated assuming $o_{prob}$ = 70/30. The error bar represents the scatter in the values in each bin across the 1000 repetitions. In these plots, radius corrections from \citet{furlan2017} were only applied in cases where the brightest companion detected was within 1\arcsec, and we assumed a 70/30 weighting.} \end{center} \label{fig:fig5} \end{figure} \begin{figure}[htp] \begin{center} \includegraphics[width=1\linewidth,clip]{figures/09042018_version_9010_1000draws.png} \caption{Same as \hyperref[fig:fig5]{Figure 4}, except dist$_{xr}$ are calculated assuming $o_{prob}$ = 90/10. In these plots, radius corrections from \citet{furlan2017} were only applied in cases where the brightest companion detected was within 1\arcsec, and we assumed a 90/10 weighting.} \end{center} \label{fig:fig6} \end{figure} \begin{figure}[htp] \begin{center} \includegraphics[width=1\linewidth,clip]{figures/09042018_version_5050_1000draws.png} \caption{Same as \hyperref[fig:fig5]{Figure 4}, except dist$_{xr}$ are calculated assuming $o_{prob}$ = 50/50. 
In these plots, radius corrections from \citet{furlan2017} were only applied in cases where the brightest companion detected was within 1\arcsec, and we assumed a 50/50 weighting.} \end{center} \label{fig:fig7} \end{figure} \begin{figure}[htp] \begin{center} \includegraphics[width=1\linewidth,clip]{figures/09042018_comparison_1000draws.png} \caption{This figure shows a comparison of the histograms in \hyperref[fig:fig5]{Figure 4}, \hyperref[fig:fig6]{Figure 5}, and \hyperref[fig:fig7]{Figure 6}, as well as the histogram of observed exoplanet radii from the F17 sample of 900 KOIs as a dark grey dashed line.} \end{center} \label{fig:fig8} \end{figure} The ``true'' effect of detected and undetected companions on the raw count exoplanet radius distribution falls somewhere within the error bars in Figures \hyperref[fig:fig5]{4}-\hyperref[fig:fig7]{6}. For each of the $o_{prob}$ values, there is some filling in of the gap, as well as a shift in the smallest planet radii to larger values, as expected since the radius correction factor is always $\geq 1$. The different $o_{prob}$ cases agree within errors, except for the $\sim$5.5-6.1~R$_\oplus$\xspace bins where the 90/10 and 50/50 cases do not overlap within errors. Also, in the $o_{prob}$ = 50/50 case, the trend is for more of the $R_p \leq$1.7~R$_\oplus$\xspace planets to be shifted to $R_p \geq$3.5~R$_\oplus$\xspace, versus the $o_{prob}$ = 90/10 case, where the trend is for more of the $R_p \leq$1.6~R$_\oplus$\xspace planets to be shifted to 1.7~R$_\oplus$\xspace$\leq R_p \leq$2.1~R$_\oplus$\xspace, within the gap. However, in most cases the raw count radius gap is preserved (though less distinct), as is the drop-off of planet frequency around 3.5~R$_\oplus$\xspace (though the total frequency of planets larger than 3.5~R$_\oplus$\xspace increases). 
\subsection{Predictions for TESS} The recently-launched Transiting Exoplanet Survey Satellite \citep[TESS,][]{ricker2015} is focused on detecting planets around the brightest stars across the entire sky. As pointed out by \citet{ciardi2015}, because the stars will be $\sim10\times$ closer, the effectiveness of high-resolution imaging will improve greatly, decreasing the fraction of undetected companions from $\sim$40\% in the case of \textit{Kepler} to $\sim$16\% in the case of TESS. To understand how undetected companions might affect the exoplanet radius distribution observed by TESS, we can apply the same procedure as we did in the real KOI case above, calculating modified $X_R$ factors, but assuming distances 10$\times$ closer, which changes the probability that a star will have an undetected companion. In this case we do not have a detected companion sub-sample, so we apply the scheme outlined above to all 900 KOIs in the observed filtered F17 sample, except KOIs 163, 958, 1947, 2564, 2815, 3114, 3197, 3220, or 4457 as they were not originally in \cite{ciardi2015}. We also choose to set $o_{prob}=$70/30 for these calculations. The results are shown in \hyperref[fig:fig9]{Figure 8}; again, the raw counts of planet radii are plotted with no attempt to correct for completeness, and the F17 filtered observed sample is outlined with a gray dashed line. With high-resolution imaging follow-up that reaches well within separations of 1\arcsec, there is almost no difference between the corrected exoplanet radius distribution (colored histogram) and that not accounting for undetected companions (grey dashed line). What happens if there is \textit{not} high-resolution imaging follow-up of TESS targets? We investigate this scenario by assuming that prob$_{multi}$ = 0.46 for all stars; that is, none of the possible companions around a star have been detected or ruled out. We recalculate the observed exoplanet radius distribution for TESS under this assumption. 
The result, shown in \hyperref[fig:fig10]{Figure 9}, is that the corrected exoplanet radius distribution (colored histogram) differs significantly from the distribution inferred without accounting for undetected companions (grey dashed line). We conclude that one would infer a different and likely incorrect radius distribution if none of the companions were detected. \begin{figure}[h!] \begin{center} \includegraphics[width=1\linewidth,clip]{figures/09042018_TESSvetted7030_1000draws.png} \caption{Same as \hyperref[fig:fig5]{Figure 4}, except prob$_{multi}$ values are calculated assuming a more TESS-like target distance and the analysis is applied to all 900 KOIs (except KOIs 163, 958, 1947, 2564, 2815, 3114, 3197, 3220, or 4457 as they were not originally in \cite{ciardi2015}).} \end{center} \label{fig:fig9} \end{figure} \begin{figure}[h!] \begin{center} \includegraphics[width=1\linewidth,clip]{figures/09062018_TESSunvetted7030_1000draws.png} \caption{Same as \hyperref[fig:fig5]{Figure 4} or \hyperref[fig:fig9]{Figure 8}, except prob$_{multi}$ values are calculated assuming \textit{no} vetting, that is, prob$_{multi}$ is always$=0.46$.} \end{center} \label{fig:fig10} \end{figure} \begin{comment} \begin{figure}[tp] \begin{center} \includegraphics[width=1\linewidth,clip]{figures/09042018_F17novetting_1000draws2.png} \caption{Same as \hyperref[fig:fig5]{Figure 4}, except that we assume the sample has undergone no vetting for close stellar companions, and thus prob$_{multi}$ is always the field value of 0.46.} \end{center} \label{fig:fig11} \end{figure} \end{comment} \section{Discussion} \label{sec:discussion} \subsection{Radius Gap Robustness} We describe our method to account for detected and undetected stellar companions to KOIs in \S2.1 based on high-resolution imaging observations, comparing the isochrones of KOIs and considering viable companion stars, informed by statistics of stellar multiplicity in the field and in the \textit{Kepler} and \textit{K2} samples. 
This scheme, the results of which are shown in Figures \hyperref[fig:fig5]{4}-\hyperref[fig:fig8]{7}, tends to partially fill in the ``gap'' in the observed exoplanet radius distribution around 1.8~R$_\oplus$\xspace, diluting it but not erasing or significantly shifting it. The robust nature of the observed radius gap to detected and undetected companions is likely due in part to sample selection and the vetting of that sample. F17's sample includes the ``best and brightest'' targets, those for which they were able to obtain high-resolution optical spectroscopy and successfully determine more precise stellar parameters than originally derived in the KIC (from photometry). As the F17 authors describe, their sample is also filtered for false positives, and as we confirm, all of their final sample have high-resolution imaging observations. We recalculate the observed exoplanet radius distribution from F17 but assume there was no vetting -- that the probability of a star being in a multiple system (prob$_{multi}$) is always 46\% -- with the results shown in Figure \hyperref[fig:fig10]{9}. (The result is the same whether the stars are at \textit{Kepler}- or TESS-like distances.) The distribution is not as easily distinguishable as bimodal, and shows a larger count of $R_p > 3$~R$_\oplus$\xspace planets. This exercise highlights, for stars at any distance, the importance of proper vetting of planet candidate host stars with high-resolution imaging. \subsection{Implications for Planet Formation} It is instructive to consider what it means to ``dilute'' the radius gap, even slightly, as changing the radius gap may have implications for the average core composition of super-Earth and sub-Neptune sized planets. 
Numerous papers have shown that an ``evaporation valley'' in the radius distribution is a natural outcome of the photo-evaporation and thus mass loss of small planets' volatile-rich envelopes due to high-energy radiation from the host star \citep{owen&wu2017,chenrogers2016,jin2014,owen&wu2013,ciardi2013,lopezfortney2013,lopez2012}. A key parameter in these evaporation and thermal evolution models that controls the location of the valley is the core mass (or core density) of the planet, assumed not to change after formation. Since the core represents most of the mass in these planets, it controls the escape velocity and how easily an atmosphere can be evaporated. \cite{owen&wu2017} and \cite{jin&mordasini2017}, each using slightly different evaporation/mass loss models, found that the radius distribution of F17 was well matched by models populated with planets having uniformly rocky cores, composed of a silicate-iron mixture similar to the Earth's bulk density, and \textit{not} by planets with cores having a substantial mass fraction ($\gtrsim 75$\%) of ice/water or made purely of iron. These authors, as well as \cite{lopezfortney2013}, note that heterogeneity in the core composition would smear out the gap in the radius distribution. By accounting for possible undetected companions, we observe a slight smearing out of the observed radius distribution gap, particularly in the $o_{prob}$=90/10 case, which we think is the most realistic \citep{fess2018,bouma2018,barclay2015}. Our results suggest that, if there are undetected companions around the KOIs in the F17 sample, there could also be more heterogeneity in the core composition of most super-Earth and sub-Neptune planets than would be inferred from the original distribution. Specifically, a non-zero fraction of the cores could be composed of ice/water. 
Potential undetected companions complicate the origin story of these planets, as the addition of ice/water in the core opens up the possibility that they formed beyond the water ice line and migrated inwards, rather than only forming and migrating locally within the water ice line. Other factors not explored here, like the relative importance of X-ray/UV flux over time as a function of stellar mass, may also contribute to the radius distribution being smeared out. We note that \cite{vaneylen2018}'s independent study of the \textit{Kepler} planet radius distribution, using a smaller sample of KOIs (75 stars, 117 planets) than F17 but with asteroseismically-derived and thus even more precise stellar parameters (and thus more precise planet radii), also finds a bimodal distribution, with two peaks at 1.5 and 2.5~R$_\oplus$\xspace separated by a gap around 2~R$_\oplus$\xspace, shifted to slightly higher radii than the distribution in F17. \cite{vaneylen2018} do not include any description of a correction for detected or undetected companions, but we confirm that all of the KOIs in their sample have some kind of high-resolution imaging. Out of the 75 KOIs, 40 have detected companions, with separations ranging from 0.029$\arcsec$ to 3.85$\arcsec$. Of those KOIs with companions within $<1\arcsec$, the largest $X_R$ factor, assuming the planet orbits the primary star, is 1.33, and the smallest/largest $X_R$ factors, assuming the planet orbits the secondary star, are 1.38/7.02. There are seven of the 40 KOIs with detected companions that do not have enough color information to calculate $X_R$. It is intriguing to think that the apparent shift in the radius gap between the work of F17 and \cite{vaneylen2018} could be influenced by the effects of undetected stellar companions (although see also \citealt{fulton&petigura2018}). 
\subsection{Multiplicity Assumptions} The analysis presented here relies upon an assumption of what fraction of the planet host stars are expected to have stellar companions. We have applied radius correction factors to account for undetectable stellar companions based on the assumption that the stellar multiplicity rate of the \textit{Kepler} planet hosts is identical to the multiplicity rate for the solar neighborhood, 46\% as determined by \citet{raghavan2010}. \citet{ciardi2015} relied on the Raghavan multiplicity statistics (both the multiplicity rate and the observed distribution of companions in period and mass ratio) to simulate the \textit{Kepler} field, and the average radius correction values we apply in this work therefore depend on this assumption. Several studies have demonstrated that this assumption is valid, particularly for separations larger than a few tens of AU. \citet{horch2014} demonstrated that the multiplicity rate of \textit{Kepler} planet hosts as detected by the DSSI speckle camera was consistent with the solar neighborhood. With a typical resolution of 20 mas in the optical, this study was sensitive to stellar companions at separations of $\geq 20$ AU at the distance of a typical \textit{Kepler} star. \cite{matson2018} performed a similar survey for stellar companions around the somewhat nearer \textit{K2} planet hosts, and also recovered multiplicity rates similar to the solar neighborhood. We have applied radius correction factors to account for undetectable stellar companions based on the assumption that the total stellar multiplicity rate is 46\%, as determined by \citet{raghavan2010}. In contrast, a few surveys have reported evidence for suppressed stellar multiplicity around planet-hosting stars, for binaries with small to moderate separations. \citet{kraus2016} reports that multiplicity within 47 AU of planet hosts is suppressed by a factor of 0.34, based on a study of 382 KOIs with Keck/NIRC2 adaptive optics (AO) imaging. 
\citet{wang2014} also demonstrates a small suppression of stellar multiplicity at separations $\lesssim 1500$ AU, albeit for a smaller sample of 56 KOIs but using both AO imaging and radial velocity observations. Both studies argue that this suppression of multiplicity indicates that planet formation is more difficult in close binary systems. If stellar multiplicity is indeed suppressed at small separations around stars hosting planets, both the average ``vetted'' radius correction factors, and the fraction of stars to which these factors are applied, would need to be altered. In other words, the number of undetectable stellar companions hiding within the CKS survey sample would be reduced. The mere observation of the gap seen by F17 may indicate that indeed the stellar multiplicity may be lower than for the general field stars. However, recent work by \cite{matson2018}, using higher resolution speckle techniques, contradicts the claims of close companion suppression, showing that the fraction of close companions in \textit{K2} exoplanet host systems is similar to the field star fraction. \subsection{Implications for Occurrence Rate Studies} In this work we only consider ``raw counts'', what we called the ``observed'', distribution of planet radii, and do not attempt to calculate occurrence rates. The same flux contamination effects that we described in \S1.2 for \textit{Kepler} host stars will of course also apply to stars not known to host planets within the \textit{Kepler} field, and will thus also influence the survey completeness and thus planet occurrence rates, particularly for smaller planets. While beyond the scope of this paper, we encourage future works to investigate to what extent the multiplicity of stars in the \textit{Kepler} parent sample influences the inferred planet occurrence rates. A similar high-resolution imaging survey of \textit{Kepler} non-planet hosting stars would also help determine more accurate planet occurrence rates. 
\subsection{Implications for TESS Follow-up} In the case of \textit{Kepler}, there is an orbital period/separation space in which even the best high-resolution imaging and RV follow-up do not detect companions, between $\sim$1000-100,000 days (see Figure 5 in \citealt{ciardi2015}). This is due to the \textit{Kepler} stars typically being far away, $\sim 900$ pc. Ciardi et al. calculate that, on average, ground-based observations leave $\sim 40$\% of possible companions around KOIs undetected. However, because \textit{K2} planet candidates are, and TESS Objects of Interest (TOIs) will be, closer than \textit{Kepler} targets, there is a vanishing orbital period/separation space in which high-resolution (within 1\arcsec) follow-up will not detect companions, with only $\lesssim 15$\% of stellar companions to \textit{K2} and TESS targets being missed \citep{ciardi2015,matson2018_submitted}. For comparison, the average $X_R$ calculated by \cite{ciardi2015} for vetted companions to KOIs is 1.20$\pm$0.06, while for companions to TOIs, the factor is only 1.07$\pm$0.03. We observe a similar trend in our modified $X_R$ values and the resulting exoplanet radius distributions -- with vetting there is a small but visible change in the distribution for the original KOI sample (e.g., \hyperref[fig:fig8]{Figure 7}), but there is almost no change for TESS-like distances (\hyperref[fig:fig9]{Figure 8}). However, as demonstrated in \hyperref[fig:fig10]{Figure 9}, if TOIs are not vetted, the inferred exoplanet radius distribution (histogram outlined with a gray dashed line) will be different than the ``real'' distribution (filled in histograms). An incorrect distribution of planet radii will impact statistical studies of exoplanet occurrence rates and density distributions ($\rho \propto R^{3}$) \citep[e.g.,][]{furlan&howell2017}, and thus our understanding of the diversity of planets across the Galaxy. 
An incorrect distribution can also impact the acceptance or ``correct-ness'' of different planet formation models, as described above. \section{Summary} We investigated how (bound) close companions to transiting exoplanet host stars can affect the determination of accurate planet radii, specifically the observed \textit{Kepler} small planet radius distribution with a ``gap'' around 1.8~R$_\oplus$\xspace derived by \cite{fulton2017}. As outlined by \cite{ciardi2015}, such companions contribute to the flux measured in the photometric aperture, causing the flux of the star the planet is transiting to be overestimated, and thus the transit depth and planet radius to be underestimated. If the planet is orbiting the companion star, this can also add to the uncertainty in the inferred planet radius (see Eq. 1). The scope of this paper was limited to the study of raw planet counts, and does not include an analysis based on calculated planet occurrence rates. First, we investigated how accounting for detected and undetected companions might change the \cite{fulton2017} observed radius distribution. We used the compilation of high-resolution observations and calculated radius correction factors from \citet{furlan2017} and \citet{ziegler2018b} to show that correcting for \textit{detected} companions around KOIs (either 88 with companions within 1$\arcsec$ or 156 with companions within 2\arcsec) does not significantly change the observed exoplanet radius distribution (\hyperref[fig:fig2]{Figure 2}). 
We next modified the prescription of \cite{ciardi2015} to estimate exoplanet radius correction factors for \textit{undetected} companions, assuming (1) a multiplicity rate similar to both nearby field stars and \textit{Kepler} and \textit{K2} host stars; (2) that the KOIs were uniformly vetted for companions with orbital periods $\lesssim$2 years with RV observations and separations $\gtrsim 0.1 \arcsec$ with high-resolution imaging observations; and (3) different probabilities for the planet orbiting the primary versus secondary star ($o_{prob}$, 90/10, 70/30, or 50/50). We also assumed that the KOIs with detected companions did not have additional undetected companions. The resulting observed exoplanet radius distributions (\hyperref[fig:fig5]{Figure 4}, \hyperref[fig:fig6]{Figure 5}, and \hyperref[fig:fig7]{Figure 6}) still show the gap, but it appears to be partially filled in by the shifting of the smallest planets to larger radii (as expected, since by definition the radius correction factors are always $\geq 1$). The shape of this observed radius distribution has implications for the inferred formation pathways of small planets -- ``filling in'' the gap may indicate a more heterogeneous core composition, perhaps with some planets having water/ice material accreted from outside the snowline \citep[e.g.,][]{owen&wu2017,jin&mordasini2017}. Second, we applied the same undetected companion prescription to all 900 of the KOIs\footnote{except KOIs 163, 958, 1947, 2564, 2815, 3114, 3197, 3220, or 4457 as they were not originally in \cite{ciardi2015}} in the Fulton et al. observed filtered sample, but assumed a distance 10$\times$ closer, more similar to the stars that TESS will survey for transiting planets. We show that with high-resolution imaging vetting ($\lesssim 0.25$\arcsec), there is little to no uncertainty in the observed exoplanet radius distribution (\hyperref[fig:fig9]{Figure 8}). 
However, without any vetting, the exoplanet radius distribution, no matter the distance to the host stars, does not match the corrected distribution (\hyperref[fig:fig10]{Figure 9}). Thus it is critical that dedicated ground-based, high-resolution imaging observations of planet candidate systems continue in the TESS era. \acknowledgments Support for this work was provided by NASA through Hubble Fellowship grant HST-HF2-51399.001 awarded by the Space Telescope Science Institute, which is operated by the Association of Universities for Research in Astronomy, Inc., for NASA, under contract NAS5-26555. This research has made use of the NASA Exoplanet Archive, which is operated by the California Institute of Technology, under contract with the National Aeronautics and Space Administration under the Exoplanet Exploration Program. This research has made use of the Exoplanet Follow-up Observation Program website, which is operated by the California Institute of Technology, under contract with the National Aeronautics and Space Administration under the Exoplanet Exploration Program. High-resolution images referenced in this work have been obtained at the WIYN, Gemini, Palomar, and Keck telescopes. We thank Elise Furlan for fruitful discussion and sharing information that enhanced the quality of this work, and Susan Mullally for clarifying how the \textit{Kepler} PDC accounts for stellar companions in the calculation of planet radii. We thank the anonymous referee for their detailed comments that improved the quality of this paper. \software{astropy} \bibliographystyle{yahapj}
1,108,101,565,863
arxiv
\section{Introduction} The aim of this paper is to consider the general properties of gravitational field that is generated by a fractal distribution of particles. This problem is nowadays particularly relevant. In fact, there is a general agreement that galaxy distribution exhibits fractal behavior up to a certain scale \cite{slmp98,cp92}. The eventual presence of a transition scale towards homogeneity and the exact value of the fractal dimension are still matters of debate. Moreover it has been observed that cold gas clouds of the interstellar medium has a fractal structure, with $ 1.5 \le D \le 2$ in a large range of length scales \cite{lar,Scalo}. Derivatives and integrals of fractional order \cite{SKM} have found many applications in recent studies of fractal structures. Fractional analysis can have numerous applications: kinetic theories \cite{Zaslavsky1,Zaslavsky2,Physica,JPA05-2}; statistical mechanics \cite{chaos,PRE05,JPCS}; dynamics in complex media \cite{Nig,PLA05,PLA05-2,AP05,Chaos05}; and many others. The new type of problem has increased rapidly in areas in which the fractal features of a process or the medium impose the necessity of using non-traditional tools in "regular" smooth physical equations. In order to use fractional derivatives and fractional integrals for fractal distribution, we must use some continuous medium model \cite{PLA05,AP05,Chaos05}. We propose to describe the fractal distribution by a "fractional" continuous medium \cite{PLA05}, where all characteristics and fields are defined everywhere in the volume but they follow some generalized equations which are derived by using fractional integrals. In many problems the real fractal structure of medium can be disregarded and the fractal distribution can be replaced by some "fractional" continuous mathematical model. 
Smoothing of microscopic characteristics over the physically infinitesimal volume transforms the initial fractal distribution into "fractional" continuous model \cite{PLA05} that uses the fractional integrals. The order of fractional integral is equal to the fractal dimension of distribution. The fractional integrals allow us to take into account the fractality of the distribution. Fractional integrals are considered as an approximation of integrals on fractals \cite{RLWM,Nig4}. It was proved in \cite{RLWM} that integrals on net of fractals can be approximated by fractional integrals. In Ref. \cite{chaos}, we proved that fractional integrals can be considered as integrals over the space with fractional dimension up to numerical factor. In order to prove, we use the formulas of dimensional regularizations \cite{Col}. In this paper, we consider gravitational field of fractal distribution of particles. Fractal distribution is described by fractional continuous medium model \cite{PLA05,AP05,Physica,Chaos05}. In the general case, the fractal distribution of particles cannot be considered as continuous medium. There are points and domains that are not filled of particles. In Ref. \cite{PLA05}, we suggest to consider the fractal distributions as special ("fractional") continuous media. We use the procedure of replacement of the distribution with fractal mass dimension by some continuous model that uses fractional integrals. This procedure is a fractional generalization of Christensen approach \cite{Chr}. Suggested procedure leads to the fractional integration and differentiation to describe fractal distribution. In Section 2, the density of fractal distribution of mass is considered. In Section 3, we consider the simple examples of the gravitational field of homogeneous fractal distribution. In Section 4, the examples of gravitational quadrupole moments for fractal distribution are considered. Finally, a short conclusion is given in Section 5. 
\section{Mass and Balance of Mass for Fractal Distribution} \subsection{Mass of Fractal Distribution} Let us consider a fractal distribution of particles. For example, we can assume that particles with a constant density are distributed over the fractal. In this case, the number of particles $N$ enclosed in a volume of characteristic size $R$ satisfies the scaling law \begin{equation} N(R) \sim R^{D} ,\end{equation} whereas for a regular n-dimensional Euclidean object we have $N(R)\sim R^n$. For distribution of particles with number density $n({\bf r},t)$, we have that the mass density can be defined by \begin{equation} \rho({\bf r},t)=m n({\bf r},t) , \end{equation} where $m$ is the mass of a particle. The total mass of region $W$ is then given by the integral \begin{equation} M(W)=\int_W \rho({\bf r},t) dV_3 , \end{equation} or $M(W)=mN(W)$, where $N(W)$ is a number of particles in the region $W$. The fractional generalization of this equation can be written in the following form: \begin{equation} M(W)=\int_W \rho_D({\bf r},t) dV_D , \end{equation} where $D$ is a mass fractal dimension of the distribution, and $dV_D$ is an element of D-dimensional volume such that \begin{equation} \label{5a} dV_D=C_3(D,{\bf r})dV_3. \end{equation} For the Riesz definition of the fractional integral \cite{SKM}, the function $C_3(D,{\bf r})$ is defined by the relation \begin{equation} \label{5R} C_3(D,{\bf r})= \frac{2^{3-D}\Gamma(3/2)}{\Gamma(D/2)} |{\bf r}|^{D-3} . \end{equation} The initial points of the fractional integral are set to zero. The numerical factor in Eq. (\ref{5R}) has this form in order to derive usual integral in the limit $D\rightarrow (3-0)$. Note that the usual numerical factor $\gamma^{-1}_3(D)={\Gamma(1/2)}/{2^D \pi^{3/2} \Gamma(D/2)}$, which is used in Ref. \cite{SKM} leads to $\gamma^{-1}_3(3-0)= 1/[4 \pi^{3/2}]$ in the limit $D\rightarrow (3-0)$. 
For the Riemann-Liouville fractional integral \cite{SKM}, the function $C_3(D,{\bf r})$ is defined by \begin{equation} \label{5RL} C_3(D,{\bf r})= \frac{|x y z |^{D/3-1}}{\Gamma^3(D/3)} . \end{equation} Here we use Cartesian coordinates $x$, $y$, and $z$. In order to have the usual dimensions of the physical values, we can use vector ${\bf r}$, and coordinates $x$, $y$, $z$ as dimensionless values. Therefore the density $\rho_D$ has the dimension of mass. Note that the interpretation of fractional integration is connected with fractional dimension \cite{chaos,PRE05}. This interpretation follows from the well-known formulas for dimensional regularizations \cite{Col}. The fractional integral can be considered as an integral in the fractional dimension space up to the numerical factor $\Gamma(D/2) /( 2 \pi^{D/2} \Gamma(D))$. If we consider the ball region $W=\{{\bf r}: \ |{\bf r}|\le R \}$, and the spherically symmetric distribution of particles ($n_D({\bf r},t)=n_D(r)$), then we have \begin{equation} N(R)=4\pi \frac{2^{3-D}\Gamma(3/2)}{\Gamma(D/2)} \int^R_0 n_D(r) r^{D-1} dr , \quad M(R)=mN(R). \end{equation} For the homogeneous ($n_D(r)=n_0$) fractal distribution, we get \begin{equation} N(R)=4\pi n_0 \frac{2^{3-D}\Gamma(3/2)}{\Gamma(D/2)} \frac{R^D}{D} \sim R^D . \end{equation} Fractal distribution of particles is called a homogeneous fractal distribution if the power law $N(R)\sim R^D $ does not depend on the translation of the region. The homogeneity property of the distribution can be formulated in the following form: for all regions $W$ and $W^{\prime}$ such that the volumes are equal $V(W)=V(W^{\prime})$, we have that the numbers of particles in these regions are equal too, $N(W)=N(W^{\prime})$. Note that the wide class of the fractal media satisfies the homogeneous property. In many cases, we can consider the porous media \cite{Por1,Por2}, polymers \cite{P}, colloid aggregates \cite{CA}, and aerogels \cite{aero} as homogeneous fractal media. In Refs. 
\cite{PLA05,AP05}, the continuous medium model for the fractal distribution was suggested. Note that the fractality and homogeneity properties can be realized for the fractional continuous model in the following forms: \noindent (1) {\it Homogeneity}: the local number density of homogeneous fractal distribution is translation invariant value that has the form $n({\bf r})=n_0=const$. \noindent (2) {\it Fractality}: the number of particles in the ball region $W$ obeys a power law relation $N_D(W) \sim R^D$, where $D<3$, and $R$ is the radius of the ball. \subsection{Flow of Fractal Medium} For distribution of particles with number density $n({\bf r},t)$ flowing with velocity ${\bf u}={\bf u}({\bf r},t)$, the resulting density ${\bf J}({\bf r},t)$ is given by \begin{equation} {\bf J}({\bf r},t)= m n({\bf r},t) {\bf u} , \end{equation} where $m$ is the mass of a particle. Measuring the field ${\bf J}({\bf r},t)$ passing through a surface $S=\partial W$ gives the flow (flux of mass) \begin{equation} I(S)=\Phi_J(S)=\int_S ({\bf J}, d{\bf S}_2) , \end{equation} where ${\bf J}={\bf J}({\bf r},t)$ is the flow field vector, $d{\bf S_2}=dS_2{\bf n}$ is a differential unit of area pointing perpendicular to the surface $S$, and the vector ${\bf n}=n_k{\bf e}_k$ is a vector of normal. The fractional generalization of this equation for the fractal distribution can be written in the following form \begin{equation} I(S)=\int_S ({\bf J}({\bf r},t), d{\bf S}_d) , \end{equation} where we use \begin{equation} \label{C2} dS_d=C_2 (d,{\bf r})dS_2 , \quad C_2(d,{\bf r})= \frac{2^{2-d}}{\Gamma(d/2)} |{\bf r}|^{d-2} . \end{equation} Note that $C_2(2,{\bf r})=1$ for $d=2$. In general, the medium on the boundary $\partial W$ has the dimension $d$. In the general case, the dimension $d$ is not equal to $2$ and is not equal to $(D-1)$. 
\subsection{Equation of Continuity for Fractal Distribution} The change of mass inside a region $W$ bounded by the surface $S=\partial W$ is always equal to the flux of mass through this surface. This is known as the law of mass conservation or the equation of balance of mass \cite{AP05}. If we denote by ${\bf J}({\bf r},t)$ the flow density, then mass conservation is written \begin{equation} \frac{dM(W)}{dt}=-I(S), \end{equation} or, in the form \begin{equation} \label{cecl} \frac{d}{dt} \int_W \rho_D({\bf r},t) dV_D= - \oint_{\partial W} ({\bf J} ({\bf r},t),d{\bf S}_d) . \end{equation} In particular, when the surface $S=\partial W$ is fixed, we can write \begin{equation} \label{drho} \frac{d}{dt} \int_W \rho_D({\bf r},t) dV_D= \int_W \frac{\partial \rho_D({\bf r},t)}{\partial t} dV_D .\end{equation} Using the fractional generalization of the mathematical Gauss's theorem (see Appendix), we have \begin{equation} \label{gt} \oint_{\partial W} ({\bf J} ({\bf r},t),d{\bf S}_d) = \int_W C^{-1}_3(D,{\bf r}) \frac{\partial}{\partial x_k} \Bigl( C_2(d,{\bf r})J_k({\bf r},t) \Bigr) dV_D .\end{equation} Substituting the right hand sides of Eqs. (\ref{drho}) and (\ref{gt}) in Eq. (\ref{cecl}), we find the equation of balance of mass in differential form \begin{equation} C_3(D,{\bf r})\frac{\partial \rho_D({\bf r},t)}{\partial t}+ \frac{\partial}{\partial x_k} \Bigl( C_2(d,{\bf r})J_k({\bf r},t) \Bigr) =0 . \end{equation} This equation can be considered as a continuity equation for fractal distribution of particles \cite{AP05}. \section{Gravitational Field of Fractal Distribution} \subsection{Gravitational Field} For a point mass $M$ at position ${\bf r}^{\prime}$ the gravitational field ${\bf F}$ at a point ${\bf r}$ is defined by \begin{equation} {\bf F}=G M \ \frac{{\bf r}^{\prime}-{\bf r}}{|{\bf r}^{\prime}-{\bf r}|^3} , \end{equation} where $G$ is a gravitational constant. 
For a continuous distribution $\rho({\bf r}^{\prime})$ of mass, the gravitational field ${\bf F}$ at a point ${\bf r}$ is given by \begin{equation} \label{E} {\bf F}_3({\bf r})=G \int_W \frac{{\bf r}^{\prime}-{\bf r}}{|{\bf r}^{\prime}-{\bf r}|^3} \rho({\bf r}^{\prime}) dV^{\prime}_3 . \end{equation} The fractional generalization of this equation for a fractal distribution of mass is given by the equation \begin{equation} \label{CLD} {\bf F}_D({\bf r})=G \int_W \frac{{\bf r}^{\prime}-{\bf r}}{|{\bf r}^{\prime}-{\bf r}|^3} \rho_D({\bf r}^{\prime}) dV^{\prime}_D , \end{equation} where $dV^{\prime}_D=C_3(D,{\bf r}^{\prime}) dV^{\prime}_3$. Eq. (\ref{CLD}) can be considered as Newton's law written for a fractal distribution of particles. Measuring the gravitational field passing through a surface $S=\partial W$ gives the gravitational field flux \begin{equation} \Phi_F(S)=\int_S ({\bf F}, d{\bf S}_2) , \end{equation} where ${\bf F}$ is the gravitational field vector, and $d{\bf S}_2$ is a differential unit of area pointing perpendicular to the surface $S$. \subsection{Gauss's Law for Fractal Distribution} Gauss's law tells us that the total flux $\Phi_F(S)$ of the gravitational field ${\bf F}$ through a closed surface $S=\partial W$ is proportional to the total mass $M(W)$ inside the surface: \begin{equation} \label{GL1} \Phi_F(\partial W)=4 \pi G M(W) . \end{equation} For the fractal distribution, Gauss's law states \begin{equation} \label{GL2} \int_S ({\bf F}_D,d{\bf S}_2)=4 \pi G \int_W \rho_D ({\bf r}) dV_D , \end{equation} where ${\bf F}={\bf F}({\bf r})$ is the gravitational field, and $\rho_D({\bf r})$ is the mass density, $dV_D=C_3(D,{\bf r})dV_3$, and $G$ is the gravitational constant. Gauss's law by itself can be used to find the gravitational field of a point mass at rest, and the principle of superposition can then be used to find the gravitational field of an arbitrary fractal mass distribution. 
If we consider the spherically symmetric fractal distribution $\rho_D({\bf r})=\rho_D(r)$, and the ball region $W=\{{\bf r}:\ |{\bf r}|\le R\}$, then we have \begin{equation} M(W)=4 \pi \int^R_0 \rho_D(r) C_3(D,{\bf r}) r^2 dr , \end{equation} where $C_3(D,{\bf r})$ is defined in Eq. (\ref{5R}), i.e., \begin{equation} \label{MW} M(W)=4 \pi \frac{2^{3-D}\Gamma(3/2)}{\Gamma(D/2)} \int^R_0 \rho_D(r) r^{D-1} dr . \end{equation} Using the sphere $S=\{{\bf r}: \ |{\bf r}|= R \}$ as a surface $S=\partial W$, we get \begin{equation} \label{PW} \Phi_F(\partial W)= 4 \pi R^2 F_D(R). \end{equation} Substituting Eqs. (\ref{MW}) and (\ref{PW}) in Gauss's law (\ref{GL1}), we get the equation for gravitational field. As a result, Gauss's law for fractal distribution with spherical symmetry leads us to the equation for gravitational field \begin{equation} F_D(R)=\frac{ \pi G 2^{5-D}\Gamma(3/2)}{R^2 \Gamma(D/2)} \int^R_0 \rho_D(r) r^{D-1} dr .\end{equation} For example, the gravitational field of homogeneous ($\rho_D({\bf r})=\rho_0$) spherically symmetric fractal distribution of mass is defined by \begin{equation} F_D(R)=\rho_0 \frac{ \pi G 2^{5-D}\Gamma(3/2)}{ D \Gamma(D/2)} R^{D-2} \sim R^{D-2} .\end{equation} \section{Multipole Moments for Fractal Distribution} \subsection{Multipole Expansion} A multipole expansion is a series expansion of the effect produced by a given system in terms of an expansion parameter which becomes small as the distance away from the system increases. Therefore, the leading terms in a multipole expansion are generally the strongest. The first-order behavior of the system at large distances can therefore be obtained from the first terms of this series, which is generally much easier to compute than the general solution. 
Multipole expansions are most commonly used in problems involving the gravitational field of mass aggregations, the gravity and magnetic fields of mass and flow distributions, and the propagation of electromagnetic waves. To compute one particular case of a multipole expansion, let ${\bf R}=X_k {\bf e}_k$ be the vector from a fixed reference point to the observation point; ${\bf r}=x_k {\bf e}_k$ be the vector from the reference point to a point in the distribution; and ${\bf d}={\bf R}-{\bf r}$ be the vector from a point in the distribution to the observation point. The law of cosines then yields \begin{equation} d^2=R^2\Bigl( 1+\frac{r^2}{R^2} -2\frac{r}{R} \cos \; \theta \Bigr) , \end{equation} where $d=|{\bf d}|$, and $\cos \; \theta = ({\bf r},{\bf R})/(r R)$, so \begin{equation} d=R \sqrt{ 1+\frac{r^2}{R^2} -2\frac{r}{R} \cos \; \theta } . \end{equation} Now define $\epsilon ={r}/{R}$, and $x=\cos \; \theta$, then \begin{equation} \frac{1}{d}=\frac{1}{R} \Bigl( 1-2 \epsilon x+\epsilon^2 \Bigr)^{-1/2}. \end{equation} But $\Bigl( 1-2 \epsilon x+\epsilon^2 \Bigr)^{-1/2}$ is the generating function for Legendre polynomials $P_n(x)$ as follows: \begin{equation} \Bigl( 1-2 \epsilon x+\epsilon^2 \Bigr)^{-1/2}= \sum^{\infty}_{n=0} \epsilon^n P_n(x) , \end{equation} so, we have the equation \begin{equation} \frac{1}{d}=\frac{1}{R} \sum^{\infty}_{n=0} \Bigl(\frac{r}{R}\Bigr)^n P_n( \cos \; \theta) . \end{equation} The gravitational potential $U$ (${\bf F}=-\nabla U$) obeys ($1/d$) law. Therefore, this potential can be expressed as a multipole expansion \begin{equation} \label{11} U= -G \sum^{\infty}_{n=0} \frac{1}{R^{n+1}} \int_W r^n P_n( \cos \; \theta) \rho_D({\bf r}) dV_D. \end{equation} The $n = 0$ term of this expansion can be pulled out by noting that $P_0(x)=1$, so \begin{equation} U= - \frac{G}{R} \int_W \rho_D({\bf r}) dV_D- G \sum^{\infty}_{n=1} \frac{1}{R^{n+1}} \int_W r^n P_n( \cos \; \theta) \rho_D({\bf r}) dV_D. 
\end{equation} The $n$th term \begin{equation} U_n=- \frac{G}{R^{n+1}}\int_W r^n P_n( \cos \; \theta) \rho_D({\bf r}) dV_D \end{equation} is commonly named a multipole, and for $n=2$ a quadrupole. \subsection{Gravitational Moment of Fractal Distribution} Gravitational moments describe the nonuniform distribution of mass. The gravitational quadrupole term is given by \begin{equation} U_2=- \frac{G}{R^3} \int_W r^2 P_2(\cos \; \theta) \rho_D({\bf r}) dV_D =\end{equation} \begin{equation} =-\frac{G}{2 R^3} \int_W r^2 (3 \cos^2 \; \theta-1) \rho_D({\bf r}) dV_D =\end{equation} \begin{equation} =-\frac{G}{2 R^3} \int_W ( \frac{3}{R^2}({\bf R},{\bf r})^2-r^2) \rho_D({\bf r}) dV_D .\end{equation} The quadrupole is the third term in a gravitational multipole expansion, and can be defined by \begin{equation} U_2= - \frac{G}{2 R^3} \sum^3_{k,l=1} \frac{X_k X_l}{R^2} M_{kl} , \end{equation} where $G$ is the gravitational constant, $R$ is the distance from the fractal distribution of mass, and $M_{kl}$ is the gravitational quadrupole moment, which is a tensor. The gravitational quadrupole moment is defined by the equation \begin{equation} M_{kl}=\int_W (3 x_k x_l-r^2\delta_{kl}) \rho_D({\bf r}) dV_D ,\end{equation} where $x_k= x, y$, or $z$. From this definition, it follows that \begin{equation} M_{kl}=M_{lk} , \quad \mbox{and} \quad \sum^{3}_{k=1} M_{kk}=0. \end{equation} Therefore, we have $M_{zz}=-M_{xx}-M_{yy}$. 
In order to compute the values \begin{equation} M^{(D)}_{xx}= \int_W [2x^2-y^2-z^2] \rho_D({\bf r}) dV_D , \end{equation} \begin{equation} M^{(D)}_{yy}= \int_W [-x^2+2y^2-z^2] \rho_D({\bf r}) dV_D , \end{equation} \begin{equation} M^{(D)}_{zz}= \int_W [-x^2-y^2+2z^2] \rho_D({\bf r}) dV_D , \end{equation} we consider the following expression \begin{equation} \label{Mabc} M(\alpha,\beta,\gamma)= \int_W [\alpha x^2+\beta y^2+\gamma z^2] \rho_D({\bf r}) dV_D ,\end{equation} where we use the Riemann-Liouville fractional integral \cite{SKM}, and the function $C_3(D,{\bf r})$ in the form \begin{equation} C_3(D,{\bf r})= \frac{|x y z |^{a-1}}{\Gamma^3(a)} , \quad a=D/3. \end{equation} Using Eq. (\ref{Mabc}), we have \begin{equation} \label{MM} M^{(D)}_{xx}=M(2,-1,-1), \quad M^{(D)}_{yy}=M(-1,2,-1), \quad M^{(D)}_{zz}=M(-1,-1,2) . \end{equation} \subsection{Quadrupole Moment of Fractal Parallelepiped} Let us consider the example of gravitational quadrupole moment for the homogeneous ($\rho_D({\bf r})=\rho_0$) fractal distribution in the parallelepiped region \begin{equation}\label{par} W=\{(x;y;z): \ 0 \le x \le A,\ 0 \le y \le B , \ 0 \le z \le C \} . \end{equation} If we consider the region $W$ in the form (\ref{par}), then we get (\ref{Mabc}) in the form \begin{equation} M(\alpha,\beta,\gamma)= \frac{\rho_0 (ABC)^a}{(a+2)a^2 \Gamma^3(a) } (\alpha A^2+\beta B^2+\gamma C^2) . \end{equation} The total mass of this region $W$ is \begin{equation} M(W)=\rho_0 \int_W dV_D=\frac{\rho_0 (ABC)^a}{a^3 \Gamma^3(a)} .\end{equation} Therefore, we have the following equation \begin{equation} M(\alpha,\beta,\gamma)= \frac{a}{a+2} M(W) (\alpha A^2+\beta B^2+\gamma C^2) , \end{equation} where $a=D/3$. If $D=3$, then we have \begin{equation} M(\alpha,\beta,\gamma)= \frac{1}{3} M(W) (\alpha A^2+\beta B^2+\gamma C^2) . 
\end{equation} As the result, we get gravitational quadrupole moments $M^{(D)}_{kk}$ of fractal distribution in the region $W$: \begin{equation} M^{(D)}_{kk}=\frac{3D}{D+6} \ M^{(3)}_{kk} , \end{equation} where $M^{(3)}_{kk}$ are moments for the usual homogeneous distribution ($D=3$). By analogy with these equations, we can derive $M^{(D)}_{kl}$ for the case $k\not=l$. These quadrupole moments are \begin{equation} M^{(D)}_{kl}=\frac{4 D^2}{(D+3)^2} \ M^{(3)}_{kl} , \quad (k\not=l). \end{equation} Using $2<D\le 3$, we get the relations \begin{equation} 0.75 < \frac{3D}{D+6}\le 1 , \quad 0.64 < \frac{4 D^2}{(D+3)^2} \le 1 . \end{equation} Quadrupole moment of fractal ellipsoid is considered in Appendix. \section{Conclusion} The fractional continuous models for fractal distribution of particles can have a wide application. This is due in part to the relatively small numbers of parameters that define a random fractal distribution of great complexity and rich structure. In many cases, the real fractal structure of matter can be disregarded and the distribution of particles can be replaced by some fractional continuous model \cite{PLA05,AP05}. In order to describe the distribution with non-integer dimension, we must use the fractional calculus. Smoothing of the microscopic characteristics over the physically infinitesimal volume transforms the initial fractal distribution into fractional continuous model that uses the fractional integrals. The order of fractional integral is equal to the fractal dimension of the distribution. The fractional continuous model for the fractal distribution allows us to describe dynamics of a wide class of fractal media \cite{AP05,Chaos05,Physica}. The suggested results can have a wide application to galactic dynamics and cosmology. 
In particular, there is strong evidence that the distribution of mass beyond the scale of clusters of galaxies is fractal, with $D \simeq 1.2$, corresponding to a power-law two-point correlation function with exponent equal to $-1.8$. Fractal distributions may also be present within gravitational systems of a smaller scale, for example, galaxies. However, the results are incomplete in the following aspect: it is known that the fractal distribution of mass in the Universe is characterized by large density fluctuations, even within the fractal volume, therefore it is far from homogeneous. The nonhomogeneity of the fractal distribution can be described by the suggested fractional continuous model \cite{PLA05}. The fluctuation deviation from homogeneity can be parametrized by the so-called $n$-point correlation functions, with $n>2$ \cite{N1,N2,N3,N4}. Such density fluctuations are important in that they produce terms in the force field which can be described only statistically. An elementary example is the random distribution of particles in a three-dimensional sphere. Although the distribution can be considered as uniform when viewed at the scale of the sphere (with $D=3$), the Poisson noise of the density field will cause gravitational clustering in small scales that will finally dominate the overall evolution of the system, i.e., the latter will be quite different from the evolution of a perfectly homogeneous sphere. \section*{Appendix} \subsection*{Fractional Gauss's theorem} In order to realize the representation, we derive the fractional generalization of Gauss's theorem \begin{equation} \label{GT} \int_{\partial W} ({\bf J}({\bf r},t), d{\bf S}_2) =\int_W div( {\bf J}({\bf r},t) ) dV_3 , \end{equation} where the vector ${\bf J}({\bf r},t)=J_k{\bf e}_k$ is a field, and \begin{equation} div( {\bf J})=\frac{\partial {\bf J}}{\partial {\bf r}}= \frac{\partial J_k}{\partial x_k} . 
\end{equation} Here and later we mean the sum over the repeated index $k$ from 1 to 3. Using Eq. (\ref{C2}), we get \begin{equation} \int_{\partial W} ({\bf J}({\bf r},t),d{\bf S}_d) =\int_{\partial W} C_2(d,{\bf r}) ({\bf J}({\bf r},t) , d{\bf S}_2) . \end{equation} Note that we have $C_2(2,{\bf r})=1$ for $d=2$. Using the usual Gauss's theorem (\ref{GT}), we get \begin{equation} \int_{\partial W} C_2(d,{\bf r}) ({\bf J}({\bf r},t), d{\bf S}_2) = \int_W div(C_2(d,{\bf r}) {\bf J}({\bf r},t)) dV_3 . \end{equation} Equations (\ref{5a}) and (\ref{5R}) in the form $dV_3=C^{-1}_3(D,{\bf r}) dV_D$ allow us to derive the fractional generalization of Gauss's theorem: \begin{equation} \label{FGT} \int_{\partial W} ({\bf J}({\bf r},t), d{\bf S}_d)= \int_W C^{-1}_3(D,{\bf r}) div \Bigl( C_2(d,{\bf r}) {\bf J}({\bf r},t) \Bigr) \ dV_D . \end{equation} \subsection*{Quadrupole Moment of Fractal Ellipsoid} Let us consider the example of gravitational quadrupole moment for the homogeneous ($\rho_D({\bf r})=\rho_0$) fractal distribution in the ellipsoid region $W$: \begin{equation}\label{ell} \frac{x^2}{A^2}+\frac{y^2}{B^2}+\frac{z^2}{C^2} \le 1 . \end{equation} If we consider the region $W$ in the form (\ref{ell}), then we get (\ref{Mabc}) in the form \begin{equation} M(\alpha,\beta,\gamma)= \frac{\rho_0 (ABC)^a}{ (3a+2) \Gamma^3(a) } (\alpha A^2 K_1(a)+\beta B^2 K_2(a)+\gamma C^2 K_3 (a) ) , \end{equation} where $a=D/3$, and $K_i(a)$ ($i=1,2,3$) are defined by \begin{equation} K_1(a)=L(a+1,a-1,2\pi) L(a-1,2a+1,\pi), \end{equation} \begin{equation} K_2(a)=L(a-1,a+1,2\pi) L(a-1,2a+1,\pi), \end{equation} \begin{equation} K_3(a)=L(a-1,a-1,2\pi) L(a+1,2a-1,\pi) . \end{equation} Here we use the following function \begin{equation} L(n,m,l)=\frac{2l}{\pi} \int^{\pi/2}_0 dx \ |\cos (x)|^{n} | \sin (x) |^m = \frac{l}{\pi} \frac{\Gamma(n/2+1/2) \Gamma(m/2+1/2)}{\Gamma(n/2+m/2+1)}. 
\end{equation} If $D=3$, we obtain \begin{equation} \label{Mabce} M(\alpha,\beta,\gamma)= \frac{4\pi}{3} \frac{\rho_0 ABC}{5 } (\alpha A^2 +\beta B^2 +\gamma C^2 ) , \end{equation} where we use $K_1=K_2=K_3={4\pi}/{3}$. The total mass of this region $W$ is \begin{equation} \label{Me} M(W)=\rho_0 \int_W dV_D=\frac{\rho_0 (ABC)^a}{3 a \Gamma^3(a)} \frac{2 \Gamma^3(a/2)}{\Gamma(3a/2)} .\end{equation} If $D=3$, we have the total mass \begin{equation} M(W)=\rho_0 \int_W dV_3=\frac{4 \pi}{3} \rho_0 ABC . \end{equation} Using Eqs. (\ref{Mabce}) and (\ref{Me}), we get the quadrupole moments (\ref{MM}) for fractal ellipsoid \begin{equation} M(\alpha,\beta,\gamma)= \frac{a M(W)}{3a+2} (\alpha A^2 +\beta B^2 +\gamma C^2 ) , \end{equation} where $a=D/3$. If $D=3$, then we have the well-known relation \begin{equation} M(\alpha,\beta,\gamma)= \frac{M(W)}{5} (\alpha A^2+\beta B^2+\gamma C^2) . \end{equation}
1,108,101,565,864
arxiv
\section{Introduction} Tilting theory is a collection of well established methods for studying equivalences between triangulated categories in homological algebra. Although it has many facets (see~\cite{HandbookTilting}), in its basic form \cite{Happel,Rickard} it struggles to answer the following question: Given two abelian categories $\mathcal{A}, \mathcal{H},$ which may not be equivalent, how can we characterize the situation where their \emph{derived} categories are equivalent, \[ \mathsf{D}(\mathcal{H}) \simeq \mathsf{D}(\mathcal{A})? \] If $\mathcal{H}$ is a module category, $\mathcal{H} = \mathsf{Mod\textnormal{\textsf{-}}}{R}$, the answer is well-known. The ring $R$ is transferred by the equivalence to what is called a tilting complex in $\mathsf{D}(\mathcal{A})$. On the other hand, existence of a tilting complex in $\mathsf{D}(\mathcal{A})$ whose endomorphism ring is $R$ ensures such a derived equivalence. This is an extremely powerful tool to study representations of groups and quivers, coherent sheaves in commutative or non-commutative geometry, and also in various other situations. \smallskip The starting point for this paper is how to detect the case where $\mathcal{H}$ has an injective cogenerator $W$, e.g.\ if $\mathcal{H}$ is a Grothendieck category. In this case, the image $C$ of $W$ in $\mathsf{D}(\mathcal{A})$ should morally be a cotilting complex. However, we are on a much more experimental ground now with an attempt to define such a complex intrinsically in $\mathsf{D}(\mathcal{A})$. There are several definitions available in the case where $C$ is required to be an object of $\mathcal{A}$ (see~\cite{Colpi,ColpiTrlifaj2,ColpiHeart,St3,ParraSaorin,FiorotMattielloSaorin,PositselskiSt}), and rather recent research dealing with the case where $C$ is an actual complex, \cite{PsaroudakisVitoria,NicolasSaorinZvonareva}. 
One of the major problems with manipulating cotilting complexes is that, unlike the ring in its module category, injective cogenerators are often very far from being finitely generated in any reasonable sense. Our aim here is to understand the situation in detail for the particular case where $\mathcal{A}$ is also a Grothendieck category, and preferably even the category $\mathsf{QCoh}_X$ of quasi-coherent sheaves on a Noetherian scheme $X$. We are inspired by recent progress in understanding cotilting sheaves of affine schemes in~\cite{St1,HrbekSt}. We also restrict the shape of derived equivalences which we consider. We only focus on derived equivalences coming from `turning around' a torsion pair $(\mathcal{T}, \mathcal{F})$ in $\mathcal{A}$. This is a very general method introduced by Happel, Reiten and Smal\o{} in~\cite{HRS}. The abelian category $\mathcal{H}$ which we obtain on the other end of the derived equivalence comes equipped with a torsion pair $(\mathcal{F}, \mathcal{T})$ and it has very strong homological bonds to $\mathcal{A}$. This effectively means that we restrict ourselves to cotilting objects in $\mathcal{A}$ whose injective dimension is at most one. \smallskip Our main result in this direction (Theorem~\ref{thm:maintheorem}), which generalizes~\cite[\S5]{ParraSaorin}, says that for any Grothendieck category $\mathcal{A}$ equipped with a torsion pair $(\mathcal{T}, \mathcal{F})$ such that $\mathcal{F}$ contains a generator, the tilted category $\mathcal{H}$ (in the sense of Happel, Reiten and Smal\o{}) is again a Grothendieck category if and only if $\mathcal{H}$ has an injective cogenerator if and only if $\mathcal{F}$ has an injective cogenerator $C$ as an exact category. We call such objects of $\mathcal{A}$ cotilting. In order to prove this result, we had to overcome the following problem---all proofs available for module categories use the fact that cotilting modules are pure-injective. 
As a general Grothendieck category $\mathcal{A}$ need not be locally finitely presentable, there does not seem to be any good definition of a pure-exact structure on $\mathcal{A}$ available. Still it turns out that it makes sense to define pure-injective objects in this context and that all cotilting objects as in the last paragraph are pure-injective (Theorem~\ref{thm:CotiltingPureInjective}). The pure-injectivity has other nice consequences. For example, this allows us to easily describe the cotilting torsion-free classes in $\mathcal{A}$ (Theorem~\ref{thm:CharTiltingClasses}) and, if $\mathcal{A}$ is a locally Noetherian Grothendieck category, cotilting objects are parametrized, up to product equivalence, by torsion pairs in the category $\mathcal{A}_0$ of Noetherian objects of $\mathcal{A}$ (Theorem~\ref{thm:ClassificationViaTP}; see also~\cite{BuanKrause-cotilting,ParraSaorin}). If $X$ is a Noetherian scheme, this takes us back to our original aim. We can in principle classify all cotilting sheaves in $\mathsf{QCoh}_X$, up to product equivalence, as soon as we understand torsion pairs in $\mathsf{Coh}_X$. If $X$ is affine, all torsion pairs in $\mathsf{Coh}_X$ are hereditary and given by a subset $Y\subseteq X$ which is closed under specialization~\cite[\S2]{St1}. If $X$ is non-affine, this is no longer the case. Here we single out the torsion pairs in $\mathsf{Coh}_X$ which are hereditary (Theorem~\ref{thm:charTPCoh}). It turns out that these are again precisely the ones classified by specialization closed subsets $Y\subseteq X$, or equivalently the ones for which the torsion class is a tensor ideal. If $X$ has an ample family of line bundles, these are also precisely those for which the torsion-free class is closed under twists by the line bundles. The consequences for cotilting quasi-coherent sheaves are then summarized in Theorem~\ref{thm:classification}. 
Last but not least, we also illustrate the theory throughout the paper in the case of $1$-dimensional Noetherian schemes by providing an explicit computation of the cotilting sheaves associated to a specialization closed subset. We discuss the example of $\mathsf{QCoh}_{\mathbb{P}^1_k}$, quasi-coherent sheaves on the projective line over a field, in particular. It turns out that all the technical nuances and differences from the affine situation already occur there. \section{Cotilting objects in Grothendieck categories} \label{sec:cotilting} The goal of the section is to establish basic theory of cotilting objects of injective dimension at most $1$ for Grothendieck categories. The definition of such a (infinitely generated) cotilting object in a module category is rather standard, see \cite{Colpi,ColpiTrlifaj2}. However, as mentioned in the introduction, extensions of the concept to more general abelian categories or even triangulated categories are still subject to experiments by various authors, see e.g.\ \cite{ParraSaorin,FiorotMattielloSaorin,PsaroudakisVitoria,NicolasSaorinZvonareva} to name a few. Here we show that basic aspects of cotilting modules from \cite{Colpi,ColpiTrlifaj2} generalize to Grothendieck categories rather easily and that our definition matches perfectly with the one from \cite{ParraSaorin,FiorotMattielloSaorin,NicolasSaorinZvonareva} (and a posteriori with \cite{PsaroudakisVitoria} as well in view of the results in \cite{NicolasSaorinZvonareva}). At several steps throughout this section, we make use of results on interaction of the $\mathrm{Ext}^1$-functor with infinite products in a Grothendieck category, which are stated in Appendix~\ref{sec:ExtProd}. We start by recalling the definition of a torsion pair in an abelian category. \begin{deff}[{\cite{Dickson}}]\label{def:torznipar} Let $\mathcal{A}$ be an abelian category. 
A \emph{torsion pair} in $\mathcal{A}$ is a pair $(\mathcal{T}, \mathcal{F})$ of full subcategories of $\mathcal{A}$ such that \begin{enumerate}[(1)] \item\label{TPDef1}{$\hom_{\mathcal{A}}(\mathcal{T}, \mathcal{F})=0$, and } \item\label{TPDef2}{for every $A \in \mathcal{A},$ there is an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& T \ar[r]& A \ar[r] & F \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent with $T \in \mathcal{T}$ and $F \in \mathcal{F}$. We call the object $T$ the \emph{torsion part} of $A$, and the object $F$ the \emph{torsion-free part} of $A$. } \end{enumerate} A torsion pair $(\mathcal{T}, \mathcal{F})$ is called \emph{hereditary} if $\mathcal{T}$ is closed under subobjects. \end{deff} \begin{rem}[\cite{Dickson}]\label{rem:TP} ~ \begin{enumerate} \item\label{TP1}{It follows from the definition that for each torsion pair $\mathcal{F}=\ker \hom_{\mathcal{A}}(\mathcal{T}, -)$ and $\mathcal{T}=\ker \hom_{\mathcal{A}}(-, \mathcal{F})$. In particular, $\mathcal{T}$ is closed under extensions, factors and under all colimits that exist in $\mathcal{A}$ and, dually, $\mathcal{F}$ is closed under extensions, subobjects and limits. } \item\label{TP2}{Suppose that $\mathcal{A}$ is a well-powered abelian category where for every direct system $A_i$ of subobjects of a fixed object $A$ (and monomorphisms compatible with those to $A$), $\varinjlim_i A_i$ exists---this is fulfilled in particular when $\mathcal{A}$ is a Grothendieck category (e.g. $\mathsf{QCoh}_X$) or a Noetherian category (e.g. $\mathsf{Coh}_X$ for a Noetherian scheme $X$). Assume that $\mathcal{T}\subseteq\mathcal{A}$ is closed under extensions, quotients and under colimits that exist in $\mathcal{A}$, and put $\mathcal{F}=\ker \hom_{\mathcal{A}}(\mathcal{T}, -)$. Then $(\mathcal{T}, \mathcal{F})$ is a torsion pair. 
Indeed, consider $A \in \mathcal{A}$, and take $T \stackrel{\alpha}\hookrightarrow A$ the maximum subobject of $A$ with $T \in \mathcal{T}$. Such a $T$ exists, it is obtained as the image of $\varinjlim_i T_i \rightarrow A,$ where $T_i$ varies over (representatives of) all subobjects of $A$ which are in $\mathcal{T}$. Then necessarily $\mathrm{Coker}\,\alpha\in\mathcal{F}$ and we obtain the exact sequence required by Definition~\hyperref[TPDef2]{\ref*{def:torznipar}~(\ref*{TPDef2})}.} \end{enumerate} \end{rem} We will also make use of the following easy observation, whose proof we omit. \begin{lem}\label{lem:TorsionPairsInclusion} Suppose $(\mathcal{T}, \mathcal{F})$ and $(\mathcal{T}', \mathcal{F}')$ are two torsion pairs in an abelian category $\mathcal{A}$ such that $\mathcal{T} \subseteq \mathcal{T}'$ and $\mathcal{F}\subseteq \mathcal{F}'$. Then $(\mathcal{T}, \mathcal{F})=(\mathcal{T}', \mathcal{F}').$ \end{lem} Given a class of objects $\mathcal{C}$ in a Grothendieck category $\mathcal{A}$, we further denote \begin{align*} {^\perp\mathcal{C}} &= \{ F \in \mathcal{A} \mid \mathrm{Ext}^1_\mathcal{A}(F, C) = 0 \text{ for each } C\in\mathcal{C} \},\\ \mathcal{C}^\perp &= \{ F \in \mathcal{A} \mid \mathrm{Ext}^1_\mathcal{A}(C, F) = 0 \text{ for each } C\in\mathcal{C} \} \end{align*} and we denote by $\mathrm{Prod}({\mathcal{C}})$ the class of direct summands of arbitrary direct products of objects from $\mathcal{C}$. If $\mathcal{C}=\{C\}$ is a singleton, we simply write ${^\perp{C}}$, $C^\perp$ and $\mathrm{Prod}({C})$, respectively. Given an object $C \in \mathcal{A}$, we denote by $\mathrm{Cogen}({C})$ the class of all objects $F\in\mathcal{A}$ which are cogenerated by $C$ (that is, which admit an embedding of the form $F\hookrightarrow {C}^{\times I}$). Now we can give the technically least involved definition of a cotilting object in a Grothendieck category $\mathcal{A}$. 
It generalizes~\cite[Definition 2.6]{ColpiTiltingGrothendieck} for Grothendieck categories which might not have enough projective objects. \begin{deff}\label{def:cotilting} An object $C$ in a Grothendieck category $\mathcal{A}$ is \emph{cotilting} if ${^\perp C}=\mathrm{Cogen}({C})$ and the class ${^\perp C}$ contains a generator of $\mathcal{A}$. The class $\mathcal{C} = \mathrm{Cogen}({C})$ is called the \emph{cotilting class} associated with $C$. \end{deff} \begin{rem} Unlike in module categories, ${^\perp C}$ is not automatically generating since $\mathcal{A}$ may not have enough projective objects. \end{rem} The following shows at once that each cotilting class is a torsion-free class. We state the lemma in greater generality for later use. \begin{lem}\label{lem:cotilting-TF} Let $\mathcal{A}$ be a Grothendieck category and $C\in\mathcal{A}$ be an object such that $\mathrm{Cogen}({C})\subseteq {^\perp C}$. Then the class $\mathrm{Cogen}({C})$ is torsion-free, with the corresponding torsion class given by \[ \mathcal{T} = \{ T \in \mathcal{A} \mid \hom_\mathcal{A}(T, C) = 0 \}. \] In particular, the cotilting class is closed under direct sums. \end{lem} \begin{proof} By the very definition, $\mathrm{Cogen}({C})$ is closed under taking subobjects and products, and thus also all limits in $\mathcal{A}$. Once we show that $\mathrm{Cogen}({C})$ is also closed under extensions, it will be a torsion-free class by an argument formally dual to Remark~\hyperref[TP2]{\ref*{rem:TP}~(\ref*{TP2})}. To that end, consider a short exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& {F}' \ar[r, "j"] & {F} \ar[r, "p"] & {F}'' \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent in $\mathcal{A}$, where ${F}', {F}'' \in \mathrm{Cogen}({C})$. 
That is, we have monomorphisms $i', i''$ forming the solid part of the following diagram: \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& {F}' \ar[r, "j"] \ar[d, "i'", hook] & {F} \ar[r, "p"] \ar[d, dotted, "({}^{\;\;k}_{i''p})"] & {F}'' \ar[r] \ar[d, "i''", hook] & 0 \\ 0 \ar[r, dotted]& {C}^{\times I} \ar[r, dotted, "\iota_I"] & {C}^{\times(I \cup J)} \ar[r, dotted, "\pi_J"] & {C}^{\times J} \ar[r, dotted] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent Regarding the dotted part, we take for the lower sequence the split exact sequence with the canonical projections and inclusions. Since $F'' \in \mathrm{Cogen}({C}) \subseteq {^\perp C} = {^\perp {C}^{\times(I \cup J)}}$ by Corollary~\ref{cor:BunoProd}, the composition $\iota_I i'$ extends to a morphism $k\colon F \to {C}^{\times(I \cup J)}$, i.e.\ $\iota_I i' = kj$. One readily checks that this choice of $k$ in the matrix at the middle vertical arrow makes the diagram commutative and that, by the Four Lemma, the middle arrow is a monomorphism. Thus, $F\in \mathrm{Cogen}({C})$, as required. Finally, note that the canonical morphism $\bigoplus_{i\in I} F_i \to \prod_{i\in I} F_i$ from a direct sum to a product is injective in any Grothendieck category, since it is the direct limit of the split monomorphisms $\bigoplus_{i\in J} F_i \simeq \prod_{i\in J} F_i \to \prod_{i\in I} F_i$, where $J$ runs over finite subsets of $I$. It follows that any torsion-free class in a Grothendieck category is closed under taking direct sums. \end{proof} Now we aim at giving a homological characterization of cotilting objects along the lines of \cite{Colpi,ColpiTrlifaj2}. However, some care is necessary since products need not be exact in $\mathcal{A}$. We first note that the injective dimension of a cotilting object $C$ is at most one. 
Indeed, this is a consequence of the following proposition applied to $\mathcal{F} = \mathrm{Cogen}({C}) = {^\perp{C}}.$ \begin{prop}\label{prop:cotilting-injdim1} Let $\mathcal{A}$ be a Grothendieck category and $\mathcal{F}$ be a torsion-free class in $\mathcal{A}$ which contains a generator. Then $\mathrm{injdim}\, {C}\leq 1$ for any $C\in\mathcal{F}^\perp$. \end{prop} \begin{proof} Let ${G}$ be an object of $\mathcal{A}$. We show that $\mathrm{Ext}_{\mathcal{A}}^2({G}, {C})=0$ by showing that every $2$-fold extension of ${G}$ by ${C}$ represents the trivial class of $\mathrm{Ext}^2_{\mathcal{A}}({G}, {C})$. Consider a $2$-fold extension \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \xi: &0 \ar[r] & {C} \ar[r]& {E}_2 \ar[r]& {E}_1 \ar[r, "\alpha"] & {G} \ar[r] & 0 \;.& \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent Since $\mathcal{F}$ contains a generator and is closed under direct sums by Lemma~\ref{lem:cotilting-TF}, there is an object ${F}_1 \in \mathcal{F}$ and an epimorphism $\varepsilon\colon{F}_1 \rightarrow {E}_1$. 
If we denote $\beta=\alpha \varepsilon$ and consider the pullback of the projection $\pi\colon E_2 \to \ker\alpha$ along the map $\ker\beta\to\ker\alpha$ induced by $\varepsilon$, we obtain the following commutative diagram with exact rows: \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd}[column sep=small, row sep=tiny] \xi:&0 \ar[rr]&&{C} \ar[rr]&& {E}_2 \ar[rr] \ar[dr, two heads, "\pi"'] && {E}_1 \ar[rr, "\alpha"] && {G} \ar[rr] && 0 \\ &&& && & \ker\alpha \ar[ru, hook] && & & \\ \xi':&0 \ar[rr]&&{C} \ar[uu, equal] \ar[rr, "\gamma"]&& {F}_2 \ar[rr]\ar[rd, "\pi''"', two heads] \ar[uu] && {F}_1 \ar[uu, "\varepsilon", two heads] \ar[rr, "\beta"] && {G} \ar[rr] \ar[uu, equal] && 0 \\ &&& && & \ker\beta \ar[ru, hook]\ar[uu, "\varepsilon'", crossing over, near start] && & & \end{tikzcd} \end{adjustbox} \vspace{0.15cm} That is, the $2$-extensions $\xi$ and $\xi'$ represent the same element of $\mathrm{Ext}^2_\mathcal{A}(G, C)$. Now it is enough to show that $\xi'$ is trivial by showing that $\gamma$ is split monic. This, however, immediately follows from the fact that $\ker\beta \in \mathcal{F} \subseteq {^\perp C}$ since $\ker\beta$ is a subobject of $F_1$. \end{proof} Suppose now that $C\in\mathcal{A}$ is a cotilting object. Then, by Corollary~\ref{cor:BunoProd}, $C^{\times I}$ is also a cotilting object associated with the same cotilting class. In particular, each $C' \in \mathrm{Prod}(C)$ is of injective dimension at most one, and as such admits an injective resolution \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & {C}' \ar[r]& {E}^0 \ar[r]& {E}^1 \ar[r] & 0 \;.& \end{tikzcd} \end{adjustbox} \vspace{0.15cm} We will show that, on the other hand, each injective object admits a dual resolution in terms of objects of $\mathrm{Prod}{(C)}$. In order to do so, we need a lemma, which in essence generalizes \cite[Proposition~1.8]{Colpi} to our abstract setting. 
\begin{lem}\label{lem:cogen=copres} Let ${C}$ be an object of $\mathcal{A}$ satisfying ${^{\perp}}{C}=\mathrm{Cogen}({C})$. Assume that ${K} \in \mathrm{Cogen}({C})$. Then there is a short exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& {K} \ar[r]& {C}^{\times X} \ar[r] & {L} \ar[r] & 0 \;, \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent where $X$ is a set and ${L} \in \mathrm{Cogen}({C})$. \end{lem} \begin{proof} We put $X=\hom_{\mathcal{A}}({K}, {C})$ and consider the diagonal map $\Delta\colon {K} \rightarrow {C}^{\times X}.$ That is, $\Delta$ is given by $\pi_{\chi} \Delta = \chi,\;\; \chi \in X,$ and it is injective since ${K} \in \mathrm{Cogen}({C}).$ Thus, we obtain a short exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& {K} \ar[r, "\Delta"]& {C}^{\times X} \ar[r] & {L} \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent and it remains to check that ${L} \in \mathrm{Cogen}({C})={^{\perp}}{C}$. Applying $\hom_{\mathcal{A}}(-, {C})$, we obtain a long exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \cdots\; \hom_{\mathcal{A}}({C}^{\times X}, {C}) \ar[r, "{-\circ\Delta}"] & \hom_{\mathcal{A}}({K}, {C}) \ar[r] &\mathrm{Ext}_{\mathcal{A}}^1({L}, {C}) \ar[r] & \mathrm{Ext}_{\mathcal{A}}^1({C}^{\times X}, {C}) \;\cdots\,. \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent The map $-\circ \Delta=\hom_{\mathcal{A}}(\Delta, {C})$ is clearly surjective by construction and $\mathrm{Ext}^{1}_{\mathcal{A}}({C}^{\times X}, {C})=0$. Thus, $\mathrm{Ext}_{\mathcal{A}}^1({L}, {C})=0$, which concludes the proof. 
\end{proof} \begin{lem} \label{lem:prod-C} Given a cotilting object $C$ of $\mathcal{A},$ we have that $$ \mathrm{Cogen}({C}) \cap \mathrm{Cogen}({C})^\perp=\mathrm{Prod}({C}).$$ \end{lem} \begin{proof} To show the inclusion `$\supseteq$', note that $\mathrm{Prod}({C}) \subseteq \mathrm{Cogen}({C})$ and ${C} \in (^\perp {C})^\perp=\mathrm{Cogen}({C})^\perp$, using Definition~\ref{def:cotilting}. It follows that $\mathrm{Prod}({C}) \subseteq \mathrm{Cogen}({C})^\perp$ since $\mathrm{Cogen}({C})^\perp$ is closed under direct summands and direct products by Corollary~\ref{prop:ExtTriv}. Conversely, given ${P} \in \mathrm{Cogen}({C}) \cap \mathrm{Cogen}({C})^\perp$, by Lemma~\ref{lem:cogen=copres} we obtain a short exact sequence $0 \to P \to {C}^{\times X} \to L \to 0$ with $L\in\mathrm{Cogen}({C})$. Since ${P} \in \mathrm{Cogen}({C})^\perp$, the sequence splits, showing that ${P} \in \mathrm{Prod}({C})$ and thus finishing the proof. \end{proof} \begin{prop} \label{prop:condition-C3} Let ${C}$ be a $1$-cotilting object of $\mathcal{A}$. Given an injective object ${W}$ (an injective cogenerator for $\mathcal{A}$ in particular), there is a short exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& {C}_1 \ar[r]& {C}_0 \ar[r] & {W} \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent with ${C}_0, {C}_1 \in \mathrm{Prod}({C})$. \end{prop} \begin{proof} The argument is very similar to the one used in \cite{ColpiTrlifaj} for modules, with necessary modifications due to the fact that we do not have exact products and enough projective objects. Since ${^{\perp}}{C}=\mathrm{Cogen}({C})$ is generating, one can consider an epimorphism $e':{F}\twoheadrightarrow {W}$, where ${F} \in \mathrm{Cogen}({C})$. By definition of $\mathrm{Cogen}({C})$, there is a monomorphism $\iota:{F}\hookrightarrow {C}^{\times I}$ for some set $I$. 
By injectivity of ${W}$, $e'$ extends along $\iota$ to a morphism $e: {C}^{\times I}\twoheadrightarrow {W}$. Clearly $e$ is an epimorphism as well. Thus, we have a short exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & {K} \ar[r, "i"]& {C}^{\times I} \ar[r, "e"] & {W} \ar[r]& 0, \end{tikzcd} \end{adjustbox} \vspace{0.15cm} and, obviously, ${K} \in \mathrm{Cogen}({C})$. By Lemma~\ref{lem:cogen=copres} there is a short exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & {K} \ar[r, "j"] & {C}^{\times J} \ar[r] & {L} \ar[r]& 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} for some set $J$ and some ${L} \in \mathrm{Cogen}({C}).$ Consider the pushout ${P}$ of $i$ and $j$, which gives rise to a commutative diagram \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} & 0 \ar[d] & 0 \ar[d] & & \\ 0 \ar[r]& {K} \ar[r, "i"] \ar[d, "j"]& {C}^{\times I} \ar[r, "e"] \ar[d] & {W} \ar[r] \ar[d, equal] & 0 \\ 0 \ar[r]& {C}^{\times J} \ar[r] \ar[d]& {P} \ar[r] \ar[d] & {W} \ar[r] & 0 \\ & {L} \ar[r, equal] \ar[d] & {L} \ar[d] & & \\ & 0 & 0 & & \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent with exact rows and columns. Observe that since ${L}, {C}^{\times I} \in \mathrm{Cogen}({C})$ and $\mathrm{Cogen}({C})={^\perp}{C}$ is closed under extensions, we have ${P} \in \mathrm{Cogen}({C})$. Since, on the other hand, ${C}^{\times J}, W\in\mathrm{Cogen}({C})^\perp=({^\perp}{C})^\perp$, we also have ${P} \in \mathrm{Cogen}({C})^\perp.$ Thus, ${P} \in \mathrm{Prod}({C})$ by Lemma~\ref{lem:prod-C}, and the second row of the pushout diagram is the desired exact sequence from the statement of the proposition. 
\end{proof} Now we can characterize cotilting objects essentially in terms of the conclusions of Propositions~\ref{prop:cotilting-injdim1} and~\ref{prop:condition-C3}, and the fact that $\mathrm{Ext}^1_\mathcal{A}({C}^{\times I}, {C}) = 0$ for all sets $I$. \begin{thm}\label{thm:CotiltingChar} Let $\mathcal{A}$ be a Grothendieck category and $C$ be an object in $\mathcal{A}$. Then the following conditions are equivalent: \begin{enumerate}[(1)] \item\label{Char1}{$C$ is cotilting.} \item\label{Char2}{$C$ satisfies the following three conditions: \begin{enumerate}[(C1)] \item\label{C1}{$\mathrm{injdim}\, C \leq 1$.} \item\label{C2}{$\mathrm{Ext}_\mathcal{A}^{1}(C^{\times I}, C)=0$ for every set $I$.} \item\label{C3}{For every injective cogenerator $W$, there is an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & C_1 \ar[r] & C_0 \ar[r] & W \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent where $C_0, C_1 \in \mathrm{Prod}(C)$.} \end{enumerate}} \end{enumerate} \end{thm} \begin{proof} The implication $\hyperref[Char1]{(\ref*{Char1})} \Rightarrow \hyperref[Char2]{(\ref*{Char2})}$ is clear from Propositions~\ref{prop:cotilting-injdim1} and~\ref{prop:condition-C3}. Suppose conversely that $C$ satisfies conditions \hyperref[C1]{(C1)}--\hyperref[C3]{(C3)}. By \hyperref[C2]{(C2)} we have $\mathrm{Prod}({C}) \subseteq {^\perp}{C}$ and, by \hyperref[C1]{(C1)}, ${^\perp}{C}$ is closed under subobjects. Thus ${\mathrm{Cogen}({C}) \subseteq {^\perp}{C}}$. In order to prove the inclusion $\mathrm{Cogen}({C}) \supseteq {^\perp}{C}$, note that we already know by Lemma~\ref{lem:cotilting-TF} that $\mathrm{Cogen}({C})$ is a torsion-free class of a torsion pair in $\mathcal{A}$. 
Consider any object $A \in {^{\perp}}{C}$ and the short exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& {T} \ar[r] & {A} \ar[r] & {F} \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent with ${F} \in \mathrm{Cogen}({C})$ and ${T} \in \ker \hom_{\mathcal{A}}(-, {C}).$ In order to prove that ${A} \in \mathrm{Cogen}({C})$, it suffices to show that ${T}=0$. To this end, consider an injective cogenerator ${W} \in \mathcal{A}$ and the short exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& {C}_1 \ar[r] & {C}_0 \ar[r] & {W} \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent from axiom \hyperref[C3]{(C3)}. Applying $\hom_{\mathcal{A}}({T}, -)$, we obtain an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \cdots \ar[r]& \hom_{\mathcal{A}}({T}, {C}_0) \ar[r] & \hom_{\mathcal{A}}({T}, {W}) \ar[r] & \mathrm{Ext}^1_{\mathcal{A}}({T}, {C}_1) \ar[r] & \cdots\,, \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent where $\hom_{\mathcal{A}}({T}, {C}_0)=0$, and by the fact that ${^\perp}{C} = {^\perp}{\mathrm{Prod}({C})}$ is closed under subobjects, also $\mathrm{Ext}^1_{\mathcal{A}}({T}, {C}_1)=0$. It follows that $\hom_{\mathcal{A}}({T}, {W})=0$ and, since ${W}$ is a cogenerator, also ${T}=0$. This concludes the proof of the inclusion. It remains to observe that $\mathrm{Cogen}({C}) = {^\perp}{C}$ is generating. Consider any object $G \in \mathcal{A}$ and an injective cogenerator $W$ admitting a monomorphism $\iota:G \hookrightarrow W$ (if $U$ is any injective cogenerator, then there is a monomorphism $G \hookrightarrow U^{\times I}$ for some set $I$ and we can take $W=U^{\times I}$). 
By \hyperref[C3]{(C3)}, we may consider an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & C_1 \ar[r] & C_0 \ar[r, "\pi"] & W \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent with $C_0, C_1 \in \mathrm{Prod}({C})$, and take the pullback of $\pi$ along $\iota$, \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & C_1 \ar[r] & C_0 \ar[r, "\pi"] & W \ar[r] & 0 \\ 0 \ar[r] & C_1 \ar[r] \ar[u, equal] & P \ar[r, "\pi'"] \ar[u, "\iota'", hook] & G \ar[r] \ar[u, "\iota", hook] & 0\,. \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent This shows that $G$ is an epimorphic image of $P$ and $P \in \mathrm{Cogen}({C})$. \end{proof} We conclude the section by showing that products of copies of a cotilting object are homologically well-behaved. This implies that the definition of a $1$-cotilting object from \cite[Definition 8]{NicolasSaorinZvonareva} or \cite[Definition 2.1]{FiorotMattielloSaorin} for more general abelian categories specializes precisely to our definition in the case of Grothendieck categories. Strictly speaking, the latter references give formally dual definitions of so-called tilting objects, but it is remarked in both of the papers that their results apply to the dual concept as well. In particular, we can use abstract results on the existence of derived equivalences from \cite{NicolasSaorinZvonareva,FiorotMattielloSaorin} (this topic will be discussed in Section~\ref{sec:purity} as well). We in fact prove a more general result which implies that a Grothendieck category may have interesting full subcategories where products are exact (see e.g.\ Example~\ref{example:P1VsKronecker} below). \begin{prop}\label{prop:ExactProd} Let $\mathcal{A}$ be a Grothendieck category and $\mathcal{F}\subseteq\mathcal{A}$ a torsion-free class which contains a generator. 
Given any collection of objects $B_i,$ $i \in I,$ from $\mathcal{F}^\perp$ and any $n>0$, the $n$-th right derived functor of product, $\mathbf{R}^n\prod_{i \in I} B_i,$ vanishes. \end{prop} \begin{proof} First note that, by Proposition~\ref{prop:cotilting-injdim1}, we obtain that $\mathrm{injdim}\, {B_i}\leq 1$ for all $i\in I.$ That is, $\mathbf{R}^n\prod_{i \in I} B_i = 0$ for all $n>1$. Now fix a generator $G\in \mathcal{F}$ and for each $i\in I$, fix an injective resolution \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & B_i \ar[r]& E^0_i \ar[r, "\rho_i"]& E^1_i \ar[r] & 0\;. \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent Then $\hom_\mathcal{A}(G, \rho_i)$ is surjective by the assumption and so is $\hom_\mathcal{A}(G, \prod_{i\in I}\rho_i) \simeq \prod_{i\in I}\hom_\mathcal{A}(G, \rho_i)$ since products are exact in the category of abelian groups. As $G$ is a generator for $\mathcal{A}$, it follows that $\prod_i \rho_i\colon \prod_i E^0_i \to \prod_i E^1_i$ is an epimorphism and $\mathbf{R}^1\prod_{i \in I} B_i = \mathrm{Coker}\, \prod_i \rho_i = 0.$ \end{proof} \begin{cor}\label{cor:ExactProdOfC} Let $\mathcal{A}$ be a Grothendieck category and $C \in \mathcal{A}$ be a cotilting object. Then a product of copies of $C$ in $\mathcal{A}$ coincides with the corresponding product of copies of $C$ in the derived category $\mathsf{D}(\mathcal{A})$. \end{cor} \begin{proof} We apply the previous proposition to $\mathcal{F} = \mathrm{Cogen}({C})$. \end{proof} \section{Pure-injectivity of cotilting objects} \label{sec:purity} Here we focus on a more advanced aspect of cotilting objects in Grothendieck categories. A key ingredient of the corresponding theory for modules is that all cotilting modules are pure-injective, \cite{BazzoniPureInj}. We will prove an analogous result for cotilting objects in Grothendieck categories. This will allow us to characterize which torsion-free classes are associated with cotilting objects. 
As a consequence, we recover a first version of a classification for locally Noetherian Grothendieck categories from~\cite{BuanKrause-cotilting,ParraSaorin} and, in the next section, we prove that a cotilting object in a Grothendieck category induces a derived equivalence to another abelian category which is again a Grothendieck category. The first obstacle on the way is that there seems to be no consensus on what the definition of a pure-injective object is for a general Grothendieck category (the definition using pure exact sequences does not apply, since a Grothendieck category need not be locally finitely presentable). We use as the definition a characterization of pure-injectivity for modules from~\cite[Theorem 7.1~(vi)]{JensenLenzing}. \begin{deff}\label{def:PureInj} An object $C$ in a Grothendieck category $\mathcal{A}$ is \emph{pure-injective} if, for each index set $I$, the summing map $\Sigma\colon C^{\oplus I} \to C$ (all of whose components $\Sigma \circ \iota_i\colon C \to C,$ $i\in I,$ are the identity maps on $C$) extends to a homomorphism $\overline{\Sigma}\colon C^{\times I} \to C$. \end{deff} Although this is a rather elementary intrinsic definition of pure-injectivity, it will be useful to give a characterization in terms of pure-injectivity of certain modules. We will do so via the Gabriel-Popescu theorem, a version of which we first recall. \begin{prop}[{\cite[\S X.4]{Stenstrom}}] \label{prop:GabrielPopescu} Let $\mathcal{A}$ be a Grothendieck category with a generator $G$ and put $R = \mathrm{End}_\mathcal{A}(G)$. Then the functor $H = \hom_\mathcal{A}(G,-)\colon \mathcal{A} \to \mathsf{Mod\textnormal{\textsf{-}}} R$ is fully faithful and has an exact left adjoint $T$, \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \mathsf{Mod\textnormal{\textsf{-}}} R \ar[rr, bend left, start anchor=north east, end anchor=north west, "T"] & \perp & \mathcal{A}\;. 
\ar[ll, bend left, start anchor=south west, end anchor=south east, hook, "H"] \end{tikzcd} \end{adjustbox} \end{prop} In particular, the adjunction counit $\varepsilon\colon TH\to 1_\mathcal{A}$ is a natural equivalence and $H$ identifies $\mathcal{A}$ with an extension closed reflective subcategory of $\mathsf{Mod\textnormal{\textsf{-}}} R$, where the adjunction unit $\eta\colon 1_{\mathsf{Mod\textnormal{\textsf{-}}} R} \to HT$ provides the reflections. Clearly, $T$ preserves all colimits and finite limits in $\mathcal{A}$, but it need not preserve infinite products. However, the following easy observation shows that it does preserve at least certain products. \begin{lem}\label{lem:GabrielPopescuProducts} Suppose that we are in the situation of Proposition~\ref{prop:GabrielPopescu}. If $M_i,$ $i\in I,$ is a family of $R$-modules in the essential image of $H$, then the canonical morphism $T(\prod_i M_i) \to \prod_i T(M_i)$ is an isomorphism. \end{lem} \begin{proof} Let $\mathrm{Im}\, H\subseteq \mathsf{Mod\textnormal{\textsf{-}}} R$ denote the essential image of $H$. The lemma follows from the fact that $T$ is an inverse of the category equivalence $H\colon \mathcal{A} \to \mathrm{Im}\, H$ and that products in $\mathrm{Im}\, H$ are also products in $\mathsf{Mod\textnormal{\textsf{-}}} R$. \end{proof} Now we can characterize pure-injectivity as follows. 
\begin{prop}\label{prop:PureInjChar} Let $\mathcal{A}$ be a Grothendieck category and $C \in \mathcal{A}.$ Then the following are equivalent: \begin{enumerate}[(1)] \item\label{PInj1}{$C$ is pure-injective in $\mathcal{A}$.} \item\label{PInj2}{There is a generator $G\in\mathcal{A}$ such that $\hom_\mathcal{A}(G, C)$ is a pure-injective right $\mathrm{End}_\mathcal{A}(G)$-module.} \item\label{PInj3}{$\hom_\mathcal{A}(G, C)$ is a pure-injective $\mathrm{End}_\mathcal{A}(G)$-module for each generator $G\in\mathcal{A}$.} \end{enumerate} \end{prop} \begin{proof} Let $G\in\mathcal{A}$ be a generator of $\mathcal{A}$, $R=\mathrm{End}_\mathcal{A}(G)$, and consider the fully faithful functor $H=\hom_\mathcal{A}(G,-)\colon \mathcal{A} \to \mathsf{Mod\textnormal{\textsf{-}}}{R}$. If $I$ is any set and we denote $M=H(C)$, we have a commutative square \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} M^{\oplus I} \ar[r] \ar[d, "\eta'"]& M^{\times I}\ar[d, "\simeq"] \\ H(C^{\oplus I}) \ar[r]& H(C^{\times I})\;. \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent The horizontal morphisms are the canonical ones, while the vertical ones are the compositions of the unit of the adjunction with the isomorphisms $HT(M^{\oplus I}) \to H((TM)^{\oplus I}) \cong H(C^{\oplus I})$ and $HT(M^{\times I}) \to H((TM)^{\times I}) \cong H(C^{\times I})$, respectively. In particular, $\eta'$ is an $\mathrm{Im}\, H$-reflection and $\hom_\mathcal{A}(\eta',M)$ is an isomorphism. Now $\Sigma \in \hom_\mathcal{A}(C^{\oplus I}, C)$ factorizes through $C^{\oplus I} \hookrightarrow C^{\times I}$ if and only if $H(\Sigma)$ factorizes through the lower horizontal map of the square if and only if the summing map $\Sigma'\colon M^{\oplus I}\to M$ factorizes through the upper horizontal map of the square. 
The latter condition characterizes pure-injectivity of $M$ as an $R$-module by~\cite[Theorem 7.1~(vi)]{JensenLenzing} and the argument clearly does not depend on the choice of the generator $G$. \end{proof} Our next concern is to prove that cotilting classes are closed under taking direct limits. We first introduce a special kind of direct limits. \begin{deff}\label{def:ReducedProd} Let $I$ be a set, $(F_i\mid i\in I)$ be a family of objects of a Grothendieck category $\mathcal{A}$ and $\kappa$ be an infinite cardinal number. A \emph{$\kappa$-bounded product} $\prod_{i \in I}^{<\kappa} F_i$ is defined as the direct limit $\varinjlim_{J\subseteq I} \prod_{j\in J} F_j,$ where $J$ runs over all subsets of $I$ of cardinality $<\kappa$ and the maps in the direct system are the canonical split embeddings $\prod_{j\in J} F_j \hookrightarrow \prod_{j\in J'} F_j$ for each $J\subseteq J'\subseteq I$. Clearly, the bounded product $\prod_{i \in I}^{<\kappa} F_i$ canonically embeds into the usual product $\prod_{i\in I} F_i$. The factor $\prod_{i\in I} F_i/\prod_{i\in I}^{<\kappa} F_i$ is called the \emph{$\kappa$-reduced product}. When all $F_i$ are equal to a single object $F$, we will speak of \emph{$\kappa$-bounded} and \emph{$\kappa$-reduced powers} of $F$, respectively. If $\kappa$ is clear from the context, we will denote these by $F^{\boxtimes I}$ and $F^{\times I}/F^{\boxtimes I}$, respectively. \end{deff} If $\kappa = \aleph_0$, the bounded product $\prod_{i\in I}^{<\aleph_0} F_i$ coincides with the usual direct sum $\bigoplus_{i\in I} F_i$. If $\mathcal{A}$ is a module category, the bounded product $\prod_{i \in I}^{<\kappa} F_i$ can be described as the submodule of $\prod_{i\in I} F_i$ formed by the elements with $<\kappa$ non-zero components. We will employ the following relation between reduced products and direct limits which is essentially a special case of~\cite[Theorem 3.3.2]{Prest}. 
\begin{prop}\label{prop:ReducedProdToDirectLim} Let $\mathcal{A}$ be a Grothendieck category, $\kappa$ be an infinite regular cardinal, $(F_\alpha, f_{\alpha\beta}\colon F_\alpha\to F_\beta \mid \alpha<\beta<\kappa)$ be a direct system in $\mathcal{A}$ indexed by $\kappa$ and put $F=\prod_{\alpha<\kappa} F_\alpha$. Then there is an embedding into the $\kappa$-reduced power of $F$, \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \varinjlim\limits_{\alpha<\kappa} F_\alpha \ar[r, hook]& F^{\times \kappa}/F^{\boxtimes \kappa}. \end{tikzcd} \end{adjustbox} \end{prop} \begin{proof} We can transfer the problem to a module category thanks to Proposition~\ref{prop:GabrielPopescu}. Let $M_\alpha = H(F_\alpha)$ for each $\alpha<\kappa$ and $M = H(F) \simeq \prod_{\alpha<\kappa} M_\alpha$. Then we have embeddings (even pure embeddings) \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \varinjlim\limits_{\alpha<\kappa} M_\alpha \ar[r, hook]& \prod\limits_{\alpha<\kappa} M_\alpha/{\prod\limits_{\alpha<\kappa}}\!\!^{<\kappa} M_\alpha \ar[r, hook]& M^{\times \kappa}/M^{\boxtimes \kappa} \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent in $\mathsf{Mod\textnormal{\textsf{-}}}{R}$, where the first one is obtained from (the proof of) \cite[Theorem 3.3.2]{Prest} and the second one is induced by the product map $\prod_{\alpha<\kappa}\iota_\alpha\colon \prod_{\alpha<\kappa} M_\alpha \hookrightarrow M^{\times \kappa}$ of the split embeddings $\iota_\alpha\colon M_\alpha \hookrightarrow M$. Since $T\colon \mathsf{Mod\textnormal{\textsf{-}}}{R} \to \mathcal{A}$ is exact, preserves all colimits and, by Lemma~\ref{lem:GabrielPopescuProducts}, also products of copies of the module $M$, we obtain the embedding from the statement simply by an application of $T$. 
\end{proof} \begin{cor}\label{cor:ReducedProdToDirectLim} Given a Grothendieck category $\mathcal{A}$ and a class of objects $\mathcal{F}\subseteq\mathcal{A}$ closed under products and subobjects, $\mathcal{F}$ is closed under taking direct limits if and only if the following holds: Given an infinite regular cardinal $\kappa$ and an object $F \in \mathcal{F}$, the $\kappa$-reduced power $F^{\times \kappa}/F^{\boxtimes \kappa}$ belongs to $\mathcal{F}$. \end{cor} \begin{proof} The `if' part is clear since we can express the $\kappa$-reduced power of $F$ as $\varinjlim_{\alpha<\kappa} F^{\times (\kappa\setminus\alpha)}$. Conversely, if $\mathcal{F}$ is closed under subobjects, products and the reduced powers as above, Proposition~\ref{prop:ReducedProdToDirectLim} implies that it is also closed under direct limits of well-ordered chains indexed by infinite regular cardinals. Then $\mathcal{F}$ is in fact closed under direct limits of all well-ordered chains, for any such chain indexed by a limit ordinal $\lambda$ has a cofinal subchain indexed by $\kappa = \operatorname{cf}(\lambda)$, the cofinality of $\lambda$, and $\kappa$ is known to be an infinite regular cardinal. It follows from (the proof of) \cite[Corollary 1.7]{AdamekRosicky} that $\mathcal{F}$ is closed under all direct limits. \end{proof} Since any cotilting class $\mathcal{F}=\mathrm{Cogen}({C})$ is a torsion-free class, we have reduced the original problem to proving that cotilting classes are closed under certain reduced powers. To that end, we use the following result which comes from \cite{BazzoniPureInj} for the case $\kappa=\aleph_0$ and from \cite{StPureInj} for a general $\kappa$. \begin{prop} \label{prop:CountingSequence} Let $\mathcal{A}$ be a Grothendieck category, $F\in \mathcal{A}$ be an object and $\kappa$ be an infinite regular cardinal. 
Then there exist arbitrarily large cardinal numbers $\lambda$ such that there is an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& F^{\boxtimes\lambda} \ar[r]& E \ar[r]& P^{\oplus 2^\lambda} \ar[r]& 0\;, \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent where $F^{\boxtimes\lambda}$ is the $\kappa$-bounded product of $\lambda$ copies of $F$, $F^{\boxtimes \lambda} \subseteq E \subseteq F^{\times \lambda}$, and $P = F^{\times\kappa}/F^{\boxtimes\kappa}$ is the $\kappa$-reduced product of $\kappa$ copies of $F$. \end{prop} \begin{proof} We again use Proposition~\ref{prop:GabrielPopescu} and put $M=H(F).$ A construction of such an exact sequence for $M\in\mathsf{Mod\textnormal{\textsf{-}}}{R}$ in place of $F$ is given in the proof of \cite[Lemma 7]{StPureInj}, and then we apply $T$ to transfer this exact sequence back to $\mathcal{A}$. \end{proof} Now we can prove the main result of the section. \begin{thm}\label{thm:CotiltingPureInjective} Let $\mathcal{A}$ be a Grothendieck category, $C\in\mathcal{A}$ be a cotilting object and $\mathcal{F} = \mathrm{Cogen}({C}) = {^\perp C}$ the associated cotilting class (Definition~\ref{def:cotilting}). Then $C$ is pure-injective and $\mathcal{F}$ is closed under direct limits in $\mathcal{A}$. \end{thm} \begin{proof} Once we prove that $\mathcal{F}$ is closed under direct limits, the pure-injectivity of $C$ will follow. Indeed, in that case \[ C^{\times I}/C^{\oplus I} = \varinjlim_{J\subseteq I} C^{\times (I\setminus J)} \in \mathcal{F}, \] where $J$ runs over all finite subsets of $I$, and hence every morphism $C^{\oplus I} \to C$ extends to a morphism $C^{\times I} \to C$. In view of Corollary~\ref{cor:ReducedProdToDirectLim}, we only need to prove that given $F\in\mathcal{F}$, we have that $P = F^{\times\kappa}/F^{\boxtimes\kappa}$, the $\kappa$-reduced product of $\kappa$ copies of $F$, lies in $\mathcal{F}$. 
To this end, we will use a variant of the argument from~\cite[Proposition 2.5]{BazzoniPureInj} and \cite[Lemma 7]{StPureInj} (the method can be traced back to \cite{Hunter}). Let $\lambda\ge\kappa$ be an infinite cardinal number for which there exists an exact sequence from Proposition~\ref{prop:CountingSequence} and such that $\lambda \ge \card{\hom_\mathcal{A}(F^{\times \mu}, C)}$ for each $\mu<\kappa$. Since each morphism $F^{\boxtimes \lambda} \to C$ is, by the universal property of the defining colimit, uniquely determined by its compositions with the embeddings $F^{\times J} \hookrightarrow F^{\boxtimes \lambda}$, where $J\subseteq \lambda$ and $\card{J}<\kappa$, and there are at most $\lambda^\kappa \le (2^\lambda)^\kappa = 2^{\lambda\times\kappa} = 2^\lambda$ such embeddings, we have \[ \card{\hom_\mathcal{A}(F^{\boxtimes \lambda}, C)} \le 2^\lambda \times \lambda = 2^\lambda. \] If we on the other hand apply $\hom_\mathcal{A}(-, C)$ to the exact sequence from Proposition~\ref{prop:CountingSequence} and use Remark~\ref{rem:ExtCoproductsOk}, we obtain an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \hom_\mathcal{A}(F^{\boxtimes \lambda}, C) \ar[r]& \mathrm{Ext}^1_\mathcal{A}(P, C)^{\times 2^\lambda} \ar[r]& \mathrm{Ext}^1_\mathcal{A}(E, C)\;. \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent Since $E \subseteq F^{\times \lambda} \in \mathcal{F}$, we have $\mathrm{Ext}^1_\mathcal{A}(E, C) = 0$, so the group $\mathrm{Ext}^1_\mathcal{A}(P, C)^{\times 2^\lambda}$ is an epimorphic image of $\hom_\mathcal{A}(F^{\boxtimes \lambda}, C)$. Now if $\mathrm{Ext}^1_\mathcal{A}(P, C) \ne 0$, we would have \[ \card{\mathrm{Ext}^1_\mathcal{A}(P, C)^{\times 2^\lambda}} \ge 2^{2^\lambda}, \] which is more than the cardinality of $\hom_\mathcal{A}(F^{\boxtimes \lambda}, C)$. Thus $\mathrm{Ext}^1_\mathcal{A}(P, C) = 0$ and $P = F^{\times\kappa}/F^{\boxtimes\kappa} \in \mathcal{F}$, as required. 
\end{proof} Our application of the latter theorem is the following description of which torsion-free classes are associated with cotilting objects. This (together with Theorem~\ref{thm:maintheorem} in the next section) extends \cite[Proposition 5.7]{ParraSaorin} to all Grothendieck categories. \begin{thm}\label{thm:CharTiltingClasses} Let $\mathcal{A}$ be a Grothendieck category. Then the following are equivalent for a full subcategory $\mathcal{F}\subseteq\mathcal{A}$: \begin{enumerate}[(1)] \item\label{CotiltChar1}{$\mathcal{F}$ is a cotilting class associated with a cotilting object.} \item\label{CotiltChar2}{$\mathcal{F}$ is a torsion-free class in $\mathcal{A}$ which contains a generator and is closed under direct limits.} \end{enumerate} \end{thm} The implication \hyperref[CotiltChar1]{(\ref*{CotiltChar1})}$\Rightarrow$\hyperref[CotiltChar2]{(\ref*{CotiltChar2})} is an immediate consequence of Theorem~\ref{thm:CotiltingPureInjective}. Our main technical tool to prove the other implication, and in particular to construct corresponding cotilting objects, is a result on covering classes in Grothendieck categories (this implication is in fact already included in \cite[Proposition 5.7]{ParraSaorin}, but we give a more direct construction of the cotilting objects which we later use in Section~\ref{sec:classif}). Given a class of objects $\mathcal{F}$ in a category $\mathcal{A}$ and $X\in\mathcal{A}$, an \emph{$\mathcal{F}$-precover} of $X$ is a morphism $f\colon F \to X$ such that $\hom_\mathcal{A}(F',f)$ is surjective for each $F'\in\mathcal{F}$. An $\mathcal{F}$-precover $f\colon F \to X$ is an \emph{$\mathcal{F}$-cover} provided that each endomorphism $g\colon F \to F$ such that $fg=f$ is necessarily an automorphism. The class $\mathcal{F}$ is called \emph{(pre)covering} if each $X\in\mathcal{A}$ has an $\mathcal{F}$-(pre)cover. 
Given a full subcategory $\mathcal{X}$ of a Grothendieck category $\mathcal{A}$, we will denote by $\underrightarrow{\mathrm{Lim}}\,\mathcal{X}$ the class of all direct limits of objects in $\mathcal{X}$. \begin{prop}[{\cite[Theorem~3.2]{ElBashir}}]\label{prop:ElBashir} Let $\mathcal{A}$ be a Grothendieck category and $\mathcal{F} \subseteq \mathcal{A}$ be a class of objects closed under direct sums and direct limits. Suppose that there is a set $\mathcal{S} \subseteq \mathcal{F}$ such that $\mathcal{F}=\underrightarrow{\mathrm{Lim}}\,\mathcal{S}$. Then $\mathcal{F}$ is a covering class. \end{prop} If $\mathcal{F}$ is extension closed and generating, we can use the following lemma which is originally due to Wakamatsu~\cite{Wakamatsu}. \begin{lem}[{\cite[Lemma 5.13]{G-T}}]\label{lem:Wakamatsu} Let $\mathcal{A}$ be an abelian category, $\mathcal{F}\subseteq\mathcal{A}$ be a generating class closed under extensions and let $f\colon F \to A$ be an $\mathcal{F}$-cover of an object $A \in \mathcal{A}$. Then $f$ is an epimorphism and $\mathrm{Ext}^1_\mathcal{A}(\mathcal{F}, \ker f) = 0$. \end{lem} \begin{proof}[Proof of Theorem~\ref{thm:CharTiltingClasses}] In view of Theorem~\ref{thm:CotiltingPureInjective}, it remains to prove that given a torsion-free class $\mathcal{F} \subseteq \mathcal{A}$ which contains a generator and is closed under direct limits, it is associated to a cotilting object $C \in \mathcal{A}$. Note that under these assumptions, there is a set $\mathcal{S} \subseteq \mathcal{F}$ such that $\mathcal{F}=\underrightarrow{\mathrm{Lim}}\,\mathcal{S}$. Indeed, fix a generator $G \in \mathcal{A}$ and let $\mathcal{S} \subseteq \mathcal{F}$ be the set of all quotients of finite coproducts of $G$ that are in $\mathcal{F}$. 
Then for $F \in \mathcal{F}$ and an epimorphism $G^{\oplus I} \rightarrow F$, letting $F_{I_0}:=\mathrm{Im}\,(G^{\oplus I_0} \hookrightarrow G^{\oplus I} \rightarrow F)$ for each finite subset $I_0 \subseteq I$ yields a direct system of objects from $\mathcal{S}$ with $\varinjlim_{I_0}F_{I_0}=F$. Thus, $\mathcal{F} \subseteq \underrightarrow{\mathrm{Lim}}\, \mathcal{S}$, and the converse inclusion follows from $\mathcal{F}$ being closed under direct limits. The construction of the cotilting object is very similar to the one in~\cite[Theorem 15.22]{G-T}. Let $W$ be an injective cogenerator of $\mathcal{A}$ and consider an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \varepsilon\colon & 0 \ar[r] & C_1 \ar[r] & C_0 \ar[r, "\pi"] & W \ar[r] & 0\;, & \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent where $\pi$ is an $\mathcal{F}$-cover. Then $C_1 \in \mathcal{F}^\perp$ by Lemma~\ref{lem:Wakamatsu} and $C_0 \in \mathcal{F}^\perp$ as $\mathcal{F}^\perp$ is closed under extensions. We put $C = C_0 \oplus C_1$ and observe (using Corollary~\ref{cor:BunoProd}) that \[ \mathrm{Prod}({C}) \subseteq \mathcal{F}\cap\mathcal{F}^\perp \quad\text{ and }\quad \mathrm{Cogen}({C}) \subseteq \mathcal{F} \subseteq {^\perp C}\;. \] Suppose further that $A \in {^\perp C}$. We use the same method as in the proof of Theorem~\ref{thm:CotiltingChar} to show that $A \in \mathrm{Cogen}({C})$. 
Indeed, $\mathrm{Cogen}({C})$ is a torsion-free class in $\mathcal{A}$ by Lemma~\ref{lem:cotilting-TF}, so we have an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & T \ar[r] & A \ar[r] & F \ar[r] & 0\; \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent with ${F} \in \mathrm{Cogen}({C})$ and ${T} \in \ker \hom_{\mathcal{A}}(-, {C}).$ Since also $\mathrm{injdim}\, C\le 1$ by Proposition~\ref{prop:cotilting-injdim1}, we also have $T \in {^\perp C}$ and application of $\hom_{\mathcal{A}}({T}, -)$ to the sequence $\varepsilon$ yields an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 = \hom_{\mathcal{A}}({T}, {C}_0) \ar[r] & \hom_{\mathcal{A}}({T}, {W}) \ar[r] & \mathrm{Ext}^1_{\mathcal{A}}({T}, {C}_1) = 0 \;. \end{tikzcd} \end{adjustbox} \vspace{0.15cm} It follows that ${T}=0$ and $A \simeq F \in \mathrm{Cogen}{(C)}$. Thus, $\mathcal{F} = \mathrm{Cogen}({C}) = {^\perp C}$ and $C$ is a cotilting object. \end{proof} In the locally Noetherian case, we obtain a classification of cotilting torsion-free classes in a locally Noetherian category via torsion pairs of Noetherian objects, which recovers \cite[Corollary 5.13]{ParraSaorin}. An almost identical result appeared in \cite[Theorem 1.13]{BuanKrause-cotilting}, but with a somewhat different definition of a cotilting object (we were not able to recover the result without replacing (C3) from~\cite{BuanKrause-cotilting} by the slightly stronger condition in Theorem~\hyperref[C3]{\ref*{thm:CotiltingChar}~(\ref*{Char2})~(C3)}, but on the other hand we proved in Theorem~\ref{thm:CotiltingPureInjective} that condition (C4) from~\cite{BuanKrause-cotilting} was superfluous). A special case for module categories of (one-sided) Noetherian rings was also obtained in \cite[Proposition 2.6]{St1}. 
Geometric consequences for categories of quasi-coherent sheaves will be discussed later in Sections~\ref{sec:torsion} and~\ref{sec:classif}. \begin{thm}\label{thm:ClassificationViaTP} Let $\mathcal{A}$ be a locally Noetherian Grothendieck category and $\mathcal{A}_0$ be the full subcategory of Noetherian objects. Then torsion-free classes $\mathcal{F}$ in $\mathcal{A}$ associated to a cotilting object bijectively correspond to the torsion pairs $(\mathcal{T}_0, \mathcal{F}_0)$ in $\mathcal{A}_0$ for which $\mathcal{F}_0$ is a generating class (i.e.\ each object of $\mathcal{A}_0$ is a quotient of an object of $\mathcal{F}_0$). The correspondence is given by \[ \mathcal{F}\mapsto\mathcal{F}\cap\mathcal{A}_0 \quad\text{ and }\quad (\mathcal{T}_0, \mathcal{F}_0) \mapsto \underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0. \] \end{thm} In order to prove the result, we first make precise the relation of torsion pairs in $\mathcal{A}_0$ to torsion pairs in $\mathcal{A}$. \begin{lem}\label{lem:TorsionPairsQcohvsCoh} Let $\mathcal{A}$ be a locally Noetherian category and let $\mathcal{A}_0$ be the full subcategory of Noetherian objects. \begin{enumerate}[(1)] \item\label{NLink1}{If $(\mathcal{T}, \mathcal{F})$ is a torsion pair in $\mathcal{A}$, then $(\mathcal{T}\cap \mathcal{A}_0, \mathcal{F}\cap \mathcal{A}_0)$ is a torsion pair in $\mathcal{A}_0$.} \item\label{NLink2}{If $(\mathcal{T}_0, \mathcal{F}_0)$ is a torsion pair in $\mathcal{A}_0$, then $(\underrightarrow{\mathrm{Lim}}\,\mathcal{T}_0, \underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0)$ is a torsion pair in $\mathcal{A}$.} \end{enumerate} Moreover, these assignments yield a bijective correspondence between the class of all torsion pairs in $\mathcal{A}_0$, and the class of those torsion pairs $(\mathcal{T}, \mathcal{F})$ in $\mathcal{A}$ for which $\mathcal{F}$ is closed under direct limits. 
Furthermore, $\mathcal{F}_0$ is generating (in $\mathcal{A}_0$ or in $\mathcal{A}$) if and only if the corresponding class $\mathcal{F}$ contains a generator for $\mathcal{A}$. \end{lem} \begin{proof} Part~\hyperref[NLink1]{(\ref*{NLink1})} is clear since the torsion and the torsion-free part of a Noetherian object are both Noetherian. A proof of part~\hyperref[NLink2]{(\ref*{NLink2})} can be found in~\cite[\S4.4]{CBLocFinPres} (the distinction between direct limits here and filtered colimits used in~\cite{CBLocFinPres} is inessential thanks to~\cite[Theorem 1.5]{AdamekRosicky}). In order to prove that we have the bijective correspondence, observe first that $\mathcal{T}_0 = \mathcal{A}_0\cap \underrightarrow{\mathrm{Lim}}\,\mathcal{T}_0$ and $\mathcal{F}_0 = \mathcal{A}_0\cap \underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0$ (see for instance ~\cite[\S4.1]{CBLocFinPres}). Note that the class $\underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0$ can be described as $$\underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0=\ker \hom_{\mathcal{A}}(\mathcal{T}_0, -)=\bigcap_{T \in \mathcal{T}_0}\ker \hom_{\mathcal{A}}(T, -).$$ Since $\mathcal{T}_0$ consists of Noetherian, hence finitely presented objects only, it follows that $\underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0$ is always closed under taking direct limits. If, on the other hand, $(\mathcal{T}, \mathcal{F})$ is a torsion pair in $\mathcal{A}$ with $\mathcal{F}$ closed under direct limits and if $(\mathcal{T}_0, \mathcal{F}_0)$ is its restriction to $\mathcal{A}_0$, then clearly $\underrightarrow{\mathrm{Lim}}\,\mathcal{T}_0 \subseteq \mathcal{T}$ and $\underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0 \subseteq \mathcal{F}$. It follows from part~\hyperref[NLink2]{(\ref*{NLink2})} and Lemma~\ref{lem:TorsionPairsInclusion} that $\underrightarrow{\mathrm{Lim}}\,\mathcal{T}_0 = \mathcal{T}$ and $\underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0 = \mathcal{F}$. 
Finally, note that if $G \in \mathcal{F}$ is a generator, then the set of all Noetherian subobjects of $G$ is a generating subset of $\mathcal{F}_0=\mathcal{F}\cap \mathcal{A}_0$, and conversely, if $\mathcal{F}_0$ is generating, $\mathcal{A}$ having a \emph{set} of Noetherian generators implies that one can choose a set $\mathcal{S} \subseteq \mathcal{F}_0$ that is generating; then $\bigoplus_{X \in \mathcal{S}} X$ is a generator in $\mathcal{F}$. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:ClassificationViaTP}] Lemma~\ref{lem:TorsionPairsQcohvsCoh} implies that the assignment $\mathcal{F}\mapsto\mathcal{F}\cap\mathcal{A}_0$ from Theorem~\ref{thm:ClassificationViaTP} is injective since $\mathcal{F}$ can be reconstructed as $\underrightarrow{\mathrm{Lim}}\,(\mathcal{F} \cap \mathcal{A}_0)$. The surjectivity follows from Theorem~\ref{thm:CharTiltingClasses} together with Lemma~\ref{lem:TorsionPairsQcohvsCoh} since $\underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0$ is always associated with a cotilting object (and $\mathcal{F}_0=\mathcal{A}_{0} \cap \underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0$). \end{proof} \section{Derived equivalences} \label{sec:derived-equiv} Now we turn to derived equivalences. Most of the material in this section is rather standard nowadays, with one notable exception: we prove that a cotilting object in a Grothendieck category induces a derived equivalence to another Grothendieck category. This uses pure-injectivity of cotilting objects in a crucial way. We first summarize the essentials of tilting theory for torsion pairs. Given a torsion pair $(\mathcal{T}, \mathcal{F})$ in an abelian category $\mathcal{A}$, there is a procedure, worked out in \cite{HRS,BondalVanDenBergh} and also treated in~\cite{ColpiHeart,Noohi,StovicekKernerTrlifaj}, to construct another abelian category $\mathcal{H}$. 
\begin{prop}[{\cite[\S1.3]{HRS} and \cite[\S\S5.3--5.4]{BondalVanDenBergh}}] \label{prop:HRS-tilt} Let $\mathcal{A}$ be an abelian category, $(\mathcal{T}, \mathcal{F})$ a torsion pair in $\mathcal{A}$, $\mathsf{D}({\mathcal{A}})$ the (unbounded) derived category of $\mathcal{A}$ with the suspension functor $\Sigma$, and put \[ \mathcal{H} = \{ X\in\mathsf{D}(\mathcal{A}) \mid H^0(X)\in\mathcal{F}, H^{1}(X)\in\mathcal{T} \text{ and } H^i(X)=0 \text{ for } i \ne 0, 1 \}\;. \] Then $\mathcal{H}$ is itself an abelian category with a torsion pair $(\mathcal{F}, \Sigma^{-1}\mathcal{T})$. If, moreover, $\mathcal{F}$ is a generating class in $\mathcal{A}$ (i.e.\ each object of $\mathcal{A}$ is a quotient of an object of $\mathcal{F}$), then $\mathcal{F}$ is a cogenerating class in $\mathcal{H}$ and the embedding $\mathcal{H}\subseteq \mathsf{D}(\mathcal{A})$ extends to an exact equivalence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \mathsf{D}(\mathcal{H}) \ar[r, "\simeq"]& \mathsf{D}(\mathcal{A})\;. \end{tikzcd} \end{adjustbox} \end{prop} \begin{deff}\label{def:HRS-tilt} The category $\mathcal{H}$ from the proposition is called \emph{tilted from $\mathcal{A}$} in the sense of Happel, Reiten and Smal\o{} (or \emph{HRS-tilted} for short). \end{deff} We also record a few standard facts which are very useful for computations in HRS-tilted categories. \begin{lem}\label{lem:HRS-exts} Let $\mathcal{A}$ be an abelian category, $(\mathcal{T}, \mathcal{F})$ a torsion pair such that $\mathcal{F}$ is generating, and $\mathcal{H}$ the HRS-tilted category. 
Given $T, T'\in \mathcal{T},$ $F, F'\in \mathcal{F}$ and $n\ge 0,$ we have isomorphisms \begin{enumerate}[(1)] \item\label{HRSExtTT}{$\mathrm{Ext}^n_\mathcal{H}(\Sigma^{-1}T, \Sigma^{-1}T') \simeq \mathrm{Ext}^n_\mathcal{A}(T, T'),$} \item\label{HRSExtFF}{$\mathrm{Ext}^n_\mathcal{H}(F, F') \simeq \mathrm{Ext}^n_\mathcal{A}(F, F'),$} \item\label{HRSExtTF}{$\mathrm{Ext}^n_\mathcal{H}(\Sigma^{-1}T, F') \simeq \mathrm{Ext}^{n+1}_\mathcal{A}(T, F')$ and} \item\label{HRSExtFT}{$\mathrm{Ext}^{n+1}_\mathcal{H}(F, \Sigma^{-1}T') \simeq \mathrm{Ext}^n_\mathcal{A}(F, T').$} \end{enumerate} Moreover, a sequence of the form $0 \to F \to F'' \to F' \to 0$ is exact in $\mathcal{A}$ if and only if it is exact in $\mathcal{H}$. \end{lem} \begin{proof} Using the derived equivalence $\mathsf{D}(\mathcal{H}) \simeq \mathsf{D}(\mathcal{A})$ from Proposition~\ref{prop:HRS-tilt}, we have isomorphisms \begin{multline*} \mathrm{Ext}^n_\mathcal{H}(\Sigma^{-1}T, \Sigma^{-1}T') \simeq \hom_{\mathsf{D}(\mathcal{H})}(\Sigma^{-1}T, \Sigma^{n-1}T') \simeq \\ \simeq \hom_{\mathsf{D}(\mathcal{A})}(T, \Sigma^{n}T') \simeq \mathrm{Ext}^n_\mathcal{A}(T, T')\;. \end{multline*} The other isomorphisms are proved in a similar way. If $\varepsilon\colon 0 \to F \overset{i}\to F'' \overset{p}\to F' \to 0$ is short exact either in $\mathcal{A}$ or in $\mathcal{H}$, we have $F'' \in \mathcal{F}$ and a triangle \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} F \ar[r, "i"]& F'' \ar[r, "p"]& F' \ar[r, "{[\varepsilon]}"]& \Sigma F \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent in $\mathsf{D}(\mathcal{A})$ or $\mathsf{D}(\mathcal{H})$, respectively. Thanks to the derived equivalence, we have the same triangle in the other derived category and, hence, the same short exact sequence in the other abelian category. 
\end{proof} Now we can make precise what role cotilting objects play in the HRS-tilted category (see \cite[proof of Proposition 5.7]{ParraSaorin} for analogous considerations and \cite[Proposition 3.8]{ColpiHeart} for a version of the result if $\mathcal{A}$ is a module category). \begin{prop}\label{prop:CotiltingVSInjective} Let $\mathcal{A}$ be a Grothendieck category, $(\mathcal{T}, \mathcal{F})$ a torsion pair with $\mathcal{F}$ generating, and $\mathcal{H}$ the HRS-tilted category. Then the following hold for an object $C \in \mathcal{H}$: \begin{enumerate} \item\label{HRSInjObj}{$C$ is injective in $\mathcal{H}$ if and only if $C \in \mathcal{F}$ and $\mathrm{Ext}^1_\mathcal{A}(\mathcal{F}, C) = 0$.} \item\label{HRSInjCog}{$C$ is an injective cogenerator of $\mathcal{H}$ if and only if $C$ is a cotilting object in $\mathcal{A}$ with $\mathcal{F}=\mathrm{Cogen}({C})$.} \end{enumerate} \end{prop} \begin{proof} \hyperref[HRSInjObj]{(\ref*{HRSInjObj})} If $C$ is injective in $\mathcal{H}$, it is a summand of an object in $\mathcal{F}$ since $\mathcal{F}$ is cogenerating in $\mathcal{H}$. In particular, $C\in\mathcal{F}$. Moreover, $C\in{\mathcal{F}^\perp}$ in $\mathcal{A}$ by Lemma~\hyperref[HRSExtFF]{\ref*{lem:HRS-exts}~(\ref*{HRSExtFF})}. Conversely, if $C\in \mathcal{F}\cap\mathcal{F}^\perp$ in $\mathcal{A}$, then the injective dimension of $C$ in $\mathcal{A}$ is at most one by Proposition~\ref{prop:cotilting-injdim1}. Moreover, \[ \mathrm{Ext}^1_\mathcal{H}(\mathcal{F}, C) = 0 \quad\text{ and }\quad \mathrm{Ext}^1_\mathcal{H}(\Sigma^{-1}\mathcal{T}, C) \simeq \mathrm{Ext}^2_\mathcal{A}(\mathcal{T}, C) = 0 \] by Lemma~\hyperref[HRSExtFF]{\ref*{lem:HRS-exts}~(\ref*{HRSExtFF})} and~\hyperref[HRSExtTF]{(\ref*{HRSExtTF})}. It follows that $C$ is injective in $\mathcal{H}$ since each $X\in\mathcal{H}$ is an extension in $\mathcal{H}$ of an object of $\mathcal{F}$ by an object of $\Sigma^{-1}\mathcal{T}$. 
\hyperref[HRSInjCog]{(\ref*{HRSInjCog})} Suppose first that $C\in\mathcal{A}$ is a cotilting object associated with $\mathcal{F}$. Since each product of copies of $C$ in $\mathcal{A}$ coincides with the corresponding product in $\mathsf{D}(\mathcal{A})\simeq \mathsf{D}(\mathcal{H})$ by Corollary~\ref{cor:ExactProdOfC}, we have $\mathrm{Prod}({C})\subseteq \mathcal{H}$. In particular, arbitrary products of copies of $C$ exist in $\mathcal{H}$ and agree with the ones in $\mathcal{A}$. Moreover, $\mathrm{Prod}({C})$ consists of injective objects by part~\hyperref[HRSInjObj]{(\ref*{HRSInjObj})} and each object of $\mathcal{F}$ is a subobject in $\mathcal{H}$ of a product of copies of $C$ by Lemmas~\ref{lem:cogen=copres} and~\ref{lem:HRS-exts}. Since $\mathcal{F}$ is itself a cogenerating class in $\mathcal{H}$, $C$ is an injective cogenerator of $\mathcal{H}$. Conversely, let $C\in\mathcal{H}$ be an injective cogenerator in $\mathcal{H}$. We know from part~\hyperref[HRSInjObj]{(\ref*{HRSInjObj})} that $C\in\mathcal{F}\cap\mathcal{F}^\perp$ in $\mathcal{A}$ and the injective dimension of $C$ in $\mathcal{A}$ is at most one. Since $\mathcal{F}$ is a torsion-free class in $\mathcal{A}$, we have $\mathrm{Cogen}({C}) \subseteq \mathcal{F} \subseteq {^\perp{C}}$ in $\mathcal{A}$. We first show that $\mathcal{F} = {^\perp{C}}$. Suppose now that $A \in {^\perp{C}}$ in $\mathcal{A}$ and consider an exact sequence $0 \to T \to A \to F \to 0$ induced by the torsion pair $(\mathcal{T}, \mathcal{F})$. Then $T \in {^\perp{C}}$ in $\mathcal{A}$ since $C$ has injective dimension at most one and \[ \hom_\mathcal{H}(\Sigma^{-1}T, C) \simeq \mathrm{Ext}^1_\mathcal{A}(T, C) = 0 \] thanks to Lemma~\hyperref[HRSExtTF]{\ref*{lem:HRS-exts}~(\ref*{HRSExtTF})}. Since $C$ is assumed to be a cogenerator in $\mathcal{H}$, we have $T = 0$ and $A\simeq F \in \mathcal{F}$. Finally, we show that also $\mathrm{Cogen}({C}) = \mathcal{F}$. 
It follows from Lemma~\ref{lem:cotilting-TF} that $\mathrm{Cogen}{(C)}$ is a torsion-free class in $\mathcal{A}$ with the torsion class given by \[ \mathcal{T}' = \{ T \in \mathcal{A} \mid \hom_\mathcal{A}(T, C) = 0 \} \] Suppose now that $A \in \mathcal{F}$ and consider an exact sequence $0 \to T' \to A \to F' \to 0$ induced by the torsion pair $(\mathcal{T}', \mathrm{Cogen}({C}))$. Then $T'\in \mathcal{F}$ and \[ \hom_\mathcal{H}(T', C) \simeq \hom_\mathcal{A}(T', C) = 0 \] thanks to Lemma~\hyperref[HRSExtFF]{\ref*{lem:HRS-exts}~(\ref*{HRSExtFF})}. Since $C$ is assumed to be a cogenerator in $\mathcal{H}$, we have $T' = 0$ and $A\simeq F' \in \mathrm{Cogen}({C})$. \end{proof} We conclude the section with a theorem which explains the role of cotilting objects in Grothendieck categories from the point of view of derived equivalences and which generalizes results from~\cite{ColpiTiltingGrothendieck}, \cite[\S3]{ColpiHeart} and~\cite[\S5]{ParraSaorin}. \begin{thm}\label{thm:maintheorem} Let $\mathcal{A}$ be a Grothendieck category, $(\mathcal{T}, \mathcal{F})$ be a torsion pair in $\mathcal{A}$ such that $\mathcal{F}$ contains a generator and let $\mathcal{H}$ be the HRS-tilted abelian category. Then the following are equivalent: \begin{enumerate}[(1)] \item\label{HRSTiltGrothendieck}{$\mathcal{H}$ is a Grothendieck category.} \item\label{HRSTiltInjCogen}{$\mathcal{H}$ has an injective cogenerator.} \item\label{HRSTiltCotilting}{$\mathcal{F} = \mathrm{Cogen}({C}) = {^\perp C}$ is associated with a cotilting object $C \in \mathcal{A}$.} \end{enumerate} \end{thm} \begin{proof} \hyperref[HRSTiltGrothendieck]{(\ref*{HRSTiltGrothendieck})}$\Rightarrow$\hyperref[HRSTiltInjCogen]{(\ref*{HRSTiltInjCogen})} This is well known, see for instance~\cite[Corollary X.4.3]{Stenstrom}. 
\hyperref[HRSTiltInjCogen]{(\ref*{HRSTiltInjCogen})}$\Rightarrow$\hyperref[HRSTiltCotilting]{(\ref*{HRSTiltCotilting})} This is immediate from Proposition~\hyperref[HRSInjCog]{\ref*{prop:CotiltingVSInjective}~(\ref*{HRSInjCog})}. \hyperref[HRSTiltCotilting]{(\ref*{HRSTiltCotilting})}$\Rightarrow$\hyperref[HRSTiltGrothendieck]{(\ref*{HRSTiltGrothendieck})} We will use a similar argument as in the proof of~\cite[Theorem 6.2]{St3}. We know from Proposition~\ref{prop:CotiltingVSInjective} and Lemma~\ref{lem:prod-C} that the category of injective objects of $\mathcal{H}$ is equivalent to $\mathrm{Prod}({C})$. Furthermore, it is a standard fact that if $\mathcal{H}$ and $\mathcal{H}'$ are two abelian categories with enough injective objects and the corresponding full subcategories of injective objects are equivalent, then also $\mathcal{H} \simeq \mathcal{H}'$. This follows for instance from \cite[Proposition IV.1.2]{AuslanderReitenSmalo}, where an argument is given for the dual situation of abelian categories with enough projective objects. We will now construct a Grothendieck category $\mathcal{H}'$ with the full subcategory of injective objects equivalent to $\mathrm{Prod}{(C)}$. It will follow from the above that $\mathcal{H}\simeq\mathcal{H}'$ is also a Grothendieck category. To this end, let $G\in\mathcal{A}$ be a generator, $R=\mathrm{End}_\mathcal{A}(G)$ and $C' = \hom_\mathcal{A}(G, C) \in \mathsf{Mod\textnormal{\textsf{-}}}{R}$. Then $C'$ is a pure-injective $R$-module by Theorem~\ref{thm:CotiltingPureInjective} and Proposition~\ref{prop:PureInjChar}. Moreover, it follows from Proposition~\ref{prop:GabrielPopescu} that $H$ induces an equivalence \[ \mathrm{Prod}({C}) \simeq \mathrm{Prod}({C'})\;. \] Let now $\mathcal{B}$ be the category of all additive functors ${R}\mathsf{\textnormal{\textsf{-}}mod} \to \mathsf{Ab}$, where ${R}\mathsf{\textnormal{\textsf{-}}mod}$ is the category of finitely presented left $R$-modules. 
This is a locally coherent Grothendieck category and the functor \begin{align*} T\colon \mathsf{Mod\textnormal{\textsf{-}}}{R} &\longrightarrow \mathcal{B}, \\ M &\longmapsto (M\otimes_R-)|_{R\mathsf{\textnormal{\textsf{-}}mod}}, \end{align*} is fully faithful, preserves products and sends pure-injective modules to injective objects of $\mathcal{B}$; see~\cite[Theorem B.16]{JensenLenzing}. In particular, if we put $C'' = T(C') \in \mathcal{B}$, we have an equivalence \[ \mathrm{Prod}({C}) \simeq \mathrm{Prod}({C''})\;. \] As now $C''\in\mathcal{B}$ is an injective object, we have a hereditary torsion pair $(\mathcal{T}', \mathcal{F}')$ in $\mathcal{B}$, where $\mathcal{T}' = \ker\hom_\mathcal{B}(-, C'')$ and $\mathcal{F}' = \mathrm{Cogen}{(C'')}$. By definition, $\mathrm{Prod}({C''})$ is precisely the class of torsion-free injective objects in $\mathcal{B}$ with respect to this torsion pair. Now we can take the Gabriel quotient $\mathcal{H}' = \mathcal{B}/\mathcal{T}'$ (see \cite[\S III]{Gabriel} or~\cite[\S11.1]{Prest}). This is a Grothendieck category whose category of injective objects is equivalent to $\mathrm{Prod}({C})$ by \cite[Proposition III.4.9 and Corollaire III.3.2]{Gabriel} or \cite[\S\S X.1 and X.2]{Stenstrom}. \end{proof} \section{Torsion pairs in categories of sheaves} \label{sec:torsion} Now we aim at specializing to categories of quasi-coherent sheaves on Noetherian schemes. This section is devoted to the description of suitable torsion pairs in the categories of coherent and quasi-coherent sheaves. This is in view of Theorem~\ref{thm:ClassificationViaTP} a key step for a classification of cotilting classes in $\mathsf{QCoh}_X$, but Theorem~\ref{thm:charTPCoh} on torsion pairs in the category of coherent sheaves seems to be of interest on its own. For a Noetherian scheme $X$, there is a standard classification result for hereditary torsion pairs in $\mathsf{QCoh}_X$ due to Gabriel. We recall this result now, along with a proof. 
The reason for including the proof, which is in some aspects more direct than the original one, is twofold: it allows us to describe the corresponding torsion-free classes more directly and some parts of the argument will be useful in the subsequent discussion. Throughout this section, we make use of the results from Appendix~\ref{sec:AssPoints}, where the classification of injective quasi-coherent sheaves on a Noetherian scheme is summarized and the theory of associated points of quasi-coherent sheaves is recalled. Here let us just introduce the following notation: for a point $x$ on a Noetherian scheme $X$, denote by $\mathscr{J}(x)$ the unique injective indecomposable quasi-coherent sheaf whose support is $\overline{\{x\}}$ (see the beginning of \ref{inj} for a detailed construction). \begin{deff}\label{def:SpecClosed} Let $X$ be a topological space. A subset $Y \subseteq X$ is \emph{specialization closed} if $\overline{\{y\}} \subseteq Y$ for every $y \in Y$. Alternatively, a set is specialization closed if it is a union of closed subsets. \end{deff} \begin{prop}[{\cite[\S VI.2]{Gabriel}}]\label{prop:TYFY} Let $Y \subseteq X$ be a specialization closed subset. Define \begin{align*} \mathcal{T}(Y)=&\{\mathscr{T} \in \mathsf{QCoh}_X\mid \mathrm{Supp}\, \mathscr{T} \subseteq Y\}, \\ \mathcal{F}(Y)=&\{\mathscr{F}\in \mathsf{QCoh}_X\mid \mathrm{Ass}\, \mathscr{F} \cap Y=\emptyset\}. \end{align*} Then the pair $(\mathcal{T}(Y), \mathcal{F}(Y))$ is a hereditary torsion pair in $\mathsf{QCoh}_X$. 
Moreover, there is a bijective correspondence between hereditary torsion pairs in $\mathsf{QCoh}_X$ and specialization closed subsets of $X$, given by the assignments \[(\mathcal{T}, \mathcal{F}) \mapsto \mathrm{Supp}\, \mathcal{T} \quad\text{ and }\quad Y \mapsto (\mathcal{T}(Y), \mathcal{F}(Y)).\] \end{prop} \begin{proof} The class $\mathcal{T}(Y)$ is closed under arbitrary direct sums, subobjects, quotients and extensions by Corollary~\hyperref[AS4]{\ref*{cor:AssSvazkuBasic}~(\ref*{AS4})} and~\hyperref[AS5]{(\ref*{AS5})}. It follows from Remark~\hyperref[TP2]{\ref*{rem:TP}~(\ref*{TP2})} that $\mathcal{T}(Y)$ is a hereditary torsion class in $\mathsf{QCoh}_X$. Next we show that $\hom_X(\mathcal{T}(Y), \mathcal{F}(Y))=0.$ Indeed, if $f\colon \mathscr{T} \longrightarrow \mathscr{F}$ is a morphism with $\mathscr{T} \in \mathcal{T}(Y), \; \mathscr{F} \in \mathcal{F}(Y),$ then $\mathrm{Im}\, f \in \mathcal{T}(Y)\cap\mathcal{F}(Y)$. In particular, $\mathrm{Ass}\, \mathrm{Im}\, f \subseteq \mathrm{Supp}\, \mathrm{Im}\, f \subseteq Y$ and $\mathrm{Ass}\, \mathrm{Im}\, f \subseteq \mathrm{Ass}\, \mathscr{F}\subseteq X \setminus Y$, hence $\mathrm{Ass}\, \mathrm{Im}\, f=\emptyset$. It follows by Corollary~\hyperref[AS3]{\ref*{cor:AssSvazkuBasic}~(\ref*{AS3})} that $\mathrm{Im}\, f=0$. To prove that $(\mathcal{T}(Y), \mathcal{F}(Y))$ is a torsion pair, it remains to show that if $\mathscr{F}$ is a quasi-coherent sheaf on $X$ such that $\hom_X(\mathscr{T}, \mathscr{F})=0$ for all $\mathscr{T}\in \mathcal{T}(Y),$ then $\mathscr{F} \in \mathcal{F}(Y)$. Equivalently, we must show that whenever $\mathscr{F}$ is a quasi-coherent sheaf with $\mathrm{Ass}\, \mathscr{F} \cap Y \neq \emptyset,$ there is a quasi-coherent sheaf $\mathscr{T}$ with $\mathrm{Supp}\, \mathscr{T} \subseteq Y$ and a nonzero morphism $\mathscr{T}\longrightarrow\mathscr{F}$. However, this is immediate from Proposition~\ref{prop:TestingSheaf}. 
Clearly for any hereditary torsion pair $(\mathcal{T}, \mathcal{F})$ in $\mathsf{QCoh}_X$, the set $\mathrm{Supp}\, \mathcal{T}$ is specialization closed (if $\mathscr{F}$ is a quasi-coherent sheaf with $\mathscr{F}_x\neq 0$ and $y \in \overline{\{x\}},$ then $\mathscr{F}_x=\mathscr{F}_y\otimes_{\mathcal{O}_{X,y}}\mathcal{O}_{X,x}$, so $\mathscr{F}_y \neq 0$). This shows that both the assignments in the statement are well-defined. If we start with a specialization closed subset $Y \subseteq X$, then clearly $\mathrm{Supp}\, \mathcal{T}(Y)\subseteq Y$. Since for any $x\in Y$, there exists a coherent sheaf $\mathscr{F}$ with $\mathrm{Supp}\,\mathscr{F} = \overline{\{x\}}$, we in fact have $\mathrm{Supp}\, \mathcal{T}(Y)= Y$. It remains to prove that given any hereditary torsion pair $(\mathcal{T}, \mathcal{F})$ in $\mathsf{QCoh}_X$ and $Y=\mathrm{Supp}\,\mathcal{T}$, we have that $\mathcal{T}=\mathcal{T}(Y)$ and $\mathcal{F}=\mathcal{F}(Y)$. Clearly $\mathcal{T} \subseteq \mathcal{T}(Y)$, so by Lemma~\ref{lem:TorsionPairsInclusion}, it suffices to show that $\mathcal{F} \subseteq \mathcal{F}(Y)$. In other words, we must prove that $Y\cap\mathrm{Ass}\, \mathcal{F} = \emptyset$. Suppose that this is not the case, that is, that there exists $x \in Y$ and $\mathscr{F}\in\mathcal{F}$ with $x \in \mathrm{Ass}\,\mathscr{F}$. As $\mathcal{F}$ is closed under injective envelopes by \cite[VI.3.2]{Stenstrom}, we can use Corollary~\ref{cor:AssF=AssEF} to replace $\mathscr{F}$ by $E(\mathscr{F})$. Since $\mathscr{J}(x)$ is a direct summand of $E(\mathscr{F})$ by Lemma~\ref{lem:AssOfJx}, we also have $\mathscr{J}(x) \in \mathcal{F}$. 
However, if we choose any $\mathscr{T}\in \mathcal{T}$ with $\mathscr{T}_x \neq 0$, then % \begin{multline*} \mathrm{Hom}_X\left(\mathscr{T}, \mathscr{J}(x)\right)= \mathrm{Hom}_X\left(\mathscr{T}, i_*\left(\widetilde{E_x}\right)\right) \simeq \\ \simeq \mathrm{Hom}_{\spec \mathcal{O}_{X,x}}\left(i^*\left(\mathscr{T}\right), \widetilde{E_x}\right) \simeq \hom_{\mathcal{O}_{X,x}}(\mathscr{T}_x, E_x) \ne 0\;, \end{multline*} % since $E_x$ is an injective cogenerator of $\mathsf{Mod\textnormal{\textsf{-}}} \mathcal{O}_{X,x}$. This contradicts the assumption that $(\mathcal{T}, \mathcal{F})$ is a torsion pair and finishes the proof. \end{proof} \begin{cor}\label{cor:limFY=FY} Let $X$ be a Noetherian scheme. Then every hereditary torsion-free class in $\mathsf{QCoh}_X$ is closed under direct limits. \end{cor} \begin{proof} In view of Proposition~\ref{prop:TYFY}, it is enough to show that the classes of the form $\mathcal{F}(Y)$ (for $Y$ specialization closed) are closed under direct limits. This is, however, an immediate consequence of Lemma~\ref{lem:AssFlat}. \end{proof} Now we easily obtain an analogous classification of hereditary torsion pairs in $\mathsf{Coh}_X$. \begin{prop}\label{prop:T0YF0Y} For a Noetherian scheme $X$, there is a bijective correspondence between hereditary torsion pairs in $\mathsf{Coh}_X$ and specialization closed subsets $Y \subseteq X$, given by the assignments \[(\mathcal{T}_0, \mathcal{F}_0) \mapsto \mathrm{Supp}\, \mathcal{T}_0 \quad\text{ and }\quad Y \mapsto (\mathcal{T}_0(Y), \mathcal{F}_0(Y)),\] where $\mathcal{T}_0(Y)=\{\mathscr{T} \in \mathsf{Coh}_X\mid \mathrm{Supp}\, \mathscr{T} \subseteq Y\}$ and $\mathcal{F}_0(Y)=\{\mathscr{F}\in \mathsf{Coh}_X\mid \mathrm{Ass}\, \mathscr{F} \cap Y=\emptyset\}$. 
\end{prop} \begin{proof} In view of Proposition~\ref{prop:TYFY} and Lemma~\ref{lem:TorsionPairsQcohvsCoh}, it only remains to observe that a torsion pair $(\mathcal{T}_0, \mathcal{F}_0)$ in $\mathsf{Coh}_X$ is hereditary if and only if $(\mathcal{T}, \mathcal{F}) = (\underrightarrow{\mathrm{Lim}}\,\mathcal{T}_0, \underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0)$ is hereditary in $\mathsf{QCoh}_X$. To this end, if $\mathcal{T}$ is closed under taking subsheaves, so is clearly $\mathcal{T}_0 = \mathcal{T}\cap \mathsf{Coh}_X$. On the other hand, each $\mathscr{T} \in \mathcal{T}$ is a direct union $\mathscr{T} = \bigcup_{i\in I}\mathscr{T}_i$ of its coherent subsheaves belonging to $\mathcal{T}_0$. If $\mathcal{T}_0$ is closed under taking subsheaves and $\mathscr{S}\subseteq\mathscr{T}$, then $\mathscr{S} = \bigcup_{i\in I} (\mathscr{S}\cap\mathscr{T}_i) \in \mathcal{T}$. \end{proof} If $X$ is Noetherian and affine, then in fact each torsion pair in $\mathsf{Coh}_X$ is hereditary by~\cite[Proposition 2.5]{St1}, so we have a full classification of all torsion pairs in this case. In the non-affine situation, there may exist non-hereditary torsion pairs. \begin{example} \label{example:NonHeredTorsion} Let $X = \mathbb{P}^1_k = \proj{k[x_0, x_1]}$ be a projective line over a field and let $\mathcal{T}_0\subseteq\mathsf{Coh}_X$ be the smallest class containing the structure sheaf $\mathcal{O}_X$ and closed under extensions and quotients. This is a torsion class by Remark~\hyperref[TP2]{\ref*{rem:TP}~(\ref*{TP2})} and $\mathcal{O}(-1)$ belongs to the corresponding torsion-free class $\mathcal{F}_0$ since it has no global sections. However, there is an inclusion $\mathcal{O}(-1)\hookrightarrow\mathcal{O}_X$, so $\mathcal{T}_0$ is not hereditary. \end{example} The reason is that hereditary torsion pairs have a very geometric meaning. 
To elucidate this, we give the following definition, which encodes a natural compatibility of the notion of torsion pairs and the underlying geometry. \begin{deff}\label{def:TP-local} Let $X$ be a Noetherian scheme. We say that a torsion pair $(\mathcal{T}, \mathcal{F})$ in $\mathsf{Coh}_X$ is \emph{locally compatible} if a stronger version of Definition~\hyperref[TPDef1]{\ref*{def:torznipar}~(\ref*{TPDef1})} holds: $\homSh_X(\mathcal{T}, \mathcal{F})=0$. \end{deff} \begin{rem} If $X = \spec R$ is affine, then any torsion pair is locally compatible since then $\homSh_X(\widetilde{M}, \widetilde{N})\simeq \widetilde{\hom_R}(M, N)$ for each pair $M, N$ of finitely generated $R$-modules. On the other hand, the torsion pair from Example~\ref{example:NonHeredTorsion} is not locally compatible since $\mathcal{O}_X(U) \simeq \mathcal{O}(-1)(U)$ for each open affine subset $U \subseteq \mathbb{P}^1_k$. \end{rem} Using the adjunction between $\otimes$ and $\homSh_X$, one can reformulate local compatibility to another condition, which was used for instance in Thomason's classification~\cite{Thomason} of localizing subcategories in the perfect derived category of~$X$. \begin{deff}\label{def:tensorIdeal} Let $\mathcal{X}\subseteq\mathsf{Coh}_X$ be a full additive subcategory. Then $\mathcal{X}$ is a \emph{tensor ideal} if $\mathscr{G}\otimes\mathscr{F}\in\mathcal{X}$ for each $\mathscr{G}\in\mathsf{Coh}_X$ and $\mathscr{F}\in\mathcal{X}$. \end{deff} \begin{lem}\label{lem:tensorIdealLocComp} Let $X$ be a Noetherian scheme. Then a torsion pair $(\mathcal{T}_0, \mathcal{F}_0)$ in $\mathsf{Coh}_X$ is locally compatible if and only if $\mathcal{T}_0$ is a tensor ideal. \end{lem} \begin{proof} This is purely formal. Suppose that the torsion pair is locally compatible and let $\mathscr{T}\in\mathcal{T}_0 = \ker\homSh_X(-, \mathcal{F}_0)$ be a torsion object and $\mathscr{G}\in\mathsf{Coh}_X$ any coherent sheaf. 
Then, using the standard adjunction, we have \[ \hom_X(\mathscr{G}\otimes\mathscr{T}, \mathscr{F}) \simeq \hom_X\left(\mathscr{G}, \homSh(\mathscr{T}, \mathscr{F})\right) = 0 \] for each $\mathscr{F}\in\mathcal{F}_0$. Thus $\mathscr{G}\otimes\mathscr{T} \in \mathcal{T}_0$. For the ``if'' part, suppose there exist $\mathscr{T}\in\mathcal{T}_0$ and $\mathscr{F}\in\mathcal{F}_0$ with $\homSh_X(\mathscr{T}, \mathscr{F}) \ne 0$. If we put $\mathscr{G} = \homSh_X(\mathscr{T}, \mathscr{F})$, there is certainly a non-zero morphism \[ \mathscr{G} \longrightarrow \homSh_X(\mathscr{T}, \mathscr{F}). \] By the adjunction again, we obtain a non-zero morphism \[ \mathscr{G} \otimes \mathscr{T} \longrightarrow \mathscr{F}, \] hence the torsion class $\mathcal{T}_0$ is not a tensor ideal. \end{proof} In the sequel, we put more emphasis on the torsion-free class $\mathcal{F}_0$ rather than on the torsion class $\mathcal{T}_0$. We cannot in general expect $\mathcal{F}_0$ to be a tensor ideal since it is typically not closed under taking cokernels, but quite often it turns out to be closed under tensoring by line bundles. In that context, the following definition is useful. \begin{deff} \label{def:AmpleFamily} \begin{enumerate}[(1)] \item{We say that a scheme $X$ has \emph{an ample family of line bundles} if there are global sections $f_i$ of line bundles $\mathscr{L}_i, \; i \in I$, such that the sets $D(f_i)=\{x \in X \mid f_i(x)\neq 0\}, \; i \in I$ form an affine open cover of $X$.} \item{We say that a scheme $X$ \emph{has the resolution property} if every coherent sheaf is an epimorphic image of a vector bundle (i.e.\ a locally free sheaf of finite rank).} \end{enumerate} \end{deff} A Noetherian scheme $X$ which has an ample family of line bundles has the resolution property, which was proved by S.~Kleiman and M.~Borelli in \cite{Borelli}, and independently by L.~Illusie in \cite{Illusie}. 
In such a case, every coherent sheaf is in fact a factor of a finite direct sum of negative tensor powers of the line bundles $\mathscr{L}_i$. Since any quasi-coherent sheaf is a direct union of its coherent subsheaves, the negative tensor powers of the line bundles $\mathscr{L}_i$ form a set of generators for $\mathsf{QCoh}_X$ as well. The above properties are satisfied for a large class of Noetherian schemes, e.g. for quasi-projective schemes over affine schemes. See \cite[section~2.1]{T-T} for more detailed discussion. Now we can state and prove the main result of the section. \begin{thm}\label{thm:charTPCoh} Let $X$ be a Noetherian scheme and $(\mathcal{T}_0, \mathcal{F}_0)$ be a torsion pair in $\mathsf{Coh}_X$. Then the following are equivalent: \begin{enumerate}[(1)] \item{$(\mathcal{T}_0, \mathcal{F}_0)$ is hereditary,} \item{$(\mathcal{T}_0, \mathcal{F}_0)$ is locally compatible (Definition~\ref{def:TP-local}),} \item{$\mathcal{T}_0$ is a tensor ideal (Definition~\ref{def:tensorIdeal}).} \end{enumerate} If, moreover, $X$ has an ample family of line bundles, these are further equivalent to \begin{enumerate}[(1)] \setcounter{enumi}{3} \item{$\mathscr{L}\otimes\mathscr{F} \in \mathcal{F}_0$ for each line bundle $\mathscr{L}$ and each $\mathscr{F}\in \mathcal{F}_0$.} \end{enumerate} \end{thm} \begin{proof} (1)$\Rightarrow$(2) If $(\mathcal{T}_0, \mathcal{F}_0)$ is hereditary, it is of the form $(\mathcal{T}_0(Y), \mathcal{F}_0(Y))$ for a specialization closed $Y\subseteq X$ by Proposition~\ref{prop:T0YF0Y}, and hence clearly locally compatible since both the torsion and torsion-free class are defined by conditions on stalks. (2)$\Rightarrow$(1) Let us assume that $(\mathcal{T}_0, \mathcal{F}_0)$ is locally compatible and put $Y=\mathrm{Supp}\,\mathcal{T}_0$. We first claim that $Y \cap \mathrm{Ass}\,\mathcal{F}_0 = \emptyset$. Indeed, suppose that we have $\mathscr{F}\in\mathcal{F}_0$ and $x\in\mathrm{Ass}\,\mathscr{F}$. 
If $\mathscr{T}\in\mathcal{T}_0$, then \[ \hom_{\mathcal{O}_{X,x}}(\mathscr{T}_x, \mathscr{F}_x) \cong \homSh_X(\mathscr{T}, \mathscr{F})_x = 0. \] Since $\kappa(x)\hookrightarrow \mathscr{F}_x$, we also have $\hom_{\mathcal{O}_{X,x}}(\mathscr{T}_x, \kappa(x)) = 0$, so $\mathscr{T}_x=0$ by the Nakayama lemma. It follows that $x\not\in\mathrm{Supp}\,\mathcal{T}_0$ and the claim is proved. It follows that $\mathcal{T}_0 \subseteq \mathcal{T}_0(Y)$ and $\mathcal{F}_0 \subseteq \mathcal{F}_0(Y)$. Thus $\mathcal{T}_0 = \mathcal{T}_0(Y)$ by Lemma~\ref{lem:TorsionPairsInclusion}, and it is a hereditary torsion class. (2)$\Leftrightarrow$(3) This is just Lemma~\ref{lem:tensorIdealLocComp}. (3)$\Rightarrow$(4) If $\mathcal{T}_0$ is a tensor ideal, clearly $\mathscr{L} \otimes \mathcal{T}_0 = \mathcal{T}_0$ for each line bundle $\mathscr{L}$. Since $\mathscr{L} \otimes -$ is an autoequivalence of $\mathsf{Coh}_X$, the latter is equivalent to $\mathscr{L} \otimes \mathcal{F}_0 = \mathcal{F}_0$. (4)$\Rightarrow$(3) Denote by $\mathscr{L}_i,\; i\in I$ the ample family of line bundles. If (4) holds, we see by the same token that $\mathscr{L}_i^{\otimes n} \otimes \mathcal{T}_0 = \mathcal{T}_0$ for each $i\in I$ and $n\in\mathbb{Z}$. Since each coherent sheaf $\mathscr{G}$ admits a surjection of the form $\mathscr{L}_{i_1}^{\otimes n_1} \oplus \cdots \oplus \mathscr{L}_{i_r}^{\otimes n_r} \to \mathscr{G}$ and since $\mathcal{T}_0$ is closed under quotients and direct sums, it must be a tensor ideal. \end{proof} We conclude with a consequence for torsion-free classes in $\mathsf{QCoh}_X$. \begin{cor} \label{cor:TFQcoh} Let $X$ be a Noetherian scheme with an ample family of line bundles and let $\mathcal{F}\subseteq \mathsf{QCoh}_X$ be a torsion-free class. 
The following are equivalent: \begin{enumerate}[(1)] \item{$\mathcal{F}$ is closed under injective envelopes,} \item{$\mathcal{F}$ is closed under direct limits and $\mathscr{L}\otimes\mathscr{F} \in \mathcal{F}$ for each line bundle $\mathscr{L}$ and $\mathscr{F}\in \mathcal{F}$.} \end{enumerate} \end{cor} \begin{proof} Note that if (1) holds, the corresponding torsion pair $(\mathcal{T}, \mathcal{F})$ is hereditary by \cite[VI.3.2]{Stenstrom} and thus, $\mathcal{F}$ is closed under direct limits by Proposition~\ref{prop:TYFY} and Corollary~\ref{cor:limFY=FY}. In particular, the torsion pair is determined in both (1) and (2) by its restriction to $\mathsf{Coh}_X$; see Lemma~\ref{lem:TorsionPairsQcohvsCoh}. Now we just apply (1)$\Leftrightarrow$(4) from Theorem~\ref{thm:charTPCoh}. \end{proof} \section{Classification of cotilting sheaves} \label{sec:classif} Let $X$ be a fixed Noetherian scheme. The goal is to classify those cotilting torsion-free classes in $\mathsf{QCoh}_X$ which are closed under taking injective envelopes or, equivalently for those $X$ which have an ample family of line bundles, under tensoring with line bundles (recall Theorem~\ref{thm:CotiltingPureInjective} and Corollary~\ref{cor:TFQcoh}). We already know from Theorem~\ref{thm:ClassificationViaTP} and Propositions~\ref{prop:TYFY} and~\ref{prop:T0YF0Y} that a hereditary torsion-free class $\mathcal{F} = \mathcal{F}(Y)$ in $\mathsf{QCoh}_X$ (where $Y\subseteq X$ is specialization closed) is associated with a cotilting sheaf $\mathscr{C}_Y$ if and only if it contains a generator. Here we discuss under what conditions on $Y$ the class $\mathcal{F}(Y)$ actually contains a generator as well as the relation of the cotilting objects here to the construction of cotilting modules in~\cite{St2} in the affine case. In direct analogy with the affine case, it turns out that $\mathcal{F}(Y)$ is generating if and only if the set $Y$ does not contain any associated point of the scheme $X$. 
The following lemma proves necessity of this condition: \begin{lem} \label{lem:AssGeneratorMin} Let $X$ be a Noetherian scheme and $\mathscr{G}$ a generator for $\mathsf{QCoh}_X$. Then $\mathrm{Ass}\, \mathcal{O}_X \subseteq \mathrm{Ass}\,\mathscr{G}$. \end{lem} \begin{proof} Consider an epimorphism $e:\mathscr{G}^{\oplus I} \rightarrow \mathcal{O}_X$, where $I$ is some set. For a point $x \in X$, the induced epimorphism of $\mathcal{O}_{X,x}$-modules $\mathscr{G}_x^{\oplus I} \rightarrow \mathcal{O}_{X,x}$ splits and thus, we have that $$\mathrm{Ass}\,_{\mathcal{O}_{X,x}}\mathcal{O}_{X,x} \subseteq \mathrm{Ass}\,_{\mathcal{O}_{X,x}}\mathscr{G}_x^{\oplus I}=\mathrm{Ass}\,_{\mathcal{O}_{X,x}}\mathscr{G}_x.$$ Now $x \in \mathrm{Ass}\, \mathcal{O}_X$ means that $\mathfrak{m}_x\in \mathrm{Ass}\,_{\mathcal{O}_{X,x}}\mathcal{O}_{X,x},$ hence in this case we have $\mathfrak{m}_x \in \mathrm{Ass}\,_{\mathcal{O}_{X,x}}\mathscr{G}_x$ and thus, $x \in \mathrm{Ass}\, \mathscr{G}$. \end{proof} Next we prove that the condition is also sufficient, by constructing a generator $\mathscr{G}$ for $\mathsf{QCoh}_X$ with $\mathrm{Ass}\, \mathscr{G}=\mathrm{Ass}\, \mathcal{O}_X$. \begin{prop} \label{prop:TorFreeGen} Let $X$ be a Noetherian scheme. Then $\mathsf{QCoh}_X$ admits a generator $\mathscr{G}$ with $\mathrm{Ass}\, \mathscr{G}=\mathrm{Ass}\, \mathcal{O}_X$. \end{prop} \begin{proof} Fix an affine open cover $X=U_1 \cup U_2 \cup \dots \cup U_k$ of $X$, and quasi-coherent ideals $\mathscr{I}_i \subseteq \mathcal{O}_X$ corresponding to the closed subschemes $Z_i=X \setminus U_i$ (e.g. taken with the reduced closed subscheme structure), $i=1, 2, \dots, k$. Given a coherent sheaf $\mathscr{F}$ and a section $s \in \mathscr{F}(U_i)$, by Lemma~\ref{lem:PowersOfI}, there is a positive integer $n$ and a morphism $\varphi: \mathscr{I}_i^{n} \rightarrow \mathscr{F}$ such that $s$ is in the image of $\varphi$. 
Summing up these contributions (over all $i$ and generating sets of $\mathscr{F}(U_i)$), it follows that there is an epimorphism of the form $\bigoplus_{i, j}\mathscr{I}_i^{n_{ij}}\rightarrow \mathscr{F}$ for some integers $n_{ij} \geq 0$. Thus, the sheaf $\mathscr{G}=\bigoplus_{i, n}\mathscr{I}_i^n$ is a generator for $\mathsf{QCoh}_X$. Then $\mathrm{Ass}\, \mathscr{G} \subseteq \mathrm{Ass}\, \mathcal{O}_X$ follows from the fact that $\mathscr{I}_i^n \subseteq \mathcal{O}_X$ for all $i$ and $n$, and the converse inclusion follows from Lemma~\ref{lem:AssGeneratorMin}. \end{proof} Let us now discuss how the construction of cotilting objects from Theorem~\ref{thm:ClassificationViaTP} is related to the construction of cotilting modules from~\cite{St2}. Suppose that $Y \subseteq X$ is a specialization closed subset. Denote by $\mathcal{I}(Y)$ the class of all injective quasi-coherent sheaves $\mathscr{E}$ with $\mathrm{Ass}\, \mathscr{E} \cap Y = \emptyset$. That is, $\mathcal{I}(Y)$ consists of all the injectives contained in $\mathcal{F}(Y)$. \begin{lem}\label{lem:EpiCover} Suppose that $\mathrm{Ass}\, \mathcal{O}_X \cap Y = \emptyset$. Then every $\mathcal{F}(Y)$-cover $f\colon \mathscr{F} \to \mathscr{J}$ of an injective quasi-coherent sheaf $\mathscr{J}$ is an epimorphism and it is also an $\mathcal{I}(Y)$-cover. \end{lem} \begin{proof} By Proposition~\ref{prop:TorFreeGen}, we may choose a generator $\mathscr{G} \in \mathcal{F}(Y)$. Then there is an epimorphism $g\colon \mathscr{G}^{\oplus I} \rightarrow \mathscr{J}$ which factors through $f$, which shows that $f$ is an epimorphism. Since $\mathcal{F}(Y)$ is a hereditary torsion-free class, an injective envelope $i\colon \mathscr{F} \to E(\mathscr{F})$ has $E(\mathscr{F}) \in \mathcal{I}(Y)$. Since $\mathscr{J}$ is injective, there exists a morphism $h\colon E(\mathscr{F}) \to \mathscr{J}$ such that $f = h \circ i$. It is immediate that $h$ is also an $\mathcal{F}(Y)$-precover of $\mathscr{J}$. 
Since a cover is a retract of any precover (\cite[Lemma 5.8]{G-T}), $\mathscr{F}$ is a direct summand of $E(\mathscr{F})$ and as such $\mathscr{F} \in \mathcal{I}(Y)$. \end{proof} Now recall the construction from~\cite{St2} or, to be more precise, a direct generalization of it to non-affine Noetherian schemes. \begin{constr}\label{constr:ImitatedCotilting} Fix a specialization closed subset $Y \subseteq X$ that does not contain any associated point of $X$. For each point $y \in Y$, consider an exact sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& \mathscr{K}(y) \ar[r, "\alpha_y"]& \mathscr{I}(y) \ar[r, "\beta_y"] & \mathscr{J}(y) \ar[r] & 0, \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent where $\beta_y\colon \mathscr{I}(y) \longrightarrow \mathscr{J}(y)$ is an $\mathcal{F}(Y)$-cover (recall that $\mathscr{J}(y)$ is the indecomposable injective with support $\overline{\{y\}}$, see \ref{inj}). In particular, $\mathscr{I}(y)$ is injective by Lemma~\ref{lem:EpiCover}. Define quasi-coherent sheaves $$\mathscr{K}_Y:=\prod_{y \in Y}\mathscr{K}(y), \quad\text{ and }\quad \mathscr{J}_Y:= \prod_{x \in X \setminus Y}\mathscr{J}(x),$$ and finally, put $$\mathscr{C}_Y:=\mathscr{K}_Y \oplus \mathscr{J}_Y.$$ \end{constr} Now we quickly see from Lemma~\ref{lem:Wakamatsu} that $\mathscr{K}(y) \in \mathcal{F}(Y)^\perp$. 
If we take the product of all the above short exact sequences for all $y\in Y$ together with the trivial short exact sequence $0 \to 0 \to \mathscr{J}_Y \to \mathscr{J}_Y \to 0$, we get an exact (by Proposition~\ref{prop:ExactProd}) sequence \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& \mathscr{K}_Y \ar[r]& \prod\limits_{y \in Y} \mathscr{I}(y) \oplus \mathscr{J}_Y \ar[r] & \prod\limits_{x \in X} \mathscr{J}(x) \ar[r] & 0, \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent It follows from the proof of Theorem~\ref{thm:CharTiltingClasses} that $\mathscr{C}_Y \oplus \prod_{y \in Y} \mathscr{I}(y)$ is a cotilting sheaf associated with $\mathcal{F}(Y)$. Since $\mathscr{I}(y) \in \mathrm{Prod}({\mathscr{C}_Y})$ for all $y\in Y$, it follows that also $\mathscr{C}_Y$ is a cotilting sheaf associated with $\mathcal{F}(Y)$. \begin{rem} The arguments for \cite[Theorems 5.3 and 5.4]{St2} generalize in a straightforward way to our setting as well. In particular, the indecomposable sheaves in $\mathrm{Prod}(\mathscr{C}_Y)$ are precisely \[ \mathscr{K}(y), \; y\in Y \quad\text{ and }\quad \mathscr{J}(x), \; x\in X\setminus Y\;. \] This in particular says that the indecomposable injectives in the HRS-tilted abelian categories $\mathcal{H}$ (Definition~\ref{def:HRS-tilt}) induced by the cotilting objects $\mathscr{C}_Y$ correspond bijectively to the points of $X$. \end{rem} We summarize the above discussion as follows. \begin{thm} \label{thm:classification} Let $X$ be a Noetherian scheme. Then the assignment \[ Y \mapsto \mathcal{F}(Y) \] induces a bijective correspondence between \begin{enumerate}[(1)] \item the specialization closed subsets $Y\subseteq X$ such that $\mathrm{Ass}\, \mathcal{O}_X \cap Y = \emptyset,$ and \item the hereditary torsion-free classes in $\mathsf{QCoh}_X$ associated with a cotilting sheaf. 
\end{enumerate} If, moreover, $X$ has an ample family of line bundles, then the image of the above correspondence can be also described as \begin{enumerate}[(1)] \setcounter{enumi}{2} \item the torsion-free classes $\mathcal{F}$ in $\mathsf{QCoh}_X$ associated with a cotilting sheaf and such that $\mathscr{L}\otimes\mathcal{F} = \mathcal{F}$ for each line bundle $\mathscr{L}$. \end{enumerate} \end{thm} \begin{proof} The first part immediately follows as a combination of Theorem~\ref{thm:ClassificationViaTP} and Propositions~\ref{prop:T0YF0Y} and~\ref{prop:TorFreeGen}, with a description of the corresponding cotilting sheaf given by the above discussion. The second part then follows from Theorem~\ref{thm:CotiltingPureInjective} and Corollary~\ref{cor:TFQcoh}. \end{proof} Before we proceed to examining the $1$-dimensional case of the developed theory in more detail, let us draw some consequences of Theorem~\ref{thm:classification} for the class of the (``classically'') torsion-free quasi-coherent sheaves. Recall that a module $M$ over a Noetherian ring $R$ is called \emph{torsion-free} if for any non-zero divisor $r$ and any $m \in M$, $rm=0$ implies $m=0$. In other words, any zero divisor $r\in R$ of $M$ is a zero divisor of $R$ or, equivalently by~\cite[Proposition 1.2.1]{BrunsHerzog}, any associated prime of $M$ is contained in some associated prime of $R$. If we denote by $Y\subseteq\spec R$ the largest specialization closed subset not containing any associated prime of $R$ (i.e.\ $Y=X \setminus Z$, where $Z$ is the set of all $\mathfrak{p}\in\spec R$ that specialize to an associated prime of $R$), it follows that the class of torsion-free modules is precisely the hereditary torsion-free class $\mathcal{F}(Y)$ given by Proposition~\ref{prop:TYFY}. 
In order to generalize torsion-freeness to quasi-coherent sheaves on non-affine schemes, the latter description using the associated points is more useful as the classical description using elements is not local in general. \begin{example} Suppose that $M$ is a torsion-free $R$-module and $\mathfrak{p}$ is an associated prime of $M$ that is not an associated prime of $R$. Let $\mathfrak{q}_1,\dots,\mathfrak{q}_n\in \mathrm{Ass}\, R$ be all the associated primes of $R$ that contain $\mathfrak{p}$ and are minimal such. A particular example of this situation is $R=k[x, y, z]/(xz, yz, z^2)$, $M=R/(y, z)$ and $(y, z)=\mathfrak{p}\subseteq\mathfrak{q}_1=(x, y, z)$. Picking $f \in (\bigcap_i\mathfrak{q}_i) \setminus \mathfrak{p}$, $M_f$ will not remain torsion-free as an $R_f$-module since $\mathfrak{p}_f$ is not contained in any associated prime of $R_f$. Similarly, by localization at $\mathfrak{p}$ we see that torsion-freeness is not stalk-local either. \end{example} \begin{deff} A quasi-coherent sheaf $\mathscr{F}$ on a Noetherian scheme $X$ is called \emph{torsion-free} if every associated point $x$ of $\mathscr{F}$ is a generization of an associated point of $X$, or equivalently $\mathscr{F} \in \mathcal{F}(Y)$, where $Y\subseteq X$ is the largest specialization closed subset not containing any associated point of $X$. \end{deff} \begin{rem} Note, however, that the torsion-free condition is affine-local over schemes with no embedded points, in which case the defining condition degenerates into $\mathrm{Ass}\, \mathscr{F} \subseteq \mathrm{Ass}\, \mathcal{O}_X$. In particular, when $X$ is an integral Noetherian scheme, one recovers the usual (affine-local or stalkwise) definition of torsion-free quasi-coherent sheaves (\cite[Tag 0AVQ]{stacks}). \end{rem} The following is then an immediate consequence of Theorem~\ref{thm:classification}, and generalizes and supplements \cite[Theorem~4.7]{odabasi}. \begin{cor}\label{cor:TorFreeCovers} Let $X$ be a Noetherian scheme. 
The class of torsion-free quasi-coherent sheaves on $X$ is covering and generating. In fact, it is a cotilting class which is smallest among the cotilting classes closed under injective envelopes. \end{cor} \begin{rem} Let us compare Corollary~\ref{cor:TorFreeCovers} to the case of covering and generating properties of flat sheaves (which are always torsion-free). A scheme $X$ is called \emph{semi-separated} if the diagonal morphism $\Delta: X \rightarrow X \times_{\mathbb{Z}} X$ is affine, i.e. if the intersection of every pair of affine open subsets of $X$ is again affine. Murfet showed in \cite[Corollary~3.21]{Murfet} that on a semi-separated Noetherian scheme, the class of flat quasi-coherent sheaves is generating, and Enochs and Estrada showed in \cite{EnochsEstrada} that the class is always covering. On the other hand, a recent result in~\cite{SlavikSt} shows that semi-separatedness is also necessary for $\mathsf{QCoh}_X$ to have a flat generator if $X$ is Noetherian. That is, one can take epimorphic flat covers if and only if $X$ is semi-separated. Corollary~\ref{cor:TorFreeCovers} then shows that in the case of Noetherian schemes that are not necessarily semi-separated, one can still always take at least epimorphic torsion-free covers. \end{rem} We conclude the paper by discussing the developed theory in the case of $1$-dimensional schemes in more detail. \begin{example}\label{example:curves} Let $X$ be a $1$-dimensional Noetherian scheme, e.g. a quasi-projective curve over a field. Fix a specialization closed subset $Y \subseteq X$ avoiding the associated points of $X$. We aim to describe the cotilting sheaf $\mathscr{C}_Y$ from Construction~\ref{constr:ImitatedCotilting} more explicitly. Fix a point $y \in Y$ and denote by $i\colon \spec \mathcal{O}_{X,y} \rightarrow X$ the canonical morphism (described in detail in \ref{inj}). 
Denote by $\widehat\mathcal{O}_{X,y}$ the $\mathfrak{m}_y$-adic completion of $\mathcal{O}_{X,y}$, by $j':\spec \widehat \mathcal{O}_{X,y}\rightarrow \spec \mathcal{O}_{X,y}$ the map induced by the completion, and set $j(=j_y)=ij'$. The local ring $\mathcal{O}_{X,y}$ satisfies $\mathfrak{m}_y \notin \mathrm{Ass}\, \mathcal{O}_{X,y}$. In particular, we have $$1 \geq \dim \mathcal{O}_{X,y} \geq \mathrm{depth}\, \mathcal{O}_{X,y}\geq 1.$$ It follows that $\mathcal{O}_{X,y}$ is a local Cohen-Macaulay ring and thus, $\widehat\mathcal{O}_{X,y}$ is a local complete Cohen-Macaulay ring, hence it admits a canonical module $\omega_y$ (\cite[Corollary 3.3.8]{BrunsHerzog}). By \cite[Theorem 3.1.17]{BrunsHerzog}, we have $\mathrm{injdim}\, \omega_y= \mathrm{depth}\, \widehat\mathcal{O}_{X,y}=1$. In fact, $\omega_y$ admits an injective resolution \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \varepsilon: & 0 \ar[r] & \omega_y \ar[r] & E^0 \ar[r, "\pi"] & E^1 \ar[r] & 0\;, & \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent where $$E^0=\bigoplus\limits_{\begin{subarray}{c} \mathfrak{p} \in \spec \widehat\mathcal{O}_{X,y}\\ \mathrm{ht}\, \mathfrak{p}=0\end{subarray}}E(\widehat \mathcal{O}_{X,y}/\mathfrak{p}) \;\;\text{ and }\;\;E^1=E_{\widehat \mathcal{O}_{X,y}}(\widehat \mathcal{O}_{X,y}/ \widehat{\mathfrak{m}}_y) \simeq E_{\mathcal{O}_{X,y}}(\kappa(y)).$$ In particular, $E^0 \in \mathcal{I}(\{\widehat{\mathfrak{m}}_y\})$. Note that all the finite $\widehat \mathcal{O}_{X,y}$-modules are pure-injective: this follows from \cite[Proposition 4.3.29]{Prest} and the Matlis duality, since every finite $\widehat \mathcal{O}_{X,y}$-module is its own double dual. First we aim to show that $\pi\colon E^0 \rightarrow E^1$ is an $\mathcal{F}(\{\widehat{\mathfrak{m}}_y\})$-cover. 
Given $M \in \mathcal{F}(\{\widehat{\mathfrak{m}}_y\}),$ we may express $M=\varinjlim_i M_i,$ where $M_i$ runs over all the finite $\widehat\mathcal{O}_{X,y}$-submodules of $M$. Since $\widehat{\mathfrak{m}}_y \notin \mathrm{Ass}\, M_i,$ the modules $M_i$ are maximal Cohen-Macaulay (unless $0$) and thus, $\mathrm{Ext}^1_{\widehat \mathcal{O}_{X,y}}(M_i, \omega_y)=0$ (\cite[Proposition 3.3.3]{BrunsHerzog}). However, since $\omega_y$ is pure-injective, \cite[Lemma 6.28]{G-T} gives $$\mathrm{Ext}^1_{\widehat \mathcal{O}_{X,y}}(M, \omega_y)=\mathrm{Ext}^1_{\widehat \mathcal{O}_{X,y}}(\varinjlim_i M_i, \omega_y)\simeq \varprojlim_i \mathrm{Ext}^1_{\widehat \mathcal{O}_{X,y}}(M_i, \omega_y)=0.$$ Thus, $\pi$ is an $\mathcal{F}(\{\widehat{\mathfrak{m}}_y\})$-precover. By Proposition~\ref{prop:ElBashir}, the $\mathcal{F}(\{\widehat{\mathfrak{m}}_y\})$-cover of $E(\kappa(y))$ exists, and by \cite[Lemma 5.8]{G-T}, its domain is a direct summand in any precover with a complement in the kernel of the precover. On the other hand, $\omega_y$ is indecomposable since $\mathrm{End}_{\widehat\mathcal{O}_{X,y}}(\omega_y)\simeq \widehat\mathcal{O}_{X,y}$ is a local ring (\cite[Theorem~3.3.4]{BrunsHerzog}). It follows that $\pi$ is an $\mathcal{I}(\{\widehat{\mathfrak{m}}_y\})$-cover. For the purposes of this example, let us identify quasi-coherent sheaves over affine schemes $\spec{\mathcal{O}_{X,y}}, \spec{\widehat \mathcal{O}_{X,y}}$ with the respective module categories via the global sections functor. 
Applying $j_{*}$ to $\varepsilon$ in this sense yields a short exact sequence of quasi-coherent sheaves on $X$ \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & j_*(\omega_y) \ar[r] & j_*(E^0) \ar[r, "j_*(\pi)"] & \mathscr{J}(y) \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent (note that $j_*$ is exact: both $i, j'$ are affine morphisms, hence $j$ is, therefore the higher direct images $R^ij_*$ vanish, \cite[Tag 073H]{stacks}). The adjunction $(j^*, j_*)$ yields a commutative diagram \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} \hom_{\widehat \mathcal{O}_{X,y}}(j^*(\mathscr{F}), E^0) \ar[r, "\simeq "] \ar[d, "\pi\circ -"] & \hom_{X}(\mathscr{F}, j_*(E^0)) \ar[d, "j_*(\pi)\circ -"] \\ \hom_{\widehat \mathcal{O}_{X,y}}(j^*(\mathscr{F}), E^1) \ar[r, "\simeq "] & \hom_{X}(\mathscr{F}, j_*(E^1)) \end{tikzcd} \end{adjustbox} \vspace{0.15cm} for every $\mathscr{F} \in \mathsf{QCoh}_X$ and thus, to show that $j_*(\pi)$ is an $\mathcal{F}(Y)$-precover, it remains to show that $j^*(\mathcal{F}(Y)) \subseteq \mathcal{F}(\{\widehat{\mathfrak{m}}_y\})$. The fact that $i^*(\mathcal{F}(Y)) \subseteq \mathcal{F}(\{\mathfrak{m}_y\})$ is clear from Definition~\ref{def:suppass}, as $i^*$ is identified by the above convention with the stalk functor at $y$. The inclusion $j'^*(\mathcal{F}_0(\{\mathfrak{m}_y\})) \subseteq \mathcal{F}_0(\{\widehat{\mathfrak{m}}_y\})$ amounts to the fact that the $\mathfrak{m}_y$-adic completion preserves maximal Cohen-Macaulay modules, and finally, $j'^*(\mathcal{F}(\{\mathfrak{m}_y\})) \subseteq \mathcal{F}(\{\widehat{\mathfrak{m}}_y\})$ follows, since $j'^{*}$ preserves direct limits. To show that $j_*(\pi)$ is an $\mathcal{F}(Y)$-cover, it is again enough to show that $j_*(\omega_y)$ is indecomposable. 
Since $i_*$ is fully faithful (by the fact that the counit $i^*i_* \Rightarrow \mathrm{Id}_{\mathsf{Mod\textnormal{\textsf{-}}}\widehat\mathcal{O}_{X,y}}$ is an isomorphism), this amounts to showing that $j'_*(\omega_y)$ is indecomposable. Equivalently, one needs to show that $\omega_y$ is indecomposable as an $\mathcal{O}_{X,y}$-module. Suppose not and consider a non-trivial decomposition $\omega_y=M\oplus N$ as an $\mathcal{O}_{X,y}$-module. Taking the $\mathfrak{m}_y$-adic completion yields a non-trivial decomposition $\omega_y \simeq \widehat{\omega_y}=\widehat M\oplus \widehat N$ (note that $\widehat M, \widehat N \neq 0$: this follows e.g. by $\mathfrak{m}_y^k \omega_y=\mathfrak{m}_y^k M\oplus \mathfrak{m}_y^k N$ and $\bigcap_k \mathfrak{m}_y^k \omega_y=0$). This contradicts the indecomposability of $\omega_y$ as an $\widehat\mathcal{O}_{X,y}$-module. It follows that $j_*(\pi)$ is an $\mathcal{F}(Y)$-cover of $\mathscr{J}(y)$ and hence, we have $$\mathscr{K}(y) \simeq j_*(\omega_y)\;.$$ Altogether, the corresponding cotilting sheaf is of the form $$\mathscr{C}_Y = \prod_{y \in Y} j_{y*}(\omega_y)\times \prod_{x \in X \setminus Y} \mathscr{J}(x)\;.$$ \end{example} \begin{example} Let $X = \mathbb{P}^1_k$ be a projective line over a field and $\xi\in X$ be the generic point. Consider a subset $Y \subseteq X$ avoiding $\xi$ (which is automatically spec. closed). Then all the local completed rings $\widehat\mathcal{O}_{X,y}\simeq k[\![T]\!]$ are further Gorenstein, hence the short exact sequence $\varepsilon$ is now of the form \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& \widehat{\mathcal{O}}_{X,y} \ar[r]& \widehat{\mathscr{Q}}_y \ar[r, "\varphi"]& E_y \ar[r] & 0\;, \end{tikzcd} \end{adjustbox} \vspace{0.15cm} \noindent where $\widehat{\mathscr{Q}}_y \simeq k(\!(T)\!)$ is the completed fraction field. Thus, we obtain \[ \mathscr{K}(y) = j_{y*}(\widehat{\mathcal{O}}_{X,y}),\;\;\; y \in Y. 
\] \end{example} \begin{example} \label{example:P1VsKronecker} If we consider $X = \mathbb{P}^1_k$ again, there are cotilting sheaves in $\mathsf{QCoh}_X$ whose associated cotilting classes $\mathcal{F}$ are not closed under injective envelopes (equivalently, under twists). Indeed, consider the non-hereditary torsion pair $(\mathcal{T}_0, \mathcal{F}_0)$ in $\mathsf{Coh}_X$ from Example~\ref{example:NonHeredTorsion}. The objects of $\mathcal{F}_0$ are precisely finite direct sums of copies of $\mathcal{O}(n)$, $n<0$. Since $\mathcal{O}(1)$ is an ample line bundle, $\mathcal{F}_0$ is a generating class in $\mathsf{Coh}_X$ and, by Theorem~\ref{thm:ClassificationViaTP}, $\mathcal{F} = \underrightarrow{\mathrm{Lim}}\,\mathcal{F}_0$ is a cotilting torsion-free class in $\mathsf{QCoh}_X$. Let us collect more information about $\mathcal{F}$ to illustrate the theory. First of all, $\mathcal{F}$ consists precisely of (possibly infinite) direct sums of copies of $\mathcal{O}(n)$, $n<0$. This follows by the same argument as for \cite[Proposition 3.6]{LenzingTransfer}. Furthermore, the torsion pair $(\mathcal{T}, \mathcal{F})$ is split, i.e.\ $\mathrm{Ext}^1_\mathcal{A}(\mathcal{F}, \mathcal{T}) = 0$, or equivalently the short exact sequence from Definition~\hyperref[TPDef2]{\ref*{def:torznipar}~(\ref*{TPDef2})} splits for every $A \in \mathcal{A}$. Indeed, using arguments dual to those in Proposition~\ref{prop:CotiltingVSInjective}, one shows that $T = \Sigma^{-1}(\mathcal{O}_X \oplus \mathcal{O}(1))$ is a projective generator in the tilted category, so that the tilted category $\mathcal{H}$ is equivalent to $\mathsf{Mod\textnormal{\textsf{-}}}{R}$, where $R = \mathrm{End}_\mathcal{A}(T) = k(\bullet\!\rightrightarrows\!\bullet)$ is the Kronecker algebra (this recovers a special case of Beilinson's equvalences~\cite{Beilinson}). 
Since $R$ is well-known to be hereditary, Lemma~\hyperref[HRSExtFT]{\ref*{lem:HRS-exts}~(\ref*{HRSExtFT})} implies \[ \mathrm{Ext}^1_\mathcal{A}(\mathcal{F}, \mathcal{T}) \simeq \mathrm{Ext}^2_\mathcal{H}(\mathcal{F}, \Sigma^{-1}\mathcal{T}) = 0 \] (see~\cite[Lemma 2.1 in Chapter~2]{HRS} and also \cite[\S6]{ColpiFuller3} and \cite[Theorem 5.2]{StovicekKernerTrlifaj}). In particular, $\mathcal{T}\subseteq\mathcal{F}^\perp$. One can check that also $\mathcal{O}(-1), \mathcal{O}(-2) \in \mathcal{F}^\perp$ and that $C = \mathcal{O}(-1) \oplus \mathcal{O}(-2)$ is a cotilting object cogenerating $\mathcal{F}$. On the other hand, $\mathcal{O}(n) \notin \mathcal{F}^\perp$ for $n\le -3$ as there exist non-split short exact sequences $0 \to \mathcal{O}(n) \to \mathcal{O}(n+1) \oplus \mathcal{O}(n+1) \to \mathcal{O}(n+2) \to 0$. Thus, \[ \mathcal{F}^\perp = \mathcal{O}(-2) \otimes \mathcal{T} = \{ \mathscr{F} \in \mathsf{QCoh}_X \mid \mathcal{O}(n) \text{ is not a summand of } \mathscr{F} \text{ for each } n \le -3 \}, \] and this class has exact products by Proposition~\ref{prop:ExactProd} (in contrast to Example~\ref{example:ExtProducts}). \end{example} \section{Ext-functors and products in abelian categories}\label{sec:ExtProd} We start with discussing a few aspects of interaction of the $\mathrm{Ext}^1$-functor with infinite products in a general Grothendieck category. This is necessary since, in contrast to module categories, Grothendieck categories do not have exact products in general. That is, given an infinite set $I$, the product functor $$\prod_{i \in I}\colon \mathcal{A}^{I} \longrightarrow \mathcal{A}$$ preserves kernels, but it may not preserve cokernels. In fact, the exactness fails even for $\mathsf{QCoh}_X$, where $k$ is a field and $X=\mathbb{P}^1_k=\proj{k[x_0, x_1]}$ is the projective line over $k$; see~\cite[Example 4.9]{Krause}. 
Let $\mathcal{A}$ be an abelian category and $B_i$, $i \in I$, be a collection of objects in $\mathcal{A}$ such that the product $\prod_{i\in I}B_i$ exists in $\mathcal{A}$. For any additive functor $F$ from $\mathcal{A}$ to the category of abelian groups we obtain a canonical morphism $F(\prod_i B_i) \to \prod_i F(B_i)$ whose components are $F(\pi_i)\colon F(\prod_i B_i) \to F(B_i)$, where $\pi_i\colon \prod_i B_i \to B_i$ are the product projections. Given $A \in \mathcal{A}$, we can of course specialize this to $F = \mathrm{Ext}^1_\mathcal{A}(A,-)$: $$\psi_{A,B_i}\colon \mathrm{Ext}^1_\mathcal{A}\Big(A, \prod_{i \in I}B_i\Big) \longrightarrow \prod_{i \in I}\mathrm{Ext}^1_\mathcal{A}(A, B_i)\;.$$ Unlike in module categories, this might not be an isomorphism (see Example~\ref{example:ExtProducts} below), but it still is injective. This in fact follows by formally dualizing \cite[Proposition 8.1]{ColpiFuller3}, but we provide a short proof for the reader's convenience. \begin{prop}\label{prop:ExtMono} If $\mathcal{A}$ is an abelian category and $A$ and $B_i$, $i \in I$, is a collection of objects such that $\prod_{i\in I}B_i$ exists in $\mathcal{A}$, then the map $\psi_{A,B_i}$ above is injective. \end{prop} \begin{proof} Consider an extension $\varepsilon\colon 0 \to \prod_i B_i \stackrel{\lambda}\to E \to A \to 0$ representing an element of $\mathrm{Ext}^1_\mathcal{A}(A, \prod_{i \in I}B_i)$. For each $i \in I$, consider the commutative diagram \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & \prod\limits_{i \in I}B_i \ar[r, "\lambda"]\ar[d, "\pi_i"]& E \ar[r]\ar[d, "\sigma_i"] & A \ar[r]\ar[d, equal] & 0 \\ 0 \ar[r] & B_i \ar[r, "\lambda_i"]& E_i \ar[r]\ar[l, bend left, dotted, "\rho_i"]& A \ar[r] & 0 \\ \end{tikzcd} \end{adjustbox} \noindent where the lower line is the pushout of $\varepsilon$ along the product projection $\pi_i$. If $\psi_{A,B_i}([\varepsilon]) = 0$, then $\lambda_i$ splits for each $i$. 
Hence there is a retraction $\rho_i\colon E_i \to B_i$, indicated by the dotted arrow in the diagram.
More specifically, we can choose for each $i\in I$ an injective resolution of $B_i$, \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & B_i \ar[r]& E^0_i \ar[r]& E^1_i \ar[r] & 0 \end{tikzcd} \end{adjustbox} \vspace{0.25cm} \noindent and the product \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r] & \prod\limits_{i \in I} B_i \ar[r]& \prod\limits_{i \in I} E^0_i \ar[r, "f"]& \prod\limits_{i \in I} E^1_i \end{tikzcd} \end{adjustbox} \vspace{0.25cm} \noindent of these resolutions is left, but not right exact.
To see that, let \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} {E}^\bullet\colon & E^0 \ar[r]& E^1 \ar[r]& E^2 \ar[r]& E^3 \ar[r]& \cdots \end{tikzcd} \end{adjustbox} \vspace{0.25cm} \noindent be an injective resolution of $A$ and consider the following commutative square of abelian groups: \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} {\mathrm{Ext}^1_\mathcal{A}\Big(\bigoplus\limits_{i \in I}B_i, A\Big)} \ar[d, "\varphi_{B_i,A}"'] \ar[r, equal]& {H^1\left(\hom_\mathcal{A}\Big(\bigoplus\limits_{i \in I}B_i, {E}^\bullet\Big)\right)} \ar[d, "\mathrm{can}", "\simeq"'] \\ {\prod\limits_{i \in I}\mathrm{Ext}^1_\mathcal{A}(B_i, A)} \ar[r, equal]& {\prod\limits_{i \in I}H^1\big(\hom_\mathcal{A}(B_i, {E}^\bullet)\big)} \end{tikzcd} \end{adjustbox} \end{rem} \section{Quasi-coherent sheaves on locally Noetherian schemes}\label{sec:AssPoints} \subsection{Injective sheaves on locally Noetherian schemes}\label{inj} This part is devoted to summarizing results about sheaves on locally Noetherian schemes. We start with structure of injective quasi-coherent sheaves on a locally Noetherian scheme $X$. It directly generalizes the structure of injective modules over a Noetherian ring; see \cite[Ch.~IV and VI]{Gabriel} and \cite[\S II.7]{H-RD}. Namely, for each point $x \in X$ there is an associated indecomposable injective sheaf $\mathscr{J}(x)$, and every injective sheaf is a direct sum of the sheaves $\mathscr{J}(x)$ for various points $x \in X$. In order to define the sheaves $\mathscr{J}(x)$, let us first briefly describe a natural embedding $\spec\mathcal{O}_{X,x} \rightarrow X$. Consider $x \in X$ and denote by $Y$ the set of all generizations of $x$, i.e. the set of all points $y$ with $x \in \overline{\{y\}}$. 
Upon fixing an affine open neighbourhood $U$ of $x$, we have that $$Y=\{y \in U\mid \mathfrak{p}_y \subseteq \mathfrak{p}_x\},$$ where $\mathfrak{p}_y$ denotes the prime ideal of $\mathcal{O}_X(U)$ corresponding to the point $y \in U$. The localization map $\mathcal{O}_X(U){\rightarrow}\mathcal{O}_X(U)_{\mathfrak{p}_x}=\mathcal{O}_{X,x}$ induces an embedding $$\spec{\mathcal{O}_{X,x}} \stackrel{j}\longrightarrow \spec{{O}_X(U)}=U,$$ and composition of $j$ with the open immersion $U \subseteq X$ yields an embedding of schemes $$\spec{\mathcal{O}_{X,x}}\stackrel{i}{\hookrightarrow}X$$ with $i(\spec{\mathcal{O}_{X,x}})=Y$. It can be checked that $i$ is independent of the choice of $U$. Define $$\mathscr{J}(x)=i_*\left(\widetilde{E_x}\right),$$ where $E_x$ is the injective hull of $\kappa(x)$, the residue field at $x$ (in $\mathsf{Mod\textnormal{\textsf{-}}} \mathcal{O}_{X,x}$). Since $\spec{\mathcal{O}_{X,x}}$ is a Noetherian scheme, it follows that the morphism $i$ is quasi-compact and quasi-separated (cf. \cite[Remark~10.2 (3) and Definition~10.22]{GW}) and that $\mathscr{J}(x)$ is a quasi-coherent sheaf (\cite[Corollary~10.27]{GW}). Let us now state a version of the classification theorem. \begin{prop}\label{prop:injectives-classification} Let $X$ be a locally Noetherian scheme and $\mathscr{F}$ a quasi-coherent sheaf. Then the following are equivalent: \begin{enumerate}[(1)] \item\label{IC1}{$\mathscr{F}$ is an injective $\mathcal{O}_X$-module.} \item\label{IC2}{$\mathscr{F}$ is an injective quasi-coherent sheaf (i.e.\ it is an injective object in $\mathsf{QCoh}_X$).} \item\label{IC3}{For every $x \in X$, $\mathscr{F}_x$ is an injective $\mathcal{O}_{X,x}$-module.} \item\label{IC4}{$\mathscr{F}$ is a direct sum of sheaves of the form $\mathscr{J}(x)$ for various $x \in X$.} \end{enumerate} \end{prop} \begin{proof} The equivalence of \hyperref[IC1]{(\ref*{IC1})}, \hyperref[IC3]{(\ref*{IC3})} and \hyperref[IC4]{(\ref*{IC4})} is proved in \cite[II.7.17]{H-RD}. 
Let us briefly comment on the equivalence of \hyperref[IC1]{(\ref*{IC1})} and \hyperref[IC2]{(\ref*{IC2})}. The implication \hyperref[IC1]{(\ref*{IC1})} $\Rightarrow$ \hyperref[IC2]{(\ref*{IC2})} follows directly from the fact that monomorphisms in $\mathsf{QCoh}_X$ are precisely monomorphisms in $\mathsf{Mod\textnormal{\textsf{-}}}{\mathcal{O}_X}$ between objects from $\mathsf{QCoh}_X$. Conversely, suppose that $\mathscr{F}$ is injective as a quasi-coherent sheaf. By \cite[II.7.18]{H-RD}, there is a monomorphism $\mathscr{F} \hookrightarrow \mathscr{G}$, where $\mathscr{G}$ is a quasi-coherent sheaf which is injective as an $\mathcal{O}_X$-module. Using the injectivity of $\mathscr{F}$ (in $\mathsf{QCoh}_X$), it follows that this monomorphism splits (in $\mathsf{QCoh}_X$, thus also in $\mathsf{Mod\textnormal{\textsf{-}}}\mathcal{O}_X$). Thus, $\mathscr{F}$ is a direct summand of injective $\mathcal{O}_X$-module, hence it is an injective $\mathcal{O}_X$-module as well. \end{proof} The following consequence of the classification theorem is important for our purposes as well. \begin{cor}\label{cor:DirLimOfInj} Let $X$ be a locally Noetherian scheme. The class of injective quasi-coherent sheaves on $X$ is closed under taking direct limits. \end{cor} \begin{proof} Suppose a sheaf $\mathscr{F}$ is given by $$\mathscr{F}=\varinjlim_{i}\mathscr{E}_i$$ with all $\mathscr{E}_i$'s injective quasi-coherent sheaves. Consider an arbitrary point $x \in X$. As the stalk functor $(-)_x$ is a left adjoint, we have $$\mathscr{F}_x=\varinjlim_i \left(\mathscr{E}_i\right)_x.$$ By Proposition~\ref{prop:injectives-classification}, all the $\mathcal{O}_{X,x}$-modules $\left(\mathscr{E}_i\right)_x$ are injective. As $\mathcal{O}_{X,x}$ is a Noetherian ring, it follows that the direct limit $\mathscr{F}_x$ is injective as well. Using Proposition~\ref{prop:injectives-classification} again, we infer that $\mathscr{F}$ is an injective quasi-coherent sheaf. 
\end{proof} A stronger result, which is employed in Section~\ref{sec:torsion}, holds for Noetherian (i.e.\ locally Noetherian and quasi-compact) schemes. \begin{lem} \label{lem:LocNoethCatg} Let $X$ be a Noetherian scheme. Then $\mathsf{QCoh}_X$ is a locally Noetherian Grothendieck category, and Noetherian objects are precisely the coherent sheaves. \end{lem} \begin{proof} This was shown in~\cite[Ch.~6, Th\'{e}or\`{e}me 1]{Gabriel}, but we sketch the argument for the reader's convenience. By definition, $X$ has a finite open affine cover $X = U_1 \cup \dots \cup U_n$ such that $\mathcal{O}_X(U_i)$ are Noetherian rings. If $\mathcal{F}$ is a coherent sheaf on $X$, then $\mathcal{F}(U_i)$ are Noetherian $\mathcal{O}_X(U_i)$-modules, and hence $\mathcal{F}$ also satisfies the ascending chain conditions on subobjects in $\mathsf{QCoh}_X$. Finally, every quasi-coherent sheaf on $X$ is the union of its coherent subsheaves by~\cite[Exercise II.5.15]{H-AG} or~\cite[Corollary 10.50]{GW}. \end{proof} \begin{rem} Despite the terminology, $\mathsf{QCoh}_X$ need not be a locally Noetherian category if $X$ is a locally Noetherian scheme. An example of this phenomenon was exhibited in \cite[\S II.7, p.~135--136]{H-RD}. Since direct sums of injective quasi-coherent sheaves are still injective by Corollary~\ref{cor:DirLimOfInj}, $\mathsf{QCoh}_X$ then cannot even be a locally finitely generated Grothendieck category due to~\cite[Proposition V.4.3]{Stenstrom}. To obtain a better feeling, let us have a look at the example from~\cite{H-RD}. It is an integral (i.e.\ reduced and irreducible) scheme $X$ which is a direct union of a chain \[ U_0 \subsetneq U_1 \subsetneq U_2 \subsetneq U_3 \subsetneq \cdots \] of open Noetherian subschemes. 
If we denote by $\mathscr{I}_n$ the quasi-coherent sheaf of ideals of the closed subset $X\setminus U_n$, it is not difficult to check that $\mathcal{O}_X = \bigcup_n \mathscr{I}_n$ in $\mathsf{QCoh}_X$ (as well as in $\mathsf{Mod\textnormal{\textsf{-}}}{\mathcal{O}_X}$). Thus the structure sheaf $\mathcal{O}_X$ is coherent, but not a finitely generated object of $\mathsf{QCoh}_X$. \end{rem} \subsection{Supports and associated points} Let us now focus more closely on the topic of associated points and points in the support of quasi-coherent sheaves, as these are among the main tools used in this paper. In what follows in this section, let $X$ be a Noetherian scheme. \begin{deff}\label{def:suppass} Let $\mathscr{F}$ be a quasi-coherent sheaf on $X$. Define the \emph{support} of the sheaf $\mathscr{F}$ by $$\mathrm{Supp}\,\mathscr{F}=\{x \in X \mid \mathscr{F}_x \neq 0\}\;.$$ We say that a point $x \in X$ is an \emph{associated point} of $\mathscr{F}$ provided that there is a monomorphism of $\mathcal{O}_{X,x}$-modules $$\kappa(x) \hookrightarrow \mathscr{F}_x\;,$$ i.e.\ if there is an (affine) open set $U \subseteq X$ and a section $s \in \mathscr{F}(U)$ such that $\mathrm{Ann}_{\mathcal{O}_{X,x}}(s_x)=\mathfrak{m}_x$. Denote the set of all associated points of $\mathscr{F}$ by $\mathrm{Ass}\, \mathscr{F}$. \end{deff} Recall that given a commutative ring $R$, its prime ideal $\mathfrak{p}$ and an $R$-module $M$, we say that $\mathfrak{p}$ is an \emph{associated prime of $M$} if $\mathfrak{p}=\mathrm{Ann}(m)$ for some $m \in M$. That is, there is an injection of $R$-modules $R/\mathfrak{p}\hookrightarrow M$ (taking $1+\mathfrak{p}$ to $m$). Denote the set of all associated primes of $M$ by $\mathrm{Ass}\, M$. Similarly, define \emph{support of $M$}, denoted by $\mathrm{Supp}\, M$, as the set of all primes $\mathfrak{p}$ such that $M_{\mathfrak{p}}\neq 0$. The following lemma describes the basic well-known properties of associated primes. 
The proof can be found e.g.\ in \cite[Sections~3.1 and 3.2]{Eisenbud}. \begin{lem}\label{AssModuluBasic} Let $R$ be a commutative Noetherian ring, $M$ an $R$-module and $\mathfrak{p}\subseteq R$ a prime ideal. \begin{enumerate}[(1)] \item\label{AM1}{$\mathfrak{p} \in \mathrm{Ass}\, M$ if and only if $\mathfrak{p}_{\mathfrak{p}} \in \mathrm{Ass}\, M_{\mathfrak{p}}.$} \item\label{AM2}{$\mathrm{Ass}\, M \subseteq \mathrm{Supp}\, M$.} \item\label{AM3}{$\mathrm{Ass}\, M = \emptyset$ if and only if $\mathrm{Supp}\, M = \emptyset$ if and only if $M=0$.} \item\label{AM4}{Given any collection $M_i, \; i \in I,$ of $R$-modules, we have $$\mathrm{Ass}\, \bigoplus_{i \in I}M_i=\bigcup_{i \in I}\mathrm{Ass}\, M_i\;$$ and $$\mathrm{Supp}\, \bigoplus_{i \in I}M_i=\bigcup_{i \in I}\mathrm{Supp}\, M_i\;.$$} \item\label{AM5}{Given a short exact sequence of $R$-modules \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& A \ar[r]& B \ar[r] & C \ar[r] & 0\;, \end{tikzcd} \end{adjustbox} \vspace{0.25cm} we have that $\mathrm{Ass}\, B \subseteq \mathrm{Ass}\, A \cup \mathrm{Ass}\, C$ and $\mathrm{Supp}\, B = \mathrm{Supp}\, A \cup \mathrm{Supp}\, C$. } \end{enumerate} \end{lem} Now we use these algebraic facts to prove their algebro-geometric counterparts. \begin{cor}\label{cor:AssSvazkuBasic} Let $X$ be a Noetherian scheme, $\mathscr{F}$ a quasi-coherent sheaf on $X$ and $x \in X$ a point. 
\begin{enumerate}[(1)] \item\label{AS1}{$x \in \mathrm{Ass}\, \mathscr{F}$ if and only if there exists an affine open neighbourhood $U$ of $x$ such that $\mathfrak{p}_x \in \mathrm{Ass}\, \mathscr{F}(U)$.} \end{enumerate} \begin{enumerate}[(1')] \item\label{AS1'}{$x \in \mathrm{Ass}\, \mathscr{F}$ if and only if for every affine open neighbourhood $U$ of $x$, \newline{$\mathfrak{p}_x \in \mathrm{Ass}\, \mathscr{F}(U)$.}}\end{enumerate} \begin{enumerate}[(1)] \setcounter{enumi}{1} \item\label{AS2}{$\mathrm{Ass}\, \mathscr{F} \subseteq \mathrm{Supp}\, \mathscr{F}$.} \item\label{AS3}{$\mathrm{Ass}\, \mathscr{F} = \emptyset$ if and only if $\mathrm{Supp}\, \mathscr{F} = \emptyset$ if and only if $\mathscr{F}=0$.} \item\label{AS4}{Given any collection $\mathscr{F}_i, \; i \in I,$ of quasi-coherent sheaves on $X$, we have $$\mathrm{Ass}\, \bigoplus_{i \in I}\mathscr{F}_i=\bigcup_{i \in I}\mathrm{Ass}\, \mathscr{F}_i\;$$ and $$\mathrm{Supp}\, \bigoplus_{i \in I}\mathscr{F}_i=\bigcup_{i \in I}\mathrm{Supp}\, \mathscr{F}_i\;.$$} \item\label{AS5}{Given a short exact sequence of quasi-coherent sheaves on $X$ \vspace{0.25cm} \begin{adjustbox}{max totalsize={1\textwidth}{.9\textheight},center} \begin{tikzcd} 0 \ar[r]& \mathscr{F} \ar[r]& \mathscr{G} \ar[r] & \mathscr{H} \ar[r] & 0, \end{tikzcd} \end{adjustbox} \vspace{0.25cm} we have that $\mathrm{Ass}\, \mathscr{G} \subseteq \mathrm{Ass}\, \mathscr{F} \cup \mathrm{Ass}\, \mathscr{H}$ and $\mathrm{Supp}\, \mathscr{G} = \mathrm{Supp}\, \mathscr{F} \cup \mathrm{Supp}\, \mathscr{H}$. } \end{enumerate} \end{cor} \begin{proof} If $U$ is an affine open neighbourhood of $x$, the stalk $\mathscr{F}_{x}$ may be computed as $\left(\mathscr{F}(U)\right)_{\mathfrak{p}_x}$ and thus, $x \in \mathrm{Ass}\, \mathscr{F} $ if and only if $\mathfrak{m}_x=\left(\mathfrak{p}_x\right)_{\mathfrak{p}_x} \in \mathrm{Ass}\, \left(\mathscr{F}(U)_{\mathfrak{p}_x}\right)$. 
Application of Lemma~\hyperref[AM1]{\ref*{AssModuluBasic}~(\ref*{AM1})} thus proves \hyperref[AS1]{(\ref*{AS1})} and \hyperref[AS1']{(\ref*{AS1'}')}. The statement \hyperref[AS2]{(\ref*{AS2})} is clear from the definition. Statements \hyperref[AS3]{(\ref*{AS3})}--\hyperref[AS5]{(\ref*{AS5})} follow directly from its algebraic counterparts using the facts that for any $x \in X,$ the stalk functor $(-)_x$ is exact (to prove \hyperref[AS5]{(\ref*{AS5})}) and preserves direct sums (to prove \hyperref[AS4]{(\ref*{AS4})}). \end{proof} Additionally, let us prove the following lemma on associated points of direct limits of sheaves. \begin{lem}\label{lem:AssFlat} Let $X$ be a locally Noetherian scheme and $\mathscr{F}$ a quasi-coherent sheaf on $X$. If $\mathscr{F}$ is a direct limit of a directed system of quasi-coherent sheaves $(\mathscr{F}_i\; | \; i \in I),$ then $$\mathrm{Ass}\,{\mathscr{F}} \subseteq \bigcup_{i \in I}\mathrm{Ass}\,{\mathscr{F}_i}.$$ \end{lem} \begin{proof} We start with a reduction to the affine case. Note that for every point $x \in X$, we have that $\mathscr{F}_x=\varinjlim_i (\mathscr{F}_i)_x$ as the stalk functor preserves colimits. It is clearly enough to show the implication $$ \forall x \in X:\;\; \text{If } \mathfrak{m}_x \in \mathrm{Ass}\,(\mathscr{F}_x)\text{, then }\mathfrak{m}_x \in \mathrm{Ass}\,((\mathscr{F}_i)_x) \text{ for some }i \in I,$$ which is easily seen to be a consequence of the affine version of the statement. Let us now assume that $R$ is a Noetherian commutative ring and $M, M_i, \; i \in I$ are $R$-modules with $\varinjlim_i M_i=M$. Denote $\nu_i: M_i \rightarrow M$ the canonical homomorphisms and consider $\mathfrak{p} \in \mathrm{Ass}\,{M}.$ Fix an injective homomorphism $R/\mathfrak{p}\stackrel{\iota}\hookrightarrow M$. 
Since $R$ is Noetherian, the module $R/\mathfrak{p}$ is finitely presented and thus, $\iota$ has a factorization $$\iota=\nu_i \iota_i,$$ where $i \in I$ is some index and $\iota_i: R/\mathfrak{p} \rightarrow M_i$ is a suitable homomorphism (see e.g.\ \cite[Lemma~2.8]{G-T}). Such $\iota_i$ is necessarily injective since $\iota$ is. It follows that $\mathfrak{p}\in \mathrm{Ass}\, M_i,$ which concludes the proof. \end{proof} Associated points are closely related to injective envelopes, exactly as in the affine case. In fact, one can define associated points of objects of certain abstract Grothendieck categories this way, \cite[\S IV.2]{Gabriel}. We record the following lemma and its consequence. \begin{lem}\label{lem:AssOfJx} For a point $x \in X$, $\mathrm{Ass}\, \mathscr{J}(x)=\{x\}.$ \end{lem} \begin{proof} Let $y \in \mathrm{Ass}\, \mathscr{J}(x)$. Note first that $y$ must be a specialization of $x$. Indeed, otherwise there is an open set $U\subseteq X$ which contains $y$, but not $x$. Denoting again by $i\colon \spec{\mathcal{O}_{X,x}}{\hookrightarrow}X$ the canonical embedding, we would obtain \[ \mathscr{J}(x)(U) = i_*\left(\widetilde{E_x}\right)(U) = \widetilde{E_x}\left(i^{-1}(U)\right) = \widetilde{E_x}(\emptyset) = 0, \] which is absurd. So suppose that $y \in \overline{\{x\}}$. Then $\mathscr{J}(x)_y \cong i_*\left(\widetilde{E_x}\right){\!}_y$, which is just the injective envelope $E_x$ of $\kappa(x)$, viewed as an $\mathcal{O}_{X,y}$-module. It is well known that $\mathrm{Ass}\, E_x=\{\mathfrak{p}_x\}$, so $\mathrm{Ass}\, \mathscr{J}(x) = \{x\}$. \end{proof} \begin{cor}\label{cor:AssF=AssEF} Let $X$ be a Noetherian scheme and $\mathscr{F} \in \mathsf{QCoh}_X$. Denote by $E(\mathscr{F})$ the injective hull of $\mathscr{F}$. Then $\mathrm{Ass}\, \mathscr{F}= \mathrm{Ass}\, E(\mathscr{F})$. \end{cor} \begin{proof} Obviously, $\mathrm{Ass}\, \mathscr{F} \subseteq \mathrm{Ass}\, E(\mathscr{F})$ as $\mathscr{F}$ is a subsheaf of $E(\mathscr{F})$. 
Suppose for contradiction that there is a point $x \in \mathrm{Ass}\, E(\mathscr{F})\setminus \mathrm{Ass}\, \mathscr{F}$. Then we can express $E(\mathscr{F})$ as a direct sum $\bigoplus_{i\in I}\mathscr{J}(x_i)$ for a certain collection of points $\{x_i \mid i\in I\}$ by Proposition~\hyperref[IC4]{\ref*{prop:injectives-classification}~(\ref*{IC4})}, and clearly $x_i = x$ for some $i$ by Corollary~\hyperref[AS4]{\ref*{cor:AssSvazkuBasic}~(\ref*{AS4})} and Lemma~\ref{lem:AssOfJx}. In particular, $E(\mathscr{F})$ has both $\mathscr{F}$ and $\mathscr{J}(x)$ as subsheaves. However, $\mathrm{Ass}\, \left(\mathscr{F}\cap \mathscr{J}(x)\right) \subseteq \left(\mathrm{Ass}\, \mathscr{F}\right) \cap \{x\}=\emptyset$. Thus, $\mathscr{F}\cap \mathscr{J}(x)=0$, which contradicts the essentiality of the inclusion $\mathscr{F} \subseteq E(\mathscr{F})$. \end{proof} Given a commutative Noetherian ring $R$ and an $R$-module $M$, an associated prime $\mathfrak{p}$ of $M$ can always be ``isolated'' in a finitely generated submodule $N \subseteq M.$ That is, there is a finitely generated submodule $N$ with $\mathrm{Ass}\, N=\{\mathfrak{p}\}$. This is obvious, one simply needs to take $N$ to be an isomorphic copy of $R /\mathfrak{p}$ that is embedded into $M$. It will be useful to generalize this property for Noetherian schemes. To this end let $x \in X$ be a point and let $Z_x$ be the integral closed subscheme of $X$ with generic point $x$ (such $Z_x$ exists and is unique by \cite[Proposition~3.50]{GW}). If we denote by $j\colon Z_x{\hookrightarrow}X$ the closed immersion, a naive non-affine analogue of $R/\mathfrak{p}$ is the sheaf $j_*(\mathcal{O}_{Z_x})$. 
It is coherent as $j_*(\mathcal{O}_{Z_x}) \cong \mathcal{O}_X/\mathscr{I}_x$, where $\mathscr{I}_x$ is the coherent sheaf of ideals of $\mathcal{O}_X$ whose sections are precisely those which vanish at $x$ (i.e.\ $\mathscr{I}_x(U) = \mathfrak{p}_x \subseteq \mathcal{O}_X(U)$ is the prime corresponding to $x$ if $x\in U$ and $\mathscr{I}_x(U) = \mathcal{O}_X(U)$ if $x\not\in U$ for an open affine subset $U$ of $X$). However, in contrast to the affine case, this sheaf does not embed into every quasi-coherent sheaf $\mathscr{F}$ with $x \in \mathrm{Ass}\, \mathscr{F}$. In fact, in general there is no single coherent sheaf $\mathscr{G}$ such that quasi-coherent sheaves $\mathscr{F}$ with $x \in \mathrm{Ass}\, \mathscr{F}$ would be characterized by the existence of a monomorphism $\mathscr{G} \hookrightarrow \mathscr{F}$. \begin{example}\label{example:TestingSheaf} Let $X=\mathbb{P}^1_k$ be a projective line over a field $k$ and $\xi\in X$ be the generic point. A quasi-coherent sheaf $\mathscr{F}$ with $\xi \in \mathrm{Ass}\, \mathscr{F}$ cannot be torsion, so it contains a line bundle $\mathscr{L}$ as a subsheaf. In fact, any line bundle $\mathscr{L}$ satisfies $\mathrm{Ass}\, \mathscr{L}=\{\xi\}$, and if a testing coherent sheaf $\mathscr{G}$ for $\xi$ existed, a line bundle $\mathscr{L}\subseteq\mathscr{G}$ would also be such a testing sheaf. However, there is no single line bundle that would embed into any other line bundle, since $\hom_{X}(\mathcal{O}(m), \mathcal{O}(n))=0$ if $n<m$. \end{example} However, we obtain the following by a straightforward modification of \cite[Tag 01YE]{stacks}. \begin{prop}\label{prop:TestingSheaf} Let $X$ be a Noetherian scheme, $x \in X$ be a point, $Z_x$ be the integral closed subscheme of $X$ with generic point $x$ and $j\colon Z_x{\hookrightarrow}X$ be the closed immersion. 
Given a quasi-coherent sheaf $\mathscr{F}$, we have $x \in \mathrm{Ass}\, \mathscr{F}$ if and only if there exists a non-zero coherent sheaf of ideals $\mathscr{I} \subseteq \mathcal{O}_{Z_x}$ such that $j_*(\mathscr{I}) \hookrightarrow \mathscr{F}$. Moreover, in this case $\mathrm{Ass}\,{j_*(\mathscr{I})} = \{x\}$ and $\mathrm{Supp}\, j_*(\mathscr{I}) = \overline{\{x\}}$.\end{prop} \begin{proof} We start with the last sentence. Suppose that $0 \ne \mathscr{I} \subseteq \mathcal{O}_{Z_x}$---then clearly $\mathrm{Supp}\, j_*(\mathscr{I}) \subseteq \overline{\{x\}}$. If $y \in \mathrm{Ass}\,{j_*(\mathscr{I})}$, then $y \in Z_x$, since otherwise $y$ would have an open affine neighbourhood $U$ with $j_*(\mathscr{I})(U) = 0$. If $y \in Z_x$, consider an open affine neighbourhood $U$ of $y$. Then also $x \in U$ and $\mathrm{Ass}\, j_*(\mathscr{I})(U) = \{\mathfrak{p}_x\}$ since $j_*(\mathscr{I})(U)$ is a non-zero ideal of the domain $j_*(\mathcal{O}_{Z_x})(U)$. This shows that $\mathrm{Ass}\,{j_*(\mathscr{I})} = \{x\}$ and, since the support of the coherent sheaf $j_*(\mathscr{I})$ is closed and contains $x$, also $\mathrm{Supp}\, j_*(\mathscr{I}) = \overline{\{x\}}$. Let now $\mathscr{F}$ be a quasi-coherent sheaf with $x \in \mathrm{Ass}\, \mathscr{F}$ and let $\mathscr{I}_x$ be the coherent sheaf of ideals of $Z_x$ as above. We first reduce the situation to the case where $\mathscr{I}_x\cdot \mathscr{F} = 0$. Indeed, let $\mathscr{F}'\subseteq \mathscr{F}$ be the subsheaf of sections annihilated by $\mathscr{I}_x$ (i.e.\ $\mathscr{F}'(U) = \{s\in\mathscr{F}(U) \mid \mathscr{I}_x(U)\cdot s = 0 \}$). Then $\mathscr{F}'$ is quasi-coherent and $x\in \mathrm{Ass}\,{\mathscr{F}'}$ by \cite[Tag 01PO]{stacks}, so we can replace $\mathscr{F}$ by $\mathscr{F}'$. If $\mathscr{I}_x\cdot \mathscr{F} = 0$, then $\mathscr{F} \cong j_*(j^*\mathscr{F})$ by \cite[Remark 7.35]{GW}. Moreover, since $\mathscr{F}_x \cong j_*(j^*\mathscr{F})_x \cong (j^*\mathscr{F})_x$, we have $x\in\mathrm{Ass}\,{j^*\mathscr{F}}$. 
If we find a non-zero coherent sheaf of ideals $\mathscr{I} \subseteq \mathcal{O}_{Z_x}$ such that $\mathscr{I} \hookrightarrow j^*\mathscr{F}$, then by adjunction $j_*(\mathscr{I})$ embeds into $\mathscr{F}$. Hence we reduced the problem to the case where $X = Z_x$. Assume, therefore, that $X$ is integral, $x$ is its generic point and $x \in \mathrm{Ass}\,{\mathscr{F}}$. Then, by Corollary~\ref{cor:AssSvazkuBasic}, there exists an open affine neighbourhood $U$ of $x$ and an embedding $\psi\colon \mathcal{O}_X(U) \hookrightarrow \mathscr{F}(U)$. Let $\mathscr{I}\subseteq \mathcal{O}_X$ be a coherent sheaf of ideals such that $\mathrm{Supp}\,{\mathcal{O}_X/\mathscr{I}} = X\setminus U$ (see for instance \cite[Tag 01J3]{stacks}). The proof will be concluded by the following lemma, which implies that there exists $n\ge0$ such that $\psi$ extends to a homomorphism $\varphi\colon \mathscr{I}^n \to \mathscr{F}$. Note that since $\varphi$ is generically injective and $\mathscr{I}^n$ has torsion-free modules of sections over the sheaf of domains $\mathcal{O}_X$, it follows that $\varphi$ is itself injective. \end{proof} \begin{lem}[{\cite[Tag 01YB]{stacks}, \cite[\S V.2, Corollaire 1]{Gabriel}}]\label{lem:PowersOfI} Let $X$ be a Noetherian scheme and $\mathscr{F} \in \mathsf{QCoh}_X$. Let $\mathscr{I}\subseteq \mathcal{O}_X$ be a coherent sheaf of ideals, $Z = \mathrm{Supp}\,{\mathcal{O}_X/\mathscr{I}}$ the corresponding closed subset and $U = X\setminus Z$. Then taking sections over $U$ induces a canonical isomorphism \[ \varinjlim_n \hom_{X}(\mathscr{I}^n, \mathscr{F}) \stackrel{\simeq}{\longrightarrow} \mathscr{F}(U). \] \end{lem} \subsection{The closed monoidal structure on sheaves} Finally, we recall a few basic facts about the standard closed monoidal structure on $\mathsf{Mod\textnormal{\textsf{-}}}{\mathcal{O}_X}$ and $\mathsf{Coh}_X$. We again assume that $X$ is a locally noetherian scheme. 
We will write $\otimes = \otimes_{\mathcal{O}_X}$ for the usual tensor product on $\mathsf{Mod\textnormal{\textsf{-}}}{\mathcal{O}_X}$, which is simply the sheafification of the obvious tensor product of presheaves; see e.g.\ \cite[\S7.4]{GW}. If $\mathscr{F}, \mathscr{G}$ are sheaves of $\mathcal{O}_X$-modules and $x\in X$, then canonically \[ (\mathscr{F}\otimes\mathscr{G})_x \simeq \mathscr{F}_x\otimes_{\mathcal{O}_{X,x}} \mathscr{G}_x.\] If, moreover, both $\mathscr{F}, \mathscr{G}$ are quasi-coherent (resp.\ coherent), then $\mathscr{F}\otimes\mathscr{G}$ is quasi-coherent (resp.\ coherent) and \[ (\mathscr{F}\otimes\mathscr{G})(U) \cong \mathscr{F}(U) \otimes_{\mathcal{O}_X(U)} \mathscr{G}(U) \] for each open affine subset $U \subseteq X$ by \cite[Corollary 7.19]{GW}. Given $\mathscr{F}, \mathscr{G}\in\mathsf{Mod\textnormal{\textsf{-}}}{\mathcal{O}_X}$, one can define the sheaf of homomorphisms $\homSh_X(\mathscr{F}, \mathscr{G}) \in \mathsf{Mod\textnormal{\textsf{-}}}{\mathcal{O}_X}$ by setting \[ \homSh_X(\mathscr{F}, \mathscr{G})(U) = \hom_U(\mathscr{F}|_U, \mathscr{G}|_U) \text{ for all open subsets } U \subseteq X. \] The usual homomorphism group $\hom_X(\mathscr{F}, \mathscr{G})$ can be recovered as the global sections of $\homSh_X(\mathscr{F}, \mathscr{G})$. If $\mathscr{F}$ is a coherent sheaf, then \[ \homSh_X(\mathscr{F}, \mathscr{G})_x \simeq \hom_{\mathcal{O}_{X,x}}(\mathscr{F}_x, \mathscr{G}_x) \] canonically for each $x\in X$ by \cite[Proposition 7.27]{GW}. If, moreover, $\mathscr{G}$ is quasi-coherent (resp.\ coherent), then also $\homSh_X(\mathscr{F}, \mathscr{G})$ is quasi-coherent (resp.\ coherent) and for each open affine $U \subseteq X$, we also have \[ \homSh_X(\mathscr{F}, \mathscr{G})(U) \simeq \hom_{\mathcal{O}_{X}(U)}\left(\mathscr{F}(U), \mathscr{G}(U)\right). 
\] The two constructions are related by the usual adjunction \[ \homSh_X(\mathscr{F}\otimes\mathscr{G}, \mathscr{H}) \simeq \homSh_X\left(\mathscr{F}, \homSh_X(\mathscr{G}, \mathscr{H})\right) \] for any triple $\mathscr{F}, \mathscr{G}, \mathscr{H}\in\mathsf{Mod\textnormal{\textsf{-}}}{\mathcal{O}_X}$. In particular, $(\mathsf{Mod\textnormal{\textsf{-}}}{\mathcal{O}_X}, \otimes, \mathcal{O}_X, \homSh_X)$ is naturally a closed monoidal category, and $\mathsf{Coh}_X$ is a closed monoidal subcategory.
% [extraction artifact: dataset metadata between two concatenated documents]
% 1,108,101,565,865
% arxiv
\section{Introduction} Rank-$k$ approximation is an important problem in data analysis. Given a dataset represented as a matrix $A \in \mathbb{R}^{n \times d}$, where the rows of the matrix represent the data points, the goal is to find a rank-$k$ matrix $\tilde{A}$ such that $\FN{A - \tilde{A}}$ is not too large compared to $\FN{A - \pi_k(A)}$. Here $\pi_k(A)$ denotes the best rank-$k$ matrix under the Frobenius norm, that is, $$\pi_k(A) = \argmin_{{X: \mbox{\small{rank}}(X) \leq k}} \FN{A - X}.$$ Note that this problem is not computationally hard and can be solved using Singular Value Decomposition (SVD). Here, we discuss a simpler sampling based algorithm. From a geometric perspective, the problem is to find a {\em best-fit} $k$-subspace to given $n$ points in $d$-dimensional Euclidean space where the measure of fit is the sum of squared distance of the points to the subspace. In this article we restrict our discussion to rank-$1$ approximation which corresponds to the geometric {\em best-fit line} problem. We will be using the matrix and the geometric interpretations interchangeably in this discussion. We will discuss a sampling technique for this problem. We start with the following question: ``{\it Is there a simple sampling procedure that samples a few rows of the given matrix such that the span of the sampled rows contains a good rank-1 approximation?}'' Let us try the simplest option of sampling from the uniform distribution. One quickly realises that it is easy to construct datasets where the span of even a fairly large sample of uniformly sampled rows does not contain a good rank-$1$ matrix. For example, consider a two-dimensional dataset where all points except one have coordinate $(0, y)$ and the remaining point has coordinate $(x, 0)$ and $x \gg y$. This example suggests that one should take the norm of a point into consideration while sampling. This naturally leads to {\em length-squared sampling}. 
The idea is to sample rows such that the sampling probability of the $i^{th}$ row is proportional to the square of its norm. That is, the sampling probability $p_i$ of the row $\AV{i}$ of a matrix $A$ is given by: \[ p_i = \frac{\norm{\AV{i}}^2}{\FN{A}} \] Length-squared sampling has been explored in the past work of Frieze {\it et al. }~\cite{fkv04} and further explored in various works~\cite{drvw06,dv06}. The main result known from previous works in the context of rank-$1$ approximation is the following theorem of Frieze {\it et al. }~\cite{fkv04}. \begin{theorem}[\cite{fkv04}]\label{thm:frieze} Let $0 < \varepsilon < 1$. Let $S$ be a sample of $s$ rows of an $n \times d$ matrix $A$, each chosen independently with length-squared distribution. If $s = \Omega(\frac{1}{\varepsilon})$, then the span of $S$ contains a matrix $\tilde{A}$ of rank-$1$ such that: \[ {\bf E}[\FN{A - \tilde{A}}] \leq \FN{A - \pi_1(A)} \ +\ \varepsilon \cdot \FN{A}. \] \end{theorem} Note that this only gives an additive approximation and the additive error of $\varepsilon \cdot \FN{A}$ can be very large since we do not have any control on $\FN{A}$. This raises the question about whether a multiplicative approximation could be possible. The subsequent works of Deshpande and Vempala~\cite{dv06} and Deshpande {\it et al. }~\cite{drvw06} use {\em adaptive length-squared sampling} along with {\em volume sampling} to obtain a multiplicative approximation. In this work, we show that length-squared sampling is sufficient to obtain a multiplicative approximation albeit at the cost of using a slightly larger sample size. Our main result is formally stated as the following theorem. \begin{theorem}[Main result]\label{thm:main} Let $0 < \varepsilon < 1$. Let $S$ be a sample of $s$ rows of an $n \times d$ matrix $A$, each chosen independently with length-squared distribution. 
If $s = \Omega(\frac{1}{\varepsilon^4})$, then the span of $S$ contains a matrix $\tilde{A}$ of rank-$1$ such that: \[ {\bf E}[\FN{A - \tilde{A}}] \leq (1+\varepsilon) \cdot \FN{A - \pi_1(A)}. \] \end{theorem} We prove our main result in the next section. Before we do this, let us discuss the application of our results in the {\em streaming setting} that is relevant for big data analysis where $n$ and $d$ are very large\footnote{In this setting, one is allowed to make a few passes over the dataset while using limited amount of workspace. That is, the amount of space used should not scale linearly with the dataset size. This makes sense for big data analysis where it may not be possible to hold the entire dataset in the memory for processing. }. Note that length-squared sampling will naturally give a 2-pass streaming algorithm that uses $O(\frac{n+d}{\varepsilon^4}\log{nd})$ space. Here, in the first pass, we perform length-squared sampling using {\em reservoir sampling}\footnote{In order to maintain a single sample one does the following. The first row is stored with probability $1$. On seeing the $i^{th}$ row ($i > 1$), the stored item is replaced with $\AV{i}$ with probability $\frac{\norm{\AV{i}}^2}{\sum_{j = 1}^{i} \norm{\AV{j}}^2}$. A simple telescoping product shows that the rows get sampled with the desired probability.}. In the second pass, we project all the points in the space spanned by the sampled rows and find the best fit line in this smaller dimensional space. It is important to note that a streaming algorithm with similar space bound that works using only one pass is known~\cite{clarkson}. So, our note is more about the properties of length-squared sampling than streaming algorithms for rank-1 approximation. \subsection{Related work} Low rank approximation of matrices has a large number of applications in information retrieval and data mining (see e.g.~\cite{dkr02,prtv00,afkm01,dfkvv04}). 
There has been a lot of recent activity in obtaining low rank approximations in time depending on the number of non-zero entries in the input matrix~\cite{cw17,s06,dv06,p14}. All of these methods rely on computing suitable random projections of the input matrix. Length-squared sampling is a natural sampling algorithm and has had applications in many problems involving matrix computations~\cite{dk01,dk03,fkv04,dfkvv04}. As mentioned earlier, Frieze {\it et al. }~\cite{fkv04} showed that this can also be used for obtaining low rank approximations, but one needs to incur an additive error term. This restriction was removed in subsequent works~\cite{dv06,s06,dmm06,dmm06b,dmm06c,ndt09,mz11,dmms11} but using different techniques. Our main contribution is to show that length-squared sampling is sufficient to obtain a bounded multiplicative error for rank-1 approximation. \section{Rank-$1$ approximation} We prove our main result in this section. Before delving into the proof, we give some intuition behind the analysis. By a suitable rotation, we can assume that $\pi_1(A)$, the closest rank-$1$ matrix to $A$ in terms of Frobenius norm, is the first column of $A$. Let $\sigma^2$ and $r^2$ denote $\FN{\pi_1(A)}$ and $\FN{A-\pi_1(A)}$ respectively. If $r$ is large compared to $\sigma$, then the additive guarantee given by Theorem~\ref{thm:frieze} implies a multiplicative guarantee as well. So the non-trivial case is when $r \ll \sigma.$ Let $r_i$ and $\sigma_i$ denote the contribution towards $r$ and $\sigma$ from the $i^{th}$ row respectively. So for most rows, $r_i \ll \sigma_i$ -- we call such rows {\em good} rows. When we sample a good row, the normalized row will be close to the vector $(1, 0, \ldots, 0)$. The heart of the analysis relies on showing that the {\em average} of such normalized sampled rows will be close to $(1, 0, \ldots, 0)$. In other words, we need to bound the variance term corresponding to rows which are not good. 
Let $\AV{i}$ denote the $i^{th}$ row of matrix $A$. Let $\mathbf{v}$ denote the unit vector such that $\norm{A\mathbf{v}}^2$ is maximised. Note that $\mathbf{v}$ is the largest singular vector of matrix $A$. We assume without loss of generality that $\mathbf{v} = (1, 0, 0, ..., 0)$\footnote{For a matrix $A$ and a unitary matrix $Q$ of appropriate dimension, the lengths of the corresponding rows in $A$ and $AQ$ are the same. So we can choose a suitable $Q$ such that $\mathbf{v}$ has the mentioned property}. Let $\sigma^2 = \norm{A\mathbf{v}}^2$. So, we can write $\AV{i} \equiv (\sigma u_i, \rv{i})$, where $\sum_i u_i^2 = 1$ and $\rv{i}$ is a vector of dimension $(d-1)$. Let $r_i \equiv \norm{\rv{i}}$ and $r^2 \equiv \sum_i \norm{\rv{i}}^2 = \sum_i r_i^2$. The following lemma states that under the assumption that $r^2$ is not too small compared to $\sigma^2$, the conclusion of our main theorem holds due to Theorem~\ref{thm:frieze} (as stated in~\cite{fkv04}). \begin{lemma}\label{lemma:fkv-lemma} If $r^2 > \varepsilon^3 \sigma^2$, then there is a rank-1 matrix $\tilde{A}$ in the span of $\Omega \left(\frac{1}{\varepsilon^4}\right)$ independently sampled rows of $A$ sampled with length-squared distribution such that ${\bf E}[\FN{A - \tilde{A}}] \leq (1 + \varepsilon) \cdot \FN{A - \pi_1(A)}$. \end{lemma} \begin{proof} Note that since $\mathbf{v}$ maximises $\norm{A\mathbf{v}}^2$, we have $\pi_1(A) = \left(\begin{smallmatrix} \sigma u_1, 0, ..., 0\\\vdots \\ \sigma u_n, 0, ..., 0 \end{smallmatrix}\right),$ which implies that $\FN{A- \pi_1(A)} = r^2$. Also, $\FN{A} = \sigma^2 + r^2$. Combining the above with Theorem~\ref{thm:frieze} (where we use $\frac{\varepsilon^4}{2}$ for $\varepsilon$), we get: \begin{eqnarray*} {\bf E}[\FN{A - \tilde{A}}] \leq r^2 + \frac{\varepsilon^4}{2} \cdot (r^2 + \sigma^2) \leq (1 + \varepsilon) \cdot r^2 = (1 + \varepsilon) \cdot \FN{A - \pi_1(A)}. 
\end{eqnarray*} This completes the proof of the lemma.\qed \end{proof} For the remainder of the proof, we will assume that \begin{equation}\label{eqn:assume} r^2 \leq \varepsilon^3 \sigma^2. \end{equation} Let $\mathbf{s}$ be a randomly sampled row of matrix $A$ sampled with length-squared distribution and let $\sv{1}, \sv{2}, ..., \sv{l}$ be $l$ independent copies of $\mathbf{s}$. We would like to define a deterministic procedure to construct a (random) rank-1 matrix $X$ using $\sv{1}, ..., \sv{l}$, where each row of $X$ lies in the span of $\sv{1}, ..., \sv{l}$, such that the expected value of $\FN{A - X}$ is at most $(1+\varepsilon) \cdot r^2$. Another (geometric) way of saying this is that there is a point $\mathbf{t}$ in the span of $\sv{1}, ..., \sv{l}$ such that the squared distance of rows of $A$ from the line $\ell(\mathbf{t})$ is at most $(1+\varepsilon)$ times of that from the best-fit line $\ell(\mathbf{v})$. Here $\ell(.)$ denotes the line passing through the given point and origin $o = (0, ..., 0)$. We will need a few more definitions to give the procedure that defines $X$ from $\sv{1}, ..., \sv{l}$. We first divide the rows into ``good" and ``bad". A row $\AV{i}$ is said to be good if \begin{equation}\label{eqn:good-row} r_i^2 < \varepsilon \sigma^2 u_i^2, \end{equation} otherwise it is bad. We now give the procedure for mapping the randomly length-squared sampled $\sv{1}, ..., \sv{l}$ to an appropriate matrix $X$. 
\begin{framed} \hspace*{-0.4in}\ \ \ {\tt Mapping($\sv{1}, ..., \sv{l}$)}\\ \hspace*{0.0in} \ \ \ - For all $i \in \{1, ..., l\}$: \\ \hspace*{0.3in} \ \ \ - If ($\sv{i}$ is a bad row) then $\tv{i} \leftarrow (0, ..., 0)$\\ \hspace*{0.3in} \ \ \ - Else $\tv{i} \leftarrow \frac{\sv{i}}{\sigma u_i}$\\ \hspace*{0.0in} \ \ \ - $\mathbf{t} \leftarrow \frac{\sum_{i=1}^{l} \tv{i}}{l}$\\ \hspace*{0.0in} \ \ \ - For all $i \in \{1, ..., n\}$: $\XV{i} \leftarrow \sigma u_i \mathbf{t}$\\ \hspace*{0.0in} \ \ \ - $X \leftarrow \left(\begin{smallmatrix} \XV{1} \\ \vdots \\ \XV{n}\end{smallmatrix} \right)$ \end{framed} The intuition is that if $\sv{i}$ is a good row, then $\tv{i}$ will be close to $\mathbf{v} = (1,0, \ldots, 0)$. So $X$ will be very close to $\pi_1(A)$. Note that the above defined procedure is only meant for the analysis and is never actually executed. Also it is easy to see that the $n \times d$ matrix $X$ defined above is a rank-1 matrix. We will now bound ${\bf E}[\FN{A - X}]$, which is the same as $\sum_i {\bf E}[\FN{\AV{i} - \XV{i}}]$. We start with a simple lemma that bounds the probability of sampling a bad row. \begin{lemma}\label{lem:bad} The probability that a sampled row $\mathbf{s}$, sampled using length-squared distribution, is bad is at most $\frac{2r^2}{\varepsilon \sigma^2}$. \end{lemma} \begin{proof} The probability of sampling a bad row $\AV{i}$ is given by $\frac{r_i^2 + \sigma^2 u_i^2}{\sigma^2 + r^2} \leq \frac{r_i^2 + \sigma^2 u_i^2}{\sigma^2}$. So, the probability that a sample row is bad is at most $\frac{\sum_{i \textrm{\ is bad}} (r_i^2 + \sigma^2 u_i^2)}{\sigma^2} \leq \frac{\sum_{i \textrm{\ is bad}} (1 + \frac{1}{\varepsilon}) r_i^2 }{\sigma^2} \leq \frac{2 r^2}{\varepsilon \sigma^2}$.\qed \end{proof} Let $\AV{i}_1$ denote the first coordinate of $\AV{i}$. Define $\XV{i}_1$ and $\tv{i}_1$ similarly. We first estimate $\sum_i {\bf E}[(\AV{i}_1 - \XV{i}_1)^2]$. 
\begin{lemma}\label{lemma:first-coordinate} $\sum_i {\bf E}[(\AV{i}_1 - \XV{i}_1)^2] \leq 5 \varepsilon r^2$. \end{lemma} \begin{proof} Fix an index $i$. Note that $\AV{i}_1$ is $\sigma u_i$ and $\XV{i}_1$ is $\sigma u_i \mathbf{t}_1$. Therefore: \begin{eqnarray*} (\AV{i}_1 - \XV{i}_1)^2 = \sigma^2 u_i^2 (1 - \mathbf{t}_1)^2 \leq \frac{\sigma^2 u_i^2}{l^2} \left( \sum_{j=1}^{l} (1 - \tv{j}_1)\right)^2. \end{eqnarray*} The last inequality follows from the Jensen's inequality using $(1-x)^2$ as the convex function. We obtain a bound on the expectation of $(\AV{i}_1 - \XV{i}_1)^2$ from the above. \begin{equation}\label{eqn:3} {\bf E}[(\AV{i}_1 - \XV{i}_1)^2] \leq \frac{\sigma^2 u_i^2}{l^2} \cdot \left( \sum_{j=1}^{l} {\bf E} \left[\left(1 - \tv{j}_1 \right)^2 \right] \right) + \frac{\sigma^2 u_i^2}{l^2} \cdot \left( \sum_{j=1}^{l}{\bf E} \left[\left(1 - \tv{j}_1 \right) \right]\right)^2 \end{equation} The previous inequality follows from the independence of random variables $\tv{1}_1, ..., \tv{l}_1$. Lemma~\ref{lem:bad} shows that the probability of the Bernoulli random variable $\tv{j}_1$ being $0$ for any $j$ is at most $\frac{2r^2}{\varepsilon \sigma^2}$. So, ${\bf E}[(1 - \tv{j}_1)] \leq \frac{2r^2}{\varepsilon \sigma^2}$ and ${\bf E}[(1 - \tv{j}_1)^2] \leq \frac{2r^2}{\varepsilon \sigma^2}$ (note that $\tv{j}_1$ is either 0 or 1). Substituting this in~(\ref{eqn:3}) above, we get \[ {\bf E}[(\AV{i}_1 - \XV{i}_1)^2] \leq \frac{2 u_i^2 r^2}{\varepsilon l} + \frac{4 u_i^2 r^4}{\varepsilon^2 \sigma^2} \] The lemma follows from the facts that $\sum_i u_i^2 = 1$, $r^2 \leq \varepsilon^3 \sigma^2$ and $l \geq \frac{2}{\varepsilon^4}$.\qed \end{proof} Now we estimate the contribution towards $\sum_i {\bf E}[\norm{\AV{i} - \XV{i}}^2]$ from coordinates other than the first coordinate. 
If $\mathbf{z}$ denotes the vector obtained from $\mathbf{t}$ by removing the first coordinate, then observe that $\sum_i \sum_{j=2}^{d} (\AV{i}_j - \XV{i}_j)^2 = \sum_i \norm{\rv{i} - \sigma u_i \mathbf{z}}^2$. Now, observe that \begin{equation}\label{eqn:4} \sum_i {\bf E}[\norm{\rv{i} - \sigma u_i \mathbf{z}}^2] = r^2 - 2 \sigma \langle \sum_i u_i \rv{i}, {\bf E}[\mathbf{z}] \rangle + \sigma^2 {\bf E}[\norm{\mathbf{z}}^2] \end{equation} We will now estimate each of the terms above. First, note that if $\zv{j}$ denotes the vector obtained from $\tv{j}$ by removing the first coordinate, then $\mathbf{z} = \frac{1}{l} \sum_{j=1}^{l} \zv{j}$. Let $G$ denote the index set of good rows. Then observe that for any $j$, \begin{equation}\label{eqn:5} {\bf E}[\zv{j}] = \sum_{k \in G} p_k \frac{\rv{k}}{u_k \sigma}, \end{equation} where $p_k$ is the probability of length-squared sampling the $k^{th}$ row of $A$. Since the vectors $\tv{j}$ are chosen i.i.d., the expectation of $\mathbf{z}$ can also be written in terms of the above expression (i.e., RHS of (\ref{eqn:5})). Next, we show a useful inequality for good rows. \begin{fact}\label{lemma:good-p} If row $\AV{i}$ is good, then $|u_i - \frac{p_i}{u_i}| \leq \varepsilon |u_i|$. \end{fact} \begin{proof} Assume that $u_i > 0$, otherwise we can replace $u_i$ by $-u_i$ in the following argument. Since row $\AV{i}$ is good, we know that $r_i^2 \leq \varepsilon \sigma^2 u_i^2$. Also note that length squared sampling means that $p_i = \frac{\sigma^2 u_i^2 + r_i^2}{\sigma^2 + r^2}$. 
Using these we get: \[ \frac{p_i}{u_i} \leq \frac{\sigma^2 u_i^2 + r_i^2}{\sigma^2 u_i} = u_i + \frac{r_i^2}{\sigma^2 u_i} \leq (1+\varepsilon) \cdot u_i, \] and \[ \frac{p_i}{u_i} \geq \frac{\sigma^2 u_i}{r^2 + \sigma^2} \stackrel{(\ref{eqn:assume})}{\geq} \frac{u_i}{\varepsilon^3 + 1} \geq (1-\varepsilon) \cdot u_i \] The lemma follows from the above two inequalities.\qed \end{proof} \noindent The next lemma gives an upper bound on $\norm{\sigma \cdot {\bf E}[\mathbf{z}] - \sum_i u_i \rv{i}}$. \begin{lemma}\label{lemma:mid-1} $\norm{\sigma \cdot {\bf E}[\mathbf{z}] - \sum_i u_i \rv{i}} \leq 2 \varepsilon r$. \end{lemma} \begin{proof} Using the triangle inequality and an expression for ${\bf E}[\mathbf{z}]$ using (\ref{eqn:5}), we get that: \begin{eqnarray*} \norm{\sigma \cdot {\bf E}[\mathbf{z}] - \sum_i u_i \rv{i}} &\leq& \norm{\sum_{i \notin G} u_i \rv{i}} + \norm{\sum_{i \in G} \left(u_i - \frac{p_i}{u_i} \right) \rv{i}}\\ & \leq & \sum_{i \notin G} |u_i| r_i + \sum_{i \in G} |u_i - \frac{p_i}{u_i}| r_i \\ & \stackrel{\mbox{\tiny{Fact~\ref{lemma:good-p}}}}{\leq} & \sum_{i \notin G} |u_i| r_i + \sum_{i \in G} \varepsilon |u_i| r_i \\ &\stackrel{\tinym{Cauchy-Schwarz}}{\leq}& \left( \sum_{i \notin G} u_i^2\right)^{\frac{1}{2}} \left( \sum_{i \notin G} r_i^2\right)^{\frac{1}{2}} + \\ && \qquad \varepsilon \cdot \left( \sum_{i \in G} u_i^2 \right)^{\frac{1}{2}} \left( \sum_{i \in G} r_i^2\right)^{\frac{1}{2}}\\ &\stackrel{ (\ref{eqn:good-row})}{\leq}& r \cdot \left( \sum_{i \notin G} \frac{r_i^2}{\varepsilon \sigma^2}\right)^{\frac{1}{2}} + \varepsilon r \\ &\leq& r \cdot \left( \frac{r^2}{\varepsilon \sigma^2}\right)^{\frac{1}{2}} + \varepsilon r \\ &\stackrel{ (\ref{eqn:assume})}{\leq}&2 \varepsilon r \end{eqnarray*} This completes the proof of the lemma.\qed \end{proof} \noindent We now show a useful fact regarding $\rv{i}$. \begin{fact}\label{lemma:fact} $\norm{\sum_i u_i \rv{i}} \leq r$. 
\end{fact} \begin{proof} The statement follows by triangle inequality and Cauchy-Schwarz: \[ \norm{\sum_i u_i \rv{i}} \leq \sum_i |u_i|r_i \leq \left( \sum_i u_i^2 \right)^{\frac{1}{2}} \cdot \left( \sum_i r_i^2\right)^{\frac{1}{2}} = r. \] This completes the proof.\qed \end{proof} \noindent The next lemma bounds the middle term of the RHS of~(\ref{eqn:4}). \begin{lemma}\label{lemma:term-2} $2 \sigma \cdot \langle \sum_i u_i \rv{i}, {\bf E}[\mathbf{z}]\rangle \geq 2 \norm{\sum_i u_i \rv{i}}^2 - 4 \varepsilon \cdot r^2$. \end{lemma} \begin{proof} We have \begin{eqnarray*} \sigma \cdot \langle \sum_i u_i \rv{i}, {\bf E}[\mathbf{z}]\rangle = \langle \sum_i u_i \rv{i}, \sigma \cdot {\bf E}[\mathbf{z}]\rangle = \langle \sum_i u_i \rv{i}, \sum_j u_j \rv{j}\rangle + \langle \sum_i u_i \rv{i}, \sigma \cdot {\bf E}[\mathbf{z}] - \sum_j u_j \rv{j}\rangle \end{eqnarray*} We now get the following inequalities: \begin{eqnarray*} \left\vert \langle \sum_i u_i \rv{i}, \sigma \cdot {\bf E}[\mathbf{z}] - \sum_j u_j \rv{j}\rangle \right\vert \stackrel{\tinym{Cauchy-Schwarz}}{\leq} \norm{\sum_i u_i \rv{i}} \cdot \norm{\sigma \cdot {\bf E}[\mathbf{z}] - \sum_j u_j \rv{j}} \stackrel{\tinym{Lemma~\ref{lemma:mid-1}, Fact~\ref{lemma:fact}}}{\leq} 2 \varepsilon r^2 \end{eqnarray*} The lemma follows from the above inequality.\qed \end{proof} We now bound the last RHS term of~(\ref{eqn:4}). Since $\mathbf{z} = \frac{\zv{1} + \zv{2} + ... + \zv{l}}{l}$, it is easy to see that \begin{equation} \sigma^2 {\bf E}[\norm{\mathbf{z}}^2] \leq \frac{\sigma^2}{l} {\bf E}[\norm{\zv{j}}^2] + \sigma^2 \cdot \left( \norm{{\bf E}[\zv{j}]}\right)^2, \end{equation} where $j$ is an arbitrary index between $1$ and $l$. We can bound the two terms on the RHS below. \begin{lemma}\label{lemma:term-3-1} For any index $j$, $\frac{\sigma^2}{l} \cdot {\bf E}[\norm{\zv{j}}^2] \leq \varepsilon r^2$. \end{lemma} \begin{proof} Note that $\zv{j}$ is equal to $\frac{\rv{k}}{u_k \sigma}$ with probability $p_k$. 
Therefore, we have \[ \frac{\sigma^2}{l} \cdot {\bf E}[\norm{\zv{j}}^2] = \frac{\sigma^2}{l} \cdot \sum_{k \in G} \frac{p_k r_k^2}{u_k^2 \sigma^2} \stackrel{\tinym{Fact~\ref{lemma:good-p}}}{\leq} \frac{(1+\varepsilon) r^2}{l} \] The lemma now follows from the fact that $l \geq \frac{2}{\varepsilon^4}$.\qed \end{proof} \begin{lemma}\label{lemma:term-3-2} For any index $j$, $\sigma^2 \cdot (\norm{{\bf E}[\zv{j}]})^2 \leq 2 \norm{\sum_{k} u_k \rv{k}}^2 + 4\varepsilon^2 r^2$. \end{lemma} \begin{proof} For any index $k \in G$, let $\delta_k$ denote $\frac{p_k}{u_k} - u_k$. Fact~\ref{lemma:good-p} shows that $|\delta_k| \leq \varepsilon |u_k|$. For any index $k \notin G$, let $\delta_k$ denote $-u_k$. Using~(\ref{eqn:5}), we can write: \begin{eqnarray*} \sigma^2 \cdot (\norm{{\bf E}[\zv{j}]})^2 &=& \norm{\sum_{k \in G} p_k \frac{\rv{k}}{u_k}}^2 \\ &=& \norm{ \left(\sum_{k \in G} u_k \rv{k} + \sum_{k \in G} \delta_k \rv{k} \right)}^2 \qquad \textrm{(since $\delta_k = \frac{p_k}{u_k}-u_k$ for $k \in G$)}\\ &=& \norm{ \left(\sum_{k} u_k \rv{k} + \sum_{k} \delta_k \rv{k} \right)}^2 \qquad \textrm{(since $\delta_k = -u_k$ for $k \notin G$)}\\ &\leq& 2\ \norm{\sum_k u_k \rv{k}}^2 + 2\ \norm{\sum_k \delta_k \rv{k}}^2. \end{eqnarray*} The second term above can be bounded as follows: \begin{eqnarray*} \norm{\sum_k \delta_k \rv{k}}^2 &\stackrel{\tinym{Cauchy-Schwarz}}{\leq}& \left(\sum_k \delta_k^2\right) \left(\sum_k r_k^2\right) \\ &\stackrel{\tinym{Fact~\ref{lemma:good-p}}}{\leq}& \varepsilon^2 r^2 \sum_{k \in G} u_k^2 + r^2 \sum_{k \notin G} u_k^2 \\ & \stackrel{(\ref{eqn:good-row})}{\leq} & \varepsilon^2 r^2 + \frac{r^4}{\varepsilon \sigma^2} \\&\stackrel{(\ref{eqn:assume})}{\leq}& 2 \varepsilon^2 r^2. \end{eqnarray*} This completes the proof of the lemma.\qed \end{proof} Now, combining Lemma~\ref{lemma:term-2}, Lemma~\ref{lemma:term-3-1}, Lemma~\ref{lemma:term-3-2}, we see that~(\ref{eqn:4}) can be simplified as: \[ \sum_{i} {\bf E}[\norm{\rv{i} - \sigma u_i \mathbf{z}}^2] \leq r^2 + 9 \varepsilon r^2. 
\] Combining this with Lemma~\ref{lemma:first-coordinate}, we get that $\FN{A - X}$ has an expected value at most $(1+15\varepsilon) r^2$ for the case where~(\ref{eqn:assume}) holds. Finally, combining this with Lemma~\ref{lemma:fkv-lemma}, we obtain the main result in Theorem~\ref{thm:main}\footnote{The extra factor of $15$ can be handled by using $\varepsilon/15$ instead of $\varepsilon$ in the sampling procedure.}. \section{Conclusion and Open Problems} The following questions related to length-squared sampling are relevant for the current discussion. \begin{enumerate} \item Does a single length-squared sampled point approximate the best-fit line? How good is the approximation? \item Does the result also hold for rank-$k$ approximation for $k > 1$? That is, does a set of $f(\varepsilon)$ length-squared sampled rows (for some function $f$ of $\varepsilon$) contain a rank-$k$ matrix $\tilde{A}$ such that $\FN{A - \tilde{A}} \leq (1 + \varepsilon) \cdot \FN{A - \pi_k(A)}$? \item Our results show that $\Omega(\frac{1}{\varepsilon^4})$ length-squared sampled rows are sufficient to obtain $(1+\varepsilon)$ multiplicative approximation. Can we show that sampling $\Omega(\frac{1}{\varepsilon^4})$ rows are necessary? Note that for additive approximation (see Theorem~\ref{thm:frieze}), $\Omega(\frac{1}{\varepsilon})$ rows are sufficient. \item Does {\em adaptive} length-squared sampling\footnote{Adaptive sampling means that we sample a sequence of sets $S_1, ..., S_k$, where $S_1$ contains length-squared sampled rows of the given matrix $A$, $S_2$ contains length-squared sampled rows of the matrix $A - \pi_{S_1}(A)$, and so on. Here, $\pi_S(A)$ denotes the projection of the rows of matrix $A$ onto the linear subspace spanned by elements of $S$. See \cite{dv06} for a detailed discussion.} give multiplicative rank-$k$ approximation? Note that the previous work of Deshpande and Vempala~\cite{dv06} only showed additive approximation. 
Multiplicative approximation was shown to be achieved in subsequent work of Deshpande \emph{et al.}~\cite{drvw06} when length-squared sampling was combined with \emph{volume sampling}. So the relevant question is whether length-squared sampling is sufficient for multiplicative approximation. \end{enumerate} The first two questions are simple to resolve. A simple analysis, which is also implicit in some of the previous works (e.g., \cite{drvw06}), gives us that a single length-squared sampled point gives a 2-factor approximation in expectation. A simple example gives a negative answer to the second question. Consider a 2-dimensional point set where all but one point have coordinates $(x, 0)$ and the remaining point has coordinates $(0, y)$, where $x \gg y$. In this case, $\FN{A - \pi_2(A)} = 0$ but any constant-sized length-squared sampled set is unlikely to contain the point at $(0, y)$. The last two questions remain open. \addcontentsline{toc}{section}{References} \bibliographystyle{alpha}
1,108,101,565,866
arxiv
\section{More details on experimental setup} The measurement apparatus is shown in Fig.~\ref{fig:setup}a. The Pockels cell of PC1 and the adjoining PBSs are used to filter the noise of excitation pulses in the temporal domain by rapidly switching the noise pulses to the orthogonal polarization and filtering with a PBS. The Pockels cells are driven by high-voltage pulsed drivers. Then PC2 is used to switch the early mode to the long fiber of 69 m and the late mode to the short fiber of 5 m. We use a weak locking laser beam coupled through another port of the PBS to lock the interferometer. The locking beam leaves the interferometer from another port to the APD. Then the interferometer is locked on the error signal generated from the modulated locking beam. We dynamically turn off the locking beam in the experimental period to prevent residual noise from leaking to the SPDs in the detection windows. \begin{figure}[hbtp] \centering \includegraphics[width=0.6\textwidth]{s1.pdf} \caption{\textbf{More details on experimental setup.} \textbf{a}, Measurement setup. PC: Pockels cell, SMF: Single mode fiber, APD: Avalanche photodiode, PBS: Polarizing beam splitter, SPD: Single photon detector. \textbf{b}, 474 nm laser system (some waveplates not shown).} \label{fig:setup} \end{figure} We use non-polarization-maintaining fibers. The polarizations of the single photon and the locking beam are orthogonal in the fiber. We consider that the drift of the birefringence of the fiber may change the relation between the two polarizations over a long period, and thus shift the locking point, so we cover the long fiber with thermal insulation material. The fiber phase shifter (FPS) with a fiber length of 5 m works as the short arm of the interferometer and also for phase feedback. The FPS works by stretching the fiber wound around a small PZT, which may also induce a birefringence phase in the fiber.
To avoid the possible change of the birefringence phase over a long time, we stabilized the temperature of the FPS. The superatom is manipulated by a SHG (Second-Harmonic Generation) laser of 474 nm from Toptica with a power of $\sim$1.1 W. As shown in Fig.~\ref{fig:setup}b, the laser is frequency shifted with an AOM, and the zero-order beam is reused to generate the excitation pulses (E1/E2). The AOMs are driven by a dual-channel arbitrary function generator (AFG). The excitation and retrieval pulses are driven by two synchronized channels of the AFG and combined with a PBS, and PC3 is used to switch the corresponding polarizations. The maximum power of $Rv_2$ on the superatom is about 500 mW with a waist radius of 19 $\upmu$m. In addition, the seed laser of 948 nm and another laser of 795 nm are both locked on a ULE (Ultra-Low Expansion) cavity. \section{Experimental time sequence} The experimental time sequences are shown in Fig.~\ref{fig:timeseq} and generated with an FPGA system. We generate the excitation pulses of 795 nm (Ext.) and 474 nm (E1/E2) with a width of 100 ns, together with PC3 controlling the polarizations of the 474 nm pulses. The width of the first excitation pulse is narrowed to realize a $\pi/2$ excitation. Then together with another patching pulse of $\ket{R_2}$, the initial preparation is finished. The $Rv_1/Rv_2$ pulses with the width of 170 ns then retrieve the entangled single photons, and the patching pulses (E1, E2 and 795 nm Ext.) are used to recreate the qubits. The power, width and the frequency of the 474 nm pulses are all modified with the AFG, and we also sweep the relative phase of the GHZ states by changing the waveforms. Another Pockels cell of PC2 dynamically brings backwards the late photons retrieved with $Rv_2$ through the long fiber to overlap with the early photons. The noises from the $\ket{R_2}$ excitation are also brought backwards by PC2.
Together with the filtering of PC1, the detection probability of noise in each SPD and each window is reduced to about 0.1\%. Besides, the 795 nm excitation beam is resonant with the ring cavity of the superatom, so the power requirement is reduced a lot, which can also help reduce the noise going into the SPDs. \begin{figure}[hbtp] \centering \includegraphics[width=0.6\textwidth]{s2.pdf} \caption{\textbf{Time sequence.} The 474 nm waveforms are modified with the AFG. Other time sequences are generated with the FPGA. The pulses marked as p1 to p4 correspond to the preparation and retrieval of the first photon. The pulses in the dashed box are iterated to generate more photons.} \label{fig:timeseq} \end{figure} \section{Fixing the internal phase} To prepare the GHZ states, we first measure the oscillations of the coincidences in the $(\ket{E}\pm\ket{L})/\sqrt{2}$ basis by varying the relative phases with the AFG for each $\ket{\Phi}_m$ state. We decompose $\ket{\Phi}_m$ with $\ket{D}=(\ket{E}+\ket{L})/\sqrt{2}$ and $\ket{A}=(\ket{E}-\ket{L})/\sqrt{2}$: \begin{equation*} \begin{split} \ket{\Phi}_m^\phi=&\frac{1}{\sqrt{2}} (\ket{E}^{\otimes m}+e^{i\phi}\ket{L}^{\otimes m})\\ =&\sum_{s=0,2,4...}\sum_{j_1,...,k_1,...} \ket{D}^{\otimes (m-s)}_{j_1j_2...}\ket{A}^{\otimes s}_{k_1k_2...} (1+e^{i\phi})/2^{(m+1)/2}\\ +&\sum_{s=1,3,5...}\sum_{j_1,...,k_1,...} \ket{D}^{\otimes (m-s)}_{j_1j_2...}\ket{A}^{\otimes s}_{k_1k_2...} (1-e^{i\phi})/2^{(m+1)/2}. \end{split} \end{equation*} The symbols of $j_1,j_2,...$ and $k_1,k_2,...$ are the photon indexes with the conditions of $j_1<j_2<j_3...$ and $k_1<k_2<k_3...$. So the coincidences of $2^m$ terms are separated into two groups, both of which are summed up. So we can determine the relative phase of zero from the normalized oscillations with $\phi$ (not shown), and $\phi$ is swept by changing the initial phase of $Rv_2$.
\section{Original data for $\ket{\Phi}_1$ and $\ket{\Phi}_2$} After setting the relative phase, we measure the coincidence in different bases. Each photon detected can provide one count of SPD$_1$ or SPD$_2$, corresponding to the orthogonal basis. Then we can get all coincidences of $2^m$ terms shown in the histograms of Fig.~2a. For $\ket{\Phi}_1$ the counts of $\ket{E}\bra{E}$ and $\ket{L}\bra{L}$ are 13724 and 15240, and the counts of $\ket{D}\bra{D}$ and $\ket{A}\bra{A}$ are 28097 and 642. The experimental cycle numbers of two settings are both about $2.6\times10^5$ measured in 40 s. \begin{table*}[hbtp] \centering \caption{Coincidence of $\ket{\Phi}_2$ on eigenstate and superposition state basis} \begin{threeparttable} \begin{tabular}{cccccc} \toprule \hline Basis& \quad\quad & \quad\quad$EE$ \quad\quad& \quad\quad$EL$\quad\quad & \quad\quad$LE$\quad\quad & \quad\quad$LL$\quad\quad \\ Coincidences& \quad\quad & \quad\quad2407 \quad\quad& \quad\quad154\quad\quad & \quad\quad71\quad\quad & \quad\quad2301\quad\quad \\ \midrule Basis& \quad\quad & \quad\quad$DD$ \quad\quad& \quad\quad$DA$\quad\quad & \quad\quad$AD$\quad\quad & \quad\quad$AA$\quad\quad \\ Coincidences& \quad\quad & \quad\quad2234 \quad\quad& \quad\quad179\quad\quad & \quad\quad204\quad\quad & \quad\quad2324\quad\quad \\ \midrule Basis& \quad\quad & \quad\quad$CC$ \quad\quad& \quad\quad$CP$\quad\quad & \quad\quad$PC$\quad\quad & \quad\quad$PP$\quad\quad \\ Coincidences& \quad\quad & \quad\quad225 \quad\quad& \quad\quad2237\quad\quad & \quad\quad2286\quad\quad & \quad\quad192\quad\quad \\ \hline \bottomrule \end{tabular} \end{threeparttable} \label{fig:phi2} \end{table*} For $\ket{\Phi}_2$ state, the detailed results are shown in Table.~\ref{fig:phi2} with $\ket{C}=(\ket{E}+i\ket{L})/\sqrt{2}$ and $\ket{P}=(\ket{E}-i\ket{L})/\sqrt{2}$. The bases in the table are written in a simplified form. The experimental cycle number of each row is about $4\times10^5$ measured in 60 s. 
According to the definition in the main text, we can calculate the fidelity with the coincidences: \begin{gather*} F_e= (c_{EE}+c_{LL})/(c_{EE}+c_{EL}+c_{LE}+c_{LL}),\\ M_0^{\otimes 2} = (c_{DD}-c_{DA}-c_{AD}+c_{AA})/(c_{DD}+c_{DA}+c_{AD}+c_{AA}),\\ M_1^{\otimes 2} = (c_{CC}-c_{CP}-c_{PC}+c_{PP})/(c_{CC}+c_{CP}+c_{PC}+c_{PP}),\\ F_s = (M_0^{\otimes 2}-M_1^{\otimes 2})/2. \end{gather*} Besides, the probability ratio of single photon to noise is about 100 : 1. So the noise can contribute to the coincidences a little. The noise is not corrected in the main text. We estimate $c_{EL}\approx 107$ and $c_{LE}\approx 24$ with the noise corrected. \section{Temporal profile analysis} The profiles of the single photon pulses are shown in Fig.~\ref{fig:phwave}, with the dashed lines defining the detection windows. The time gap between adjacent windows is 640 ns. Residual excitation noises occur after each detection window. The position of the noise is manipulated by PC2. Because the switching time of the Pockels cell is within tens of nanoseconds, some signals and noises at the edge are not fully switched. We set the window narrower than the signal, which seems to slightly improve the fidelity, and this also contributes to the efficiency scaling factor. In each window, the second-order auto-correlation function $g^{(2)}$ is measured to be $\sim 0.05$, mainly limited by the dark counts of 0.1\% ($g^{(2)}<0.01$ after correcting the dark counts). The value of $g^{(2)}=p_{s1s2}/(p_{s1} p_{s2})$ is measured by the Hanbury Brown-Twiss experiment, with $p_{s1s2}$ referring to the probability of coincidence and $p_{s1/s2}$ referring to the individual probability of counts on SPD$_{1/2}$. \begin{figure}[hbtp] \centering \includegraphics[width=0.6\textwidth]{photonwaveform.pdf} \caption{\textbf{Profile of the $\ket{\Phi}_6$ photons.} The measurement resolution of each data point is 2.5 ns. The detection windows are defined by the dashed lines.
The residual noise is mainly after each detection window. The profile detected with SPD$_2$ is similar and not shown. We sweep the phase of the interferometer, so the results of two SPDs can be averaged.} \label{fig:phwave} \end{figure} From the profiles, we find the detected efficiency in the first window corresponds to the overall single photon efficiency of 9.4\%. But the heights of the signals in the following windows gradually decrease. To find out the problem, we analyse the original data of Fig.~2a. We calculate the conditional probability of $P_{(i+1)|i}$, which refers to the probability of detecting the photon in the window $i+1$ conditioned on the detection in the window $i$, and find that the conditional probabilities are not decreasing with the increase of $i$. So the in-fiber efficiency does not show obvious dropping, and can not be directly associated with the phenomenon in the time domain. Thus we infer each patching operation has a chance to generate some accumulated components in the Rydberg state, which will suppress subsequent excitations and can not be efficiently retrieved during the successive photon generation. As a result, the integral probability of the accumulated components will increase and thus the photon counts will decrease corresponding to the time domain. We think the accumulated components possibly come from the residual double Rydberg excitation due to our fast excitation of 100 ns. In the end of each cycle, we have already applied additional $Rv_1$ and $Rv_2$ pulses of hundreds of nanoseconds, which may help clean the accumulated components. From the dropping speed in Fig.~\ref{fig:phwave}, the probability of creating the accumulated components for each patching operation is estimated to be about 2\%. Once the accumulated components are created, the detection of successive photons will fail. Therefore the fidelity will not be influenced. 
\section{Bit-flip error} \subsection{Patching pulse infidelity} Some issues will cause the imperfection of the patching pulse, such as the fluctuation of the collective Rabi frequency coming from the passive stability of atom numbers or laser power. Two retrieving and two patching pulses are used to expand one photon and recreate the superatom qubits. Initialized on $\ket{1}_1\ket{0}_2$, the failure of the first patching pulse will cause the flip from $\ket{1}_1$ to $\ket{0}_1$, and thus the second qubit will flip from $\ket{0}_2$ to $\ket{1}_2$. Also, for the $\ket{0}_1\ket{1}_2$ state, the failure of the second patching pulse will result in the $\ket{0}_1\ket{0}_2$ state and eliminate the successive generation. In order to observe the failure of the patching pulse, we perform an independent measurement as shown in Fig.~\ref{fig:indepmeas}. We remove other operations and simply use $\ket{R_{1}}$ created with the patching pulse to suppress the subsequent $\ket{R_{2}}$. We find that compared with the situation with $\ket{R_{1}}$ not created, the photon retrieved from $\ket{R_{2}}$ is suppressed to about 2\%. We think the residual signal can be mainly explained as the failure of the $\ket{R_{1}}$ patching pulse. With a similar experiment, the failure of the $\ket{R_{2}}$ patching pulse is also measured to be about 2\%. \begin{figure}[hbtp] \centering \includegraphics[width=0.45\textwidth]{s3.pdf} \caption{\textbf{Independent measurements of $\ket{R_1}$ patching infidelity.} } \label{fig:indepmeas} \end{figure} \subsection{Unexpected retrieval} Further, we insert $Rv_{2}$ pulse between the excitations of $\ket{R_{1}}$ and $\ket{R_{2}}$ as shown in Fig.~\ref{fig:indepmeas1}, and find that the retrieval pulse will cause the residual signal from $\ket{R_{2}}$ to increase. This can be explained as the unexpected retrieval of $\ket{R_{1}}$. 
The polarization impurity of the retrieving pulses and finite splitting of the Rydberg energy levels can lead to the loss of $\ket{R_{1}}$. From the residual signal, we estimate that $\ket{R_{1}}$ will experience unexpected retrieval by $Rv_{2}$ with a probability of about 3\%. With a similar experiment, we estimate $\ket{R_{2}}$ will experience unexpected retrieval by $Rv_{1}$ with a probability of about 1\%. We think the distinction comes from the imbalance of the dipole matrix elements ($\bra{e_1}er\ket{r_1}/\bra{e_1}er\ket{r_2}=\sqrt{3}$) and the intensity of the retrieving pulses ($I_{R_1}/I_{R_2}\sim1/3$). Thus the $\ket{R_1}$ state is more sensitive to the polarization impurity. \begin{figure}[hbtp] \centering \includegraphics[width=0.45\textwidth]{s4.pdf} \caption{\textbf{Independent measurements of the unexpected retrieval of $Rv_2$.} } \label{fig:indepmeas1} \end{figure} \subsection{Eigenstate fidelity} \label{bitflip} In each step, we cycle retrieval-patching-retrieval-patching operations to expand one photon and the superatom qubit. But as mentioned above, each operation may fail with small probabilities. In Table~\ref{fig:bitflip}, we list the complete evolution when expanding one photon, with a failure of one of the operations. There we do not consider the failure of more than one operation because the high-order events have much lower probability. \begin{table*}[hbtp] \centering \caption{Evolution of the atomic state during each iteration} \begin{threeparttable} \begin{tabular}{cccccccccccccc} \toprule atomic state & \multicolumn{4}{c}{$\ket{0}_1\ket{1}_2$} & \quad & \quad & \quad & \multicolumn{4}{c}{$\ket{1}_1\ket{0}_2$} & \quad & \quad \\ steps\quad\quad & $Rv_1$ & $P_1$ & $Rv_2$ & $P_2$ &\quad error\quad& prob.
\quad & \quad & $Rv_1$ & $P_1$ & $Rv_2$ & $P_2$ & \quad error \quad& prob.\quad \\ \midrule $Rv_1$ fails & $\widetilde{00}$ & 10 & 10 & 10 &\quad flip & 1\% & \quad & $\widetilde{00}$ & 10 & 10 & 10 & \quad & \quad \\ $P_1$ fails & 01 & 01 & $\widetilde{00}$ & 01 & \quad & \quad & \quad & $\widetilde{00}$ & 00 & 00 & 01 & \quad flip & 2\% \\ $Rv_2$ fails & 01 & 01 & $\widetilde{00}$ & 01 & \quad & \quad & \quad & $\widetilde{00}$ & 10 & $\widetilde{00}$ & 01 & \quad flip & 3\% \\ $P_2$ fails & 01 & 01 & $\widetilde{00}$ & 00 & \quad loss & 2\% & \quad & $\widetilde{00}$ & 10 & 10 & 10 & \quad & \quad \\ \bottomrule \end{tabular} \begin{tablenotes} \footnotesize \item[1] The failure of $Rv_{1/2}$ is defined as the operation succeeding to retrieve $R_{1/2}$ but meanwhile also retrieving $R_{2/1}$. The failure of $P_{1/2}$ means the $\pi$ pulse on $R_{1/2}$ fails to create an excitation. \item[2] The tilde overline means the operation can lead to the single photon releasing. \item[3] Prob.\ refers to the probability for the single failure to happen. \end{tablenotes} \end{threeparttable} \label{fig:bitflip} \end{table*} Beginning with the superatom state of $\ket{0}_1\ket{1}_2$ or $\ket{1}_1\ket{0}_2$, when we implement $Rv_1\rightarrow P_1\rightarrow Rv_2\rightarrow P_2$ operations, errors may happen with either one of them failing. The $\ket{0}_1\ket{1}_2$ state will flip with the unexpected retrieval happening. Nevertheless, the loss event will not contribute to the $m$-fold coincidence. The $\ket{1}_1\ket{0}_2$ state can flip in two situations, and results in much more contribution to the infidelity. The difference between $\ket{0}_1\ket{1}_2$ and $\ket{1}_1\ket{0}_2$ comes from the sequential order of our time sequence. We define $p_1$ as the infidelity on $\ket{0}_1\ket{1}_2$ and $p_2$ as the infidelity on $\ket{1}_1\ket{0}_2$. From Table~\ref{fig:bitflip} we can calculate $p_1\approx 1\%$ and $p_2\approx 8\%$.
There the $Rv_2$ failure on $\ket{1}_1\ket{0}_2$ can lead to the release of another photon, and thus we simply consider its contribution to infidelity to be doubled. The averaged probability for the errors happening on $\ket{0}_1\ket{1}_2$ and $\ket{1}_1\ket{0}_2$ is $(p_1+p_2)/2\approx 4.5\%$. We can simply estimate the scaling of $F_e$ being $0.955^{m-1}$. So the independent measurements give a close result compared with the fitting curve of $\alpha^{m-1}=0.96^{m-1}$ in the main text. \subsection{Relation between $F_e$ and $F_s$}\label{rela} As discussed above, the errors in each step can cause the generation of some unexpected quantum states. We simply consider the patching failure in step $s$, leading to the flipped state expressed as $\ket{\varepsilon_s}$. We assume there are no other additional dephasing terms. This process can be expressed as \[\ket{\Phi}_m \Rightarrow \ket{\Phi'}_m = q_1 \ket{E}^{\otimes m}+q_2\ket{L}^{\otimes m} +\sum k_s e^{i \phi_s}\ket{\varepsilon_s}\] with $q_1, q_2>0$. It is easy to derive the fidelity of $F=|\langle \Phi'_m\ket{\Phi_m}|^2=(q_1+q_2)^2/2$. Since $\langle \varepsilon_s\ket{\Phi_m}=0$, $\ket{\varepsilon_s}$ and the phase $\phi_s$ will not contribute to the fidelity. Then $F_e$ can also be easily calculated with $q_1^2+q_2^2$ from the definition. Then we use an easy way to derive $F_s=2 F-F_e=2 q_1 q_2$, and thus $F_s\leqslant F_e$, which shows $F_e$ is the upper limit of $F_s$. In most situations, the quantum states are balanced, so we can consider $q_1^2\approx q_2^2$ and $F_e\approx F_s$. If there are phase fluctuations between $\ket{E}^{\otimes m}$ and $\ket{L}^{\otimes m}$, $F_s$ will be further reduced. \section{Other issues on the eigenbasis result} \subsection{Imbalance of the eigenstate} We find that the 3D histogram in the main text gradually shows imbalance with the increasing photon number. The imbalance of the $\ket{\Phi}_6$ state is particularly obvious.
The imbalance of the bit-flip error can partly explain this with the flip of $\ket{E}$ being relatively larger and accumulated over $m$ photons. Also, we find the first photon shows some imbalance which may come from the drift of the first $\pi/2$ pulse after a long experiment. Considering these two reasons, the count of $\ket{E}\bra{E}^{\otimes m}$ is estimated to be about 0.7 times of $\ket{L}\bra{L}^{\otimes m}$. Further, we think the statistical fluctuation of Poisson distribution also contributes a lot to the imbalance considering the error bar is large. \subsection{Influence of the detector afterpulse} The SPDs have a chance to give the afterpulse which means it can give another fake count in the next window after detecting the real photon. This will cause fake coincidences. Between the adjacent windows of 640 ns, the afterpulse of the SPDs in this work is measured to be about 0.1\%. In a simple situation, if there is some 3-fold event of $E0LL$ with the second photon not detected, then the afterpulse will cause additional fake $EELL$ coincidence with the probability of 0.1\%. We therefore consider all the possible events and correct the experimental result, then the fidelity of $\ket{\Phi}_6$ is calculated to be 62.8\%. Considering the influence of the afterpulse is not too much, we do not correct it in the main text. \section{Phase noise measurement} Firstly, the fidelities are influenced by the performance of the measurement devices, such as the precision of the phase locking. We use the weak probe laser of the 795 nm and the oscilloscope to test the locked measurement devices. The standard deviation $\sigma_{inter}$ of phase fluctuation is measured to be about 7.4$^\circ$ calculated from the intensity signal of interference. Further, although the laser is locked on the ULE cavity, there is residual phase noise of around megahertz which will also influence the fidelity~\cite{de2018analysis}.
We analysed the laser noise with a 10 $\upmu$s-delayed Mach-Zehnder interferometer as shown in Fig.~\ref{fig:phasmea}. We split the laser power into two parts with a PBS and let them interfere with each other but with one arm delayed 10 $\upmu$s. With infinite length of delaying fiber, the process is equivalent to the interference of two independent lasers locked on the same frequency, so the phase correlation between them will be eliminated and the intensity fluctuation in the temporal domain can indicate their phase noise of $\sqrt{2}\sigma_{laser}$, and $\sigma_{laser}$ is the standard deviation of one laser pulse. In this work, the time interval between adjacent operations is hundreds of nanoseconds, so we think the delay of 10 $\upmu$s is sufficient. We measure the intensity fluctuation with two APDs and subsequently calculate the phase evolution $\varphi(\tau)$. By directly calculating $\varphi(\tau_0)-\varphi(\tau_0+\Delta \tau)$, we can analyse its influence on the superatom at different times $\tau_0$. Then we calculate the standard deviation of $\sigma_{laser_1}$ being $4.8^\circ$ with $\Delta \tau=480$ ns for the 795 nm laser. \begin{figure}[hbtp] \centering \includegraphics[width=0.4\textwidth]{s5.pdf} \caption{\textbf{Measurement of the laser phase noise.} The relative phase of two arms is not locked, as the drift of the interferometer is slow. The phase fluctuation of the laser can lead to fast variation of the intensity in several microseconds, measured by two APDs. } \label{fig:phasmea} \end{figure} The phase fluctuation of the 474 nm laser comes from two step joint operations of creating one photon as shown in Fig.~\ref{fig:timeseq}. The phases of the p1 to p4 pulses can be overlapped with the laser phase noise of $\varphi_{p1}$ to $\varphi_{p4}$. We thus analyse its influence of $\Delta\varphi(\tau_0)=(\varphi_{p1}-\varphi_{p3})-(\varphi_{p2}-\varphi_{p4})$ when creating a single photon.
The result gives a value of $\sigma_{laser_2}$ being about $12^\circ$. The standard deviation of the overall laser noise is therefore $\sqrt{\sigma_{laser_1}^2+\sigma_{laser_2}^2}\approx 13^\circ$. Also, we extend the analysis of data to more photons, and find that the noise increases by a factor of $\sqrt{m}$, which indicates that the laser noises on different photons have no obvious correlation. \section{Phase-fidelity relation} In Sec.~\ref{rela}, we analyse the simple situation without additional dephasing. Further, we assume there is a relative phase between $\ket{E}^{\otimes m}$ and $\ket{L}^{\otimes m}$, and the phase noise is described by the normal distribution of $p(\phi)=e^{-\phi^2/(2 \sigma_r^2)}/\sqrt{2\pi \sigma_r^2}$. Here $\sigma_r$ is the standard deviation of the phase noise in radians. It is easy to derive the fidelity $F=\int_\phi(q_1^2+q_2^2+2q_1q_2 \cos\phi)d\phi$ and $F_s=\int_\phi 2 q_1q_2 \cos(\phi)d\phi$. We find $F_s=2q_1q_2 e^{-\sigma_r^2/2}\approx F_e e^{-\sigma_r^2/2}$ with the dephasing term in the form of $e^{-\sigma_r^2/2}$. The $m$-photon infidelity results from all the above noise of $\sqrt{m^2\sigma^2_{inter}+m\sigma^2_{laser}}$. There, the fluctuation of the time-bin locking point is slow and the drift is the same for all $m$ photons, so the noise of $m$-fold rapidly increases to $m\sigma_{Lock}$. Then we calculate $F_s$ with the relation of multiplication of different contributions expressed as $F_s=F_e \beta_\phi \beta_{res}$ with $\beta_\phi=\beta_{inter}^{m^2}\beta_{laser}^m$. $\beta_{laser}$ and $\beta_{inter}$ are the dephasing terms of one photon. \end{document}
1,108,101,565,867
arxiv
\section{Introduction} \subsection{The Casson-Rivin program and its extension} \medskip There are several ways to construct hyperbolic metrics on an ideally triangulated 3-manifold with torus boundary. The most prominent ones are Thurston's algebraic gluing equations and Casson-Rivin's angle structure. In the angle structure approach, one first introduces the notion of angles on the triangulation and uses the Lobachevsky function to define the volume of a tetrahedron with angle assignments. In \cite{luo-ajm}, Casson-Rivin's angle structure program is extended to closed triangulated 3-manifolds. The volume of an angle structure is defined and the critical points of the volume are investigated in \cite{luo-ajm}. The goal of this paper is to investigate the relationship between the critical points of the volume and a natural extension of the hyperbolic space by the de Sitter space. It turns out that many of the critical points have geometric meaning in terms of geometric structures based on this extended hyperbolic space. \subsection{Volume maximization on triangulated $3$-manifolds} \begin{defi} Let $M$ be a closed 3-manifold, and let $T$ be a triangulation of $M$. Recall that the triangulation $T$ is obtained as follows. Take a finite set of disjoint Euclidean tetrahedra $s_1, ..., s_N$ and identify all their codimension-1 faces in pairs by affine homeomorphisms. The quotient space $M$ inherits a natural triangulation, denoted by $T$. A \emph{wedge} of $T$ is a couple $(e,s)$, where $s$ is a simplex of $T$ and $e$ is an edge of $s$ in the unidentified space $\cup_{i=1}^N s_i$. The set of wedges of $T$ will be denoted by $W(T)$, while the set of vertices, edges, 2-faces and 3-simplices of $T$ will be denoted by $V(T), E(T), F(T)$ and $S(T)$ respectively. \end{defi} The main ingredient in Casson-Rivin's angle structure is based on the observation that the vertex link of an ideal hyperbolic tetrahedron is a Euclidean triangle.
In our extension, we observe that the vertex link of a compact tetrahedron in the hyperbolic, spherical or Euclidean 3-space is a \emph{spherical} triangle. This prompts us to propose the following definition. \begin{defi} An \emph{angle structure} on $T$ is a function $\theta:W(T)\rightarrow (0,\pi)$ such that: \begin{itemize} \item for each edge $e$ of $T$, the sum of the $\theta(w)$ over the wedges of the form $w=(e,s), s\in S(T)$, is equal to $2\pi$, \item for each $s\in S(T)$ and each vertex $v$ of $s$, $\theta(e_1,s)+\theta(e_2,s)+\theta(e_3,s) >\pi$, where the $e_i$ are the 3 edges of $s$ adjacent to $v$, \item for each $s\in S(T)$ and each vertex $v$ of $s$, $\theta(e_1,s)+\theta(e_2,s)< \theta(e_3,s)+\pi$, and similarly for permutations of $1,2,3$. \end{itemize} We denote by $AS(T)$ the space of angle assignments on $T$. \end{defi} The geometric meaning of the first condition is that, if the geometric structures on the simplices can be glued along the faces (this is discussed below), then the angles add up at the edges so that there is no singularity there. The second and the third conditions mean that for each simplex the link of each vertex is a spherical triangle. It is quite possible that, in some cases, $AS(T)$ might be empty; however, all manifolds do admit a triangulation for which $AS(T)$ is non-empty \cite{kitaev-luo}. There is a well-defined manner, explained in \cite{luo-ajm} and recalled in Sections 4--5, to associate to an angle assignment $\theta\in AS(T)$ a number which is, in a precise way, a generalized volume. This defines a function $V:AS(T)\rightarrow {\mathbb R}$. This ``volume'' is defined in terms of a natural extension of the Schl\"afli formula, so that it automatically verifies this identity.
If $\theta_0$ is a critical point of $V$ in $AS(T)$ such that all the angles assigned to all simplices of $T$ are the dihedral angles of a hyperbolic simplex, then, thanks to the Schl\"afli formula, the lengths of an edge in the wedges containing it match, so that the faces of the simplices can be glued isometrically and $\theta_0$ defines a hyperbolic metric on $M$. One of the main points of this paper is that an extension of this statement holds when $\theta_0$ does not assign to all simplices of $T$ the dihedral angles of a hyperbolic simplex. \subsection{Extended hyperbolic structures} There is a rather natural extension of the hyperbolic space by the de Sitter space, used for instance in \cite{shu,cpt} in a polyhedral context somewhat reminiscent of the arguments followed here. We call $HS^3$ this extended hyperbolic space, so that $HS^3$ contains an open subset isometric to the 3-dimensional hyperbolic space and another open subset isometric to the quotient of the de Sitter space by the antipodal map. Given a 3-dimensional manifold $M$, an \emph{HS-structure} on $M$ is a geometric structure locally modelled on $HS^3$. This is explained in Section 2. \subsection{The main result} The main result presented here is that most critical points of the volume $V$ on the interior of $AS(T)$ have a natural interpretation in terms of HS-structures. \begin{thm} \label{tm:main} Let $\theta\in AS(T)$ be a critical point of $V$. Then one of the following applies. \begin{enumerate} \item $\theta$ corresponds to a spherical metric on $M$ (for each simplex of $T$, the angles are the angles of a spherical simplex, and those simplices can be glued isometrically along their faces, yielding a spherical metric), \item $\theta$ corresponds to an HS-structure on $M$.
If this extended hyperbolic structure has a de Sitter part, then it contains a totally geodesic surface homeomorphic to $S^2$ or ${\mathbb R} P^2$, which is a normal surface in $T$, \item all simplices in $T$ are either Euclidean or flipped Euclidean (see below). The volume $V(\theta)$ is then a non-negative integer multiple of $2\pi^2$. If at least one simplex is flipped, then $V>0$ and $(M,T)$ contains a normal surface homeomorphic to a sphere or a projective plane. \end{enumerate} \end{thm} The first case should be clear, and can happen only if $M$ is diffeomorphic either to $S^3$ or to ${\mathbb R} P^3$. In the second case, the totally geodesic space-like surfaces in the de Sitter parts of the HS-structure realize geometrically a decomposition of $M$ in irreducible parts, each of which carries a hyperbolic metric. The third case is quite special, presumably it can occur only in very specific cases, see below. \subsection{Geometric interpretation} The first idea in Theorem \ref{tm:main} is that considering HS-structures radically simplifies the way in which one can find hyperbolic structures on 3-manifolds by a critical point argument. Indeed, if $M$ is reducible, there might still be a critical point of $V$ on $AS(T)$, corresponding not to a hyperbolic metric (which is impossible if $M$ is reducible) but to an HS-structure, with a de Sitter part corresponding to each incompressible sphere in $M$. There is however a limit to this argument, as it stands. Given an HS-structure $h$ on $M$, the set of its hyperbolic points is a domain $N\subset M$ on which the restriction of $h$ defines a complete hyperbolic metric. The de Sitter parts of $h$ are topologically either $S^2\times {\mathbb R}$ or the product of a projective plane and an interval. This means that each connected component of the boundary of $N$ has to be either a sphere or a projective plane, which is a very restrictive condition.
\subsection{Further possible extensions} The construction of HS-structures associated to critical points of $V$ on $AS(T)$ suggests that a further extension of the space of angle assignments $AS(T)$ should be possible, allowing for instance for angle assignments such that the sum of angles at a vertex of a simplex is equal to or less than $\pi$. Such angle assignments would correspond geometrically (at critical points of $V$) to triangulations with at least one vertex in the de Sitter part of the HS-structure obtained. This line of investigation is not pursued here. \section{Extended hyperbolic structures} \subsection{The extended hyperbolic space} One way to define this extension is to consider a strictly convex quadric $Q$ in the projective 3-space ${\mathbb R} P^3$. Given two distinct points $x,y\in {\mathbb R} P^3\setminus Q$, let $D$ be the projective line containing $x$ and $y$. If $D$ intersects $Q$, let $a,b$ be the intersection points of $D$ and $Q$, chosen so that $a,x,y,b$ occur in this cyclic order on $D$. Then define the Hilbert distance between $x$ and $y$ as $$ d_H(x,y) = \frac{1}{2} \log [a,b;x,y]~, $$ where $[\cdot,\cdot;\cdot,\cdot]$ denotes the cross-ratio. If $D$ does not intersect $Q$, use the same formula with $a,b$ replaced by the complex intersections of the line containing $x,y$ with $Q$. This defines a ``distance'' for which the ball bounded by $Q$ can be interpreted as a projective model of $H^3$, while the outside is a projective model of the quotient of the de Sitter space $dS^3$ by the antipodal map. In particular, $d_H(x,y)$ can be: \begin{itemize} \item real and negative, if $x,y$ are in the ball bounded by $Q$, and this defines inside $Q$ a projective model of the hyperbolic 3-dimensional space (known as the Klein model). \item real and positive, if $x,y$ are outside the ball bounded by $Q$ and the line joining them intersects $Q$ in two points. This line is then time-like in $dS^3$.
\item in $i(0,\pi)$, if $x,y$ are outside $Q$ and the line containing them does not intersect $Q$, this line is then space-like in $dS^3$. \item in $i\pi/2+{\mathbb R}$, if $x$ is inside the ball bounded by $Q$ and $y$ is outside it. \item $0$, if the line joining $x$ and $y$ is tangent to $Q$. This line is then light-like in $dS^3$. \end{itemize} The same construction also works in dimension 2, yielding the extended hyperbolic plane $HS^2$. \subsection{The double cover} It is sometimes helpful to consider the double cover $\tilde{HS}^3$ of $HS^3$. It is diffeomorphic to $S^3$, and has two hyperbolic components each isometric to $H^3$, and one de Sitter component isometric to the full de Sitter space $dS^3$. The same works in any dimension. $\tilde{HS}^3$ is composed of two copies of the hyperbolic 3-space, and of one copy of the whole de Sitter space. \subsection{Extended hyperbolic structures} An {\it HS-structure} on a closed 3-dimensional manifold $M$ is a geometric structure locally modeled on $HS^3$, with transformation group $PSL(2,{\mathbb C})$. If $h$ is an HS-structure on $M$, the set of points of $M$ where $h$ is locally hyperbolic is an open subset of $M$, which we denote by $M_H$, and the restriction of $h$ to $M_H$ is a complete hyperbolic metric. Similarly, the set of points of $M$ where $h$ is locally modeled on the de Sitter space is an open subset of $M$, for which we use the notation $M_{dS}$. Then $M_H\cup M_{dS}$ is dense in $M$, and its complement is a surface. \section{Triangles} \subsection{The cosine formula} \begin{defi} We call $\cosh$ the restriction of the function $x\mapsto (e^x+e^{-x})/2$ to ${\mathbb R}_{<0}\cup [0,i\pi]\cup (i\pi+{\mathbb R}_{>0})$. \end{defi} With this definition, $\cosh$ is a bijection from its domain of definition to ${\mathbb R}$. \begin{lemma} \label{lm:cos} Let $\alpha_1, \alpha_2, \alpha_3$ be the (interior) angles of a hyperbolic triangle, and let $a_1,a_2,a_3$ be the lengths of the opposite edges.
Then \begin{equation} \label{eq:cosh} \cosh(a_1)=\frac{\cos(\alpha_1)+\cos(\alpha_2)\cos(\alpha_3)}{\sin(\alpha_2)\sin(\alpha_3)}~. \end{equation} \end{lemma} \subsection{The triangle inequality} Consider now a triangle in $S^2$, of angles $\alpha_1, \alpha_2, \alpha_3$. It is a simple exercise to check that those angles satisfy the following inequality: \begin{equation*} \alpha_2+\alpha_3<\alpha_1+\pi \tag{$TI_1$} \end{equation*} Using the exterior angles rather than the interior angles, this inequality can be written as $$ (\pi-\alpha_1) < (\pi-\alpha_2)+(\pi-\alpha_3)~, $$ which is the triangle inequality for the dual triangle in the sphere. \subsection{A classification of M\"obius triangles} Following \cite{luo-tams}, we consider here a generalization of the notion of spherical (or hyperbolic) triangle. \begin{defi} A {\it M\"obius triangle} is a triple $(\alpha_1,\alpha_2,\alpha_3)\in (0,\pi)^3$. Given a M\"obius triangle, its {\bf edge lengths} are the complex numbers $(a_1,a_2,a_3)$ defined by Equation (\ref{eq:cosh}), with the definition of $\cosh$ given above. \end{defi} The rationale for the terminology used here is that, for any triple $(\alpha_1, \alpha_2,\alpha_3)\in (0,\pi)^3$, there exists a triangle in the complex plane bounded by three circles, unique up to M\"obius transformation, so that its inner angles are the $\alpha_i$'s. The constructions used below are however based mostly on the extended hyperbolic plane and on real projective geometry, rather than complex projective geometry. However sticking to the terms ``M\"obius triangle'' should be helpful to the reader insofar as it is closer to the previous works on the subject, e.g. \cite{luo-ajm}, \cite{luo-tams}. \begin{lemma} \label{lm:triangles} Let $T=(\alpha_1,\alpha_2,\alpha_3)$ be a M\"obius triangle, let $s=\alpha_1+\alpha_2+\alpha_3$, and let $a_i$ be the edge lengths of $T$. Exactly one of the following five cases applies.
\begin{enumerate} \item $T$ is {\bf spherical}: $s>\pi$, and the triangle inequalities $(TI_1),(TI_2),(TI_3)$ hold. Then $a_1,a_2,a_3\in i(0,\pi)$. \item $T$ is {\bf hyperbolic}: $s<\pi$. Then the triangle inequalities $(TI_1),(TI_2),(TI_3)$ hold, and $a_1,a_2,a_3<0$. \item $T$ is {\bf Euclidean}: $s=\pi$. Then the triangle inequalities $(TI_1),(TI_2),(TI_3)$ hold, and $a_1=a_2=a_3=0$. \item $T$ is {\bf flipped hyperbolic}: $\alpha_2+\alpha_3>\alpha_1+\pi$ (or similarly after a permutation of $1,2,3$). Then $a_1<0$ while $a_2,a_3\in i\pi + {\mathbb R}_{>0}$. \item $T$ is {\bf flipped Euclidean}: $\alpha_2+\alpha_3=\alpha_1+\pi$ (or similarly after a permutation of $1,2,3$). Then $a_2=a_3=i\pi$ and $a_1=0$. \end{enumerate} \end{lemma} The proof (which is elementary) is based on two preliminary statements. Let $\alpha_1,\alpha_2,\alpha_3\in (0,\pi)$, and let $s=\alpha_1+\alpha_2+\alpha_3$. \begin{sublemma} \label{slm:1} \begin{itemize} \item If $\alpha_2+\alpha_3<\pi$, then $\cosh(a_1)<1$ (resp. $>1$) if and only if $s>\pi$ (resp. $s<\pi$). \item If $\alpha_2+\alpha_3>\pi$, then $\cosh(a_1)<1$ (resp. $>1$) if and only if $(TI_1)$ holds (resp. $\alpha_2+\alpha_3>\pi+\alpha_1$). \end{itemize} \end{sublemma} \begin{sublemma} \label{slm:2} $\cosh(a_1)>-1$ if and only if either $\alpha_2>\alpha_3$ and $(TI_3)$ holds, or $\alpha_3>\alpha_2$ and $(TI_2)$ holds. \end{sublemma} \begin{proof}[Proof of Lemma \ref{lm:triangles}] Suppose first that the three triangle inequalities hold. Then $T$ is in one of the cases (1), (2) or (3) depending on $s$. Note also that if $(TI_1)$ does not hold, then clearly both $(TI_2)$ and $(TI_3)$ are satisfied. So $T$ is in case (5) if there is equality in inequality $(TI_1)$, or (4) otherwise. In case (1), $\cosh(a_1)<1$ by Sub-Lemma \ref{slm:1}, and $\cosh(a_1)>-1$ by Sub-Lemma \ref{slm:2}, so $a_1\in i(0,\pi)$ by the definition of $\cosh$ used here. The same holds of course for $a_2$ and $a_3$.
In case (3), Sub-Lemma \ref{slm:1} shows that $\cosh(a_i)=1$ for $i=1,2,3$ so that all the $a_i$ are zero. In case (2), by the first case of Sub-Lemma \ref{slm:1}, $\cosh(a_1)>1$, so that $a_1<0$ by our definition of $\cosh$, and the same applies to $a_2$ and $a_3$. In case (5), the second point in Sub-Lemma \ref{slm:1} shows that $\cosh(a_1)=1$, while Sub-Lemma \ref{slm:2} shows that $\cosh(a_2)=\cosh(a_3)=-1$, so that $a_2=a_3=i\pi$. The same argument applies to case (4), then $\cosh(a_1)>1$ while $\cosh(a_2),\cosh(a_3)<-1$, so that $a_1<0$ while $a_2,a_3\in i\pi + {\mathbb R}_{>0}$. \end{proof} The following lemma shows that the edge lengths determine the shape of a M\"obius triangle. \begin{lemma} \label{lm:lengths-unique} Let $A=(\alpha_1,\alpha_2,\alpha_3)$ and $B=(\beta_1, \beta_2, \beta_3)$ be two M\"obius triangles which are not Euclidean or flipped Euclidean. If the corresponding edge lengths of $A$ and $B$ are the same, then $\alpha_i=\beta_i$ for all $i$. \end{lemma} The proof follows from the cosine law \begin{equation} \label{eq:cos} \cos(\alpha_1)=\frac{-\cosh(a_1)+\cosh(a_2)\cosh(a_3)}{\sinh(a_2)\sinh(a_3)}. \end{equation} This law shows that the lengths determine the angles. \subsection{Geometric realization of triangles} The classification of M\"obius triangles in Lemma \ref{lm:triangles} has a natural interpretation in terms of triangles in the extended hyperbolic plane $\tilde{HS}^2$. There is no interpretation needed for spherical, Euclidean or hyperbolic triangles, but flipped hyperbolic triangles correspond to triangles in $\tilde{HS}^2$ with one vertex in one copy of $H^2$ and two in the other. Suppose $t$ is such a flipped hyperbolic triangle, with vertices $v_1,v_2,v_3$, with $v_1$ in one copy of $H^2$ and $v_2,v_3$ in the other. Let $\alpha_i$ be the angle of $t$ at $v_i$, $1\leq i\leq 3$. Those angles can be understood by ``flipping'' $t$, that is, considering the triangle $t'$ with vertices $v'_1, v_2,v_3$, where $v'_1$ is the antipode of $v_1$ in $\tilde{HS}^2$.
$t'$ is a ``usual'' hyperbolic triangle, and its angles are $\beta_1=\alpha_1$, $\beta_2=\pi-\alpha_2, \beta_3=\pi-\alpha_3$. Since $t'$ is a hyperbolic triangle, its angles satisfy $$ \beta_1+\beta_2+\beta_3<\pi~, $$ which translates as $$ \alpha_1+\pi<\alpha_2+\alpha_3~, $$ the inverse triangle inequality for $t$. Similarly, $t$ satisfies the triangle inequality, $$ \pi+\beta_2 > \beta_1+\beta_3~~ \mbox{and}~~ \pi+\beta_3>\beta_1+\beta_2~, $$ which translates as $$ \alpha_1+\alpha_2 < \pi+\alpha_3~~\mbox{and}~~\alpha_1+\alpha_3<\pi+\alpha_2~. $$ This shows that $(\alpha_1,\alpha_2,\alpha_3)$ is a flipped hyperbolic triangle, according to Lemma \ref{lm:triangles}. The same argument can be used backwards, to show that any flipped hyperbolic triangle in the sense of Lemma \ref{lm:triangles} is the triple of angles of a flipped hyperbolic triangle in $\tilde{HS}^2$. Flipped Euclidean triangles can be understood in the same way but by taking a limit. The usual Euclidean triangles can be considered as limits of hyperbolic triangles of diameter going to zero -- actually, a blow-up procedure is necessary, since what we really want to consider are sequences of degenerating hyperbolic triangles for which the angles, and therefore the ratio of the edge lengths, has a limit. This can also be done for flipped hyperbolic triangles, with vertices converging either to a point in the one copy of the hyperbolic plane of $\tilde{HS}^2$ or to its antipode in the other copy. Putting this together, we obtain the following statement. \begin{lemma} \label{lm:mobius} Let $T=(\alpha_1, \alpha_2, \alpha_3)$ be a M\"obius triangle. \begin{enumerate} \item If $T$ is spherical, there is a triangle $t\subset S^2$, unique up to the action of $O(3)$, with angles $\alpha_1,\alpha_2$ and $\alpha_3$. \item If $T$ is hyperbolic, there is a triangle $t\in H^2$, unique up to the hyperbolic isometries, with angles $\alpha_1,\alpha_2, \alpha_3$. 
\item If $T$ is Euclidean, there is a triangle $t\subset {\mathbb R}^2$, unique up to isometries and homotheties, with angles $\alpha_1,\alpha_2, \alpha_3$. In other terms, there is a sequence of hyperbolic triangles $(t_n)_{n\in {\mathbb N}}$ in $H^2$, with angles $\alpha_{1,n},\alpha_{2,n},\alpha_{3,n}$ converging to $\alpha_1,\alpha_2,\alpha_3$, respectively. \item If $T$ is flipped hyperbolic, there is a triangle $t\subset \tilde{HS}^2$, with one vertex in one copy of $H^2$ and two in the other, with angles $(\alpha_1, \alpha_2, \alpha_3)$. It is unique up to the action of $O(2,1)$ on $\tilde{HS}^2$. \item If $T$ is flipped Euclidean, there is a sequence of flipped hyperbolic triangles $(t_n)_{n\in {\mathbb N}}$ in $\tilde{HS}^2$, with angles $\alpha_{1,n},\alpha_{2,n},\alpha_{3,n}$ converging to $\alpha_1,\alpha_2,\alpha_3$, respectively. \end{enumerate} \end{lemma} \section{Three-dimensional simplices} \subsection{Angle systems} We now consider 3-dimensional simplices, and assign to each wedge an angle, as follows. \begin{defi} Suppose $s$ is a tetrahedron. Then an {\it angle system} on $s$ is a function $\theta: \{ (e, s) | e \text{ is an edge of } s\} \to (0, \pi)$ so that for three edges $e_i, e_j, e_k$ ending at a vertex $v$ of $s$, the 3 angles $\theta(e_i, s), \theta(e_j, s), \theta(e_k, s)$ are the inner angles of a spherical triangle. Let $AS(s)$ be the space of all angle systems on $s$. An {\it angled 3-simplex} is a 3-simplex together with an angle system. If $T$ is a triangulation of a closed 3-manifold $M$, an angle system on $T$ is a function $\theta$ defined on the set of all wedges $\{ (e, s) | e$ is an edge of a tetrahedron $s \}$ so that \begin{itemize} \item for each edge $e$ of $T$, the sum of the values of $\theta$ on the wedges having $e$ as their edge is equal to $2\pi$, \item for each 3-simplex $s$ in $T$, the restriction of $\theta$ to all wedges in $s$ forms an angle system.
\end{itemize} \end{defi} In the paper \cite{luo-ajm}, the geometric prototype of an angled 3-simplex is the M\"obius tetrahedron. Namely a topological tetrahedron in ${\mathbb R}^3$ bounded by four 2-spheres of inner angles less than $\pi$. However, there are angled 3-simplices which cannot be realized as a M\"obius 3-simplex. Our main observation is that, in terms of HS-geometry, these angled 3-simplices all have a geometric meaning. Furthermore, the edge lengths, volume and Schl\"afli formula can be generalized to the HS-geometry. These generalizations are exactly the underlying geometric meaning of the corresponding notions defined in \cite{luo-ajm}. \subsection{Face angles} \begin{defi} Let $\alpha=(\alpha_{12},\cdots,\alpha_{34})\in AS(s)$. The face angles of $\alpha$ are the numbers $\beta^i_{jk}\in (0,\pi)$ defined, for $\{ i,j,k,l\}=\{ 1,2,3,4\}$, by the formula $$ \cos(\beta_{jk}^i)=\frac{\cos(\alpha_{il})+\cos(\alpha_{ij})\cos(\alpha_{ik})} {\sin(\alpha_{ij})\sin(\alpha_{ik})}~. $$ \end{defi} The geometric meaning of the face angle is as follows. According to the definition, at the $i$th vertex $v_i$, the angles $\alpha_{ij}, \alpha_{ik}, \alpha_{il}$ are the inner angles of a spherical triangle $A_{jkl}$, which can be considered as the link of $v_i$ in the tetrahedron. Then the face angle $\beta_{jk}^i$ is the $jk$-th edge length of $A_{jkl}$. By definition, face angles are then in $(0,\pi)$. \subsection{Edge lengths} Using the face angles, we make each codimension-$1$ face of an angled tetrahedron $s$ a M\"obius triangle. Thus, by lemma 3.2, we can define for each edge in each face, an edge length. The following is proved in \cite{luo-ajm}. \begin{lemma} \label{lm:lengths-same} If $L$ is an edge of a tetrahedron $s$ with an angle system and $D_1, D_2$ are two faces of $s$ having $L$ as an edge, then the length of $L$ in $D_1$ is the same as that in $D_2$. \end{lemma} Thus the following is well defined.
\begin{defi} Let $\alpha=(\alpha_{12},\cdots,\alpha_{34})\in AS(s)$. The edge lengths of $\alpha$ are the numbers $(l_{ij})_{i\neq j}$ defined as follows: $l_{ij}$ is the length of the edge $ij$ in the two faces of $s$ adjacent to the vertices $i$ and $j$. \end{defi} \subsection{A classification of simplices} It is now possible to describe a classification of 3-dimensional angled simplices. It is slightly more elaborate than the corresponding classification for M\"obius triangles, because simplices can be ``flipped'' in two ways, depending on whether one or two vertices are in one of the copies of $H^3$ in $\tilde{HS}^3$. Here is the definition of the flip at the $i$-th vertex of $\alpha \in AS(s)$. See \cite{luo-ajm} for more details. The flipped simplex $\alpha' = (\alpha'_{12}, ..., \alpha'_{34})$ has angles $\alpha'_{ij} = \alpha_{ij}$ for $j \neq i$ and $\alpha_{jk}'=\pi-\alpha_{jk}$ for $j, k \neq i$. Geometrically, if $v_1, v_2, v_3, v_4$ are the vertices of a spherical simplex, then the flipped simplex (about the first vertex $v_1$) is the spherical simplex with vertices $-v_1, v_2, v_3, v_4$ where $-v_1$ is the antipodal point of $v_1$. \begin{lemma}\label{lm:simplex} Let $\alpha\in AS(s)$. After a permutation of $\{ 1,2,3,4\}$, $\alpha$ is of exactly one of the following types: \begin{enumerate} \item {\bf spherical}: all faces of $s$ are spherical triangles, and all edge lengths are in $i(0,\pi)$. \item {\bf hyperbolic}: all faces of $s$ are hyperbolic triangles, and all edge lengths are in ${\mathbb R}_{<0}$. \item {\bf flipped hyperbolic}: the face $(234)$ is a hyperbolic triangle, while the faces adjacent to the vertex $1$ are flipped hyperbolic triangles. The lengths of the edges $(12),(13),(14)$ are in $i\pi+{\mathbb R}_{>0}$, while the lengths of the other edges are in ${\mathbb R}_{<0}$.
\item {\bf doubly flipped hyperbolic}: all faces of $s$ are flipped hyperbolic triangles, the lengths of the edges $(12)$ and $(34)$ are negative numbers while the lengths of the other edges are in $i\pi+{\mathbb R}_{>0}$. \item {\bf Euclidean}: all faces of $s$ are Euclidean triangles, all edge lengths are zero. \item {\bf flipped Euclidean}: the lengths of the edges $(12),(13),(14)$ are equal to $i\pi$, while the lengths of $(23),(24),(34)$ are zero. \item {\bf doubly flipped Euclidean}: the lengths of $(12),(34)$ are zero while all other edges have length $i\pi$. \end{enumerate} \end{lemma} The terminology is based, as for triangles, on the idea of ``flipping'' a hyperbolic simplex: this means replacing one of its vertices, say $v_1$, by its antipode in $\tilde{HS}^3$. The dihedral angles at all three edges not adjacent to $v_1$ are then replaced by their complement to $\pi$, and the effect on the edge lengths is as described in Lemma \ref{lm:simplex}. Doubly flipping a hyperbolic simplex means replacing two vertices by their antipodes in $\tilde{HS}^3$. \begin{proof}[Proof of Lemma \ref{lm:simplex}] Let $\alpha\in AS(s)$, let $v_1, v_2, v_3, v_4$ be the vertices of $s$. We consider different cases, depending on the lengths of the edges of $s$ and in particular of its face $(v_1,v_2,v_3)$. \begin{enumerate} \item $(v_1,v_2,v_3)$ is a spherical triangle, i.e., its edge lengths are in $i(0,\pi)$. Lemma \ref{lm:triangles}, applied to the three other faces of $s$, shows that the lengths of the three other edges of $s$ are also in $i(0,\pi)$, it follows that $s$ is spherical. \item $(v_1,v_2,v_3)$ is hyperbolic. Then its edge lengths are negative, and considering Lemma \ref{lm:triangles} shows that there are only two possibilities. \begin{enumerate} \item $(v_2,v_3,v_4)$ is hyperbolic, that is, its edge lengths are negative. Then, again by Lemma \ref{lm:triangles}, the length of the edge $(v_1,v_4)$ is also negative, so that $s$ is hyperbolic.
\item $(v_2,v_3,v_4)$ is flipped hyperbolic, that is, the lengths of the edges $(v_2,v_4)$ and $(v_3,v_4)$ are in $i\pi+{\mathbb R}_{>0}$. Then the length of $(v_1,v_4)$ is also in $i\pi+{\mathbb R}_{>0}$, so that $s$ is flipped hyperbolic. \end{enumerate} \item $(v_1,v_2,v_3)$ is flipped hyperbolic, we can suppose without loss of generality that the length of $(v_1,v_2)$ is in ${\mathbb R}_{<0}$ and the lengths of the two other edges are in $i\pi+{\mathbb R}_{>0}$. Two cases are possible. \begin{enumerate} \item $(v_1,v_2,v_4)$ is hyperbolic, it then follows from Lemma \ref{lm:triangles} that $s$ is flipped hyperbolic. \item $(v_1,v_2,v_4)$ is flipped hyperbolic, it then follows from Lemma \ref{lm:triangles} that $s$ is doubly flipped hyperbolic. \end{enumerate} \item $(v_1,v_2,v_3)$ is Euclidean, so that all its edges have zero length. Lemma \ref{lm:triangles} then shows that there are two possible cases. \begin{enumerate} \item $(v_1,v_2,v_4)$ is Euclidean. Then all edges of $s$ have zero length, and $s$ is Euclidean. \item $(v_1,v_2,v_4)$ is flipped Euclidean, so that $(v_1,v_4)$ and $(v_2,v_4)$ have length $i\pi$. In this case $s$ is flipped Euclidean. \end{enumerate} \item $(v_1,v_2,v_3)$ is flipped Euclidean, we can suppose without loss of generality that $(v_1,v_2)$ has zero length while the lengths of the other two edges are equal to $i\pi$. There are again two cases to consider. \begin{enumerate} \item $(v_1,v_2,v_4)$ is Euclidean, so that all its edge lengths are zero, and it easily follows that $s$ is flipped Euclidean. \item $(v_1,v_2,v_4)$ is flipped Euclidean, so that the edges $(v_1,v_4)$ and $(v_2,v_4)$ have length $i\pi$. Then $s$ is doubly flipped Euclidean. \end{enumerate} \end{enumerate} \end{proof} According to the lemma, there are three types of angled simplices: Euclidean, hyperbolic and spherical. An angled simplex is of Euclidean (or hyperbolic) type if it can be flipped to a Euclidean (or hyperbolic) simplex.
A spherical type simplex is the same as a spherical simplex. The type of a simplex can be determined by the length of any of its edges. \begin{cor} Suppose $e$ is an edge of an angled simplex $s$, of length $l(e)$. Then $s$ is of \begin{enumerate} \item{\bf Euclidean type} if and only if $l(e) \in \{ 0, i \pi \}$, \item{\bf hyperbolic type} if and only if $l(e) \in {\mathbb R}_{<0} \cup \{ i \pi + r | r \in {\mathbb R}_{>0}\}$, \item{\bf spherical type} if and only if $l(e) \in i(0, \pi)$. \end{enumerate} In particular, if $e$ and $e'$ are two edges of an angled simplex $s$, then their lengths $l(e)$ and $l(e')$ are in the same subset listed above. \end{cor} \subsection{Combinatorics of the space of simplices} The classification given in \S4.4 can be interpreted in terms of the HS-geometry as follows, as for M\"obius triangles in Lemma \ref{lm:mobius}. Let $s$ be a simplex, and let $\alpha\in AS(s)$. \begin{enumerate} \item If $\alpha$ is spherical, the $\alpha_{ij}$ are the dihedral angles of a unique simplex in $S^3$. \item If $\alpha$ is hyperbolic, the $\alpha_{ij}$ are the dihedral angles of a unique simplex in $H^3$. \item If $\alpha$ is flipped hyperbolic, the $\alpha_{ij}$ are the dihedral angles of a unique simplex in $\tilde{HS}^3$, with three vertices in one of the copies of $H^3$ and one in the other. \item If $\alpha$ is doubly flipped hyperbolic, the $\alpha_{ij}$ are the dihedral angles of a unique simplex in $\tilde{HS}^3$, with two vertices in one of the copies of $H^3$ and two in the other. \item If $\alpha$ is Euclidean, the $\alpha_{ij}$ are the dihedral angles of a Euclidean simplex, unique up to homothety. They are also limits of sequences of angles of hyperbolic simplices. \item If $\alpha$ is flipped Euclidean, it is the limit of a sequence of angles of flipped hyperbolic polyhedra. \item Similarly, if $\alpha$ is doubly flipped Euclidean, it is the limit of a sequence of angles of doubly flipped hyperbolic polyhedra.
\end{enumerate} Consider now $AS(s)$ as the space of 6-tuples of angles in $(0,\pi)$ satisfying some linear inequalities. It contains some subdomains corresponding to the different types of simplices. It is interesting to consider the combinatorics of this decomposition of $AS(s)$. The definitions show clearly that any continuous path going from a simplex of spherical type to a simplex of hyperbolic type has to go through a simplex of Euclidean type. Moreover, the only way to go from a hyperbolic simplex to a flipped hyperbolic simplex is through spherical simplices, and similarly for doubly flipped hyperbolic simplices. \section{The generalized volume} \subsection{The Schl\"afli formula} The last part of the picture considered here is the generalized volume, which is defined for the simplices in the extended hyperbolic space. There are several ways to define it, we use here the Schl\"afli formula, which we first recall for ``usual'' (spherical or hyperbolic) simplices. We refer to \cite{milnor-schlafli,geo2} for a proof. \begin{lemma} \label{lm:schlafli} For any one-parameter family of spherical (resp. hyperbolic) simplices, its volume $V$ satisfies $2dV=\sum_e Im(l_e)d\alpha_e$ (resp. $2dV=\sum_e l_ed\alpha_e$). \end{lemma} Note that the lengths considered here are those defined above, so that they are in $i(0,\pi)$ for spherical simplices, and in ${\mathbb R}_{<0}$ for hyperbolic simplices. \subsection{The generalized volume} \label{ssc:volume} The previous lemma leads to a fairly natural definition of a real-valued volume over the space of angled simplices. \begin{defi} Let $s$ be a tetrahedron and let $\omega$ be the $1$-form (Schl\"afli 1-form) defined on $AS(s)$ by $2\omega=\sum_e (Re(l_e)+Im(l_e)) d\alpha_e$. \end{defi} Note that the Schl\"afli 1-form is a continuous 1-form defined on the 6-dimensional convex polytope $AS(s)$. It is proved in \cite{luo-ajm} that, \begin{lemma} $\omega$ is closed.
\end{lemma} Remark that $\omega$ vanishes on the subspace of Euclidean simplices. \begin{defi} The generalized volume $V:AS(s)\rightarrow {\mathbb R}$ is the primitive of $\omega$ which vanishes on the Euclidean simplices. \end{defi} There is another possibility, namely to define the volume as a complex-valued function, defining $\omega$ as $(1/2)\sum_e l_e d\alpha_e$. The definition chosen here serves well for our purposes. Note that $V$ corresponds to the usual volume on spherical and hyperbolic simplices by Lemma \ref{lm:schlafli}. The volume of Euclidean simplices is zero by definition. However, the volumes of flipped and doubly flipped Euclidean simplices are not zero. \begin{lemma} \label{lm:vol-euclidean} Let $\alpha\in AS(s)$. \begin{enumerate} \item Suppose that $\alpha$ is flipped Euclidean, with the lengths of the edges adjacent to $v_1$ equal to $i\pi$ and the other lengths equal to $0$. Then $$ V(\alpha)=\pi(\alpha_{12}+\alpha_{13}+\alpha_{14}-\pi)~. $$ \item Suppose that $\alpha$ is doubly flipped Euclidean, with $l_{12}=l_{34}=0$ and the other lengths equal to $i\pi$. Then $$ V(\alpha) = \pi(\alpha_{13}+\alpha_{14}+\alpha_{23}+\alpha_{24}-2\pi)~. $$ \end{enumerate} \end{lemma} Note that in each case the volume, without the factor $\pi$, is equal to the area of a spherical polygon -- this will be useful below. \begin{proof}[Proof of Lemma \ref{lm:vol-euclidean}] For the first case, consider a small deformation that increases slightly the $\alpha_{1i}$, $2\leq i\leq 4$. This deforms $\alpha$ into a spherical simplex $\alpha'$, with vertices $v_2,v_3$ and $v_4$ very close to the antipode of $v_1$. The (spherical) Schl\"afli formula, applied to a 1-parameter family deforming this simplex to a segment of length $\pi$, shows that the volume of this simplex is equal to $\pi(\alpha'_{12}+\alpha'_{13}+\alpha'_{14}-\pi)$, and the result follows for $\alpha$.
The same argument works in the second case, the corresponding spherical simplex now has $v_1,v_2$ very close and almost antipodal to both $v_3$ and $v_4$. \end{proof} There is a quite different way to define this ``volume'' of domains in the extended hyperbolic space, in terms of an analytic continuation \cite{cho-kim}. \subsection{Smoothness} For a closed triangulated 3-manifold $(M, T)$, the volume $V$ of an angle system $x \in AS(T)$ is the sum of the volumes of its angled 3-simplices. Thus $V: AS(T) \to {\mathbb R}$ is a $C^1$ smooth function. Moreover it is real analytic outside the set of Euclidean type simplices. \section{Critical points} This section contains the proof of Theorem \ref{tm:main}. \subsection{Gluing conditions} Suppose $(M, T)$ is a connected triangulated closed 3-manifold so that $AS(T) \neq \emptyset$. We will consider the volume optimization $V: AS(T) \to {\mathbb R}$. \begin{lemma} Let $\theta_0\in AS(T)$ be a critical point of $V$ on $AS(T)$. Then, for each edge $e$ of $T$, the lengths of $e$ for all the simplices containing it are equal. \end{lemma} This follows from the definition of $\omega$. By the classification lemma \ref{lm:simplex} and the connectivity of $M$, we see that the types of any two 3-simplices of $T$ at $\theta_0$ are the same. If $\theta_0$ is a local maximum point of $V$, then it cannot happen that all 3-simplices at $\theta_0$ are Euclidean simplices. Indeed, otherwise the volume $V(\theta_0)$ is zero. However, if we perturb $\theta_0$ slightly in $AS(T)$ to obtain a new point $\theta_0'$, then all simplices at $\theta_0'$ can be hyperbolic or spherical simplices. Thus $V(\theta_0') > V(\theta_0)$, which contradicts the local maximum condition. According to Lemma \ref{lm:lengths-unique}, for non-Euclidean type simplices, edge lengths determine the isometry type. So we obtain, \begin{cor} Suppose $\theta_0$ is not of Euclidean type. The faces of the simplices can be glued isometrically.
Furthermore, $\theta_0$ defines in this way either a spherical structure or an HS-structure $h$ on $M$. \end{cor} Indeed, there are two possibilities. Namely either all simplices at $\theta_0$ are of spherical type, or they are all of hyperbolic type. In the spherical type case, all simplices are spherical and are glued by isometries so that the sum of angles around each edge is $2\pi$. Thus we obtain a spherical metric on $M$. In the case where all simplices are of hyperbolic type, by Lemma \ref{lm:simplex}, we realize each simplex at $\theta_0$ as a geometric tetrahedron in $\tilde{HS}^3$ so that their faces can be glued isometrically. Thus, we obtain an HS-structure on $M$. Indeed, there are two subcases which could occur. In the first case, all simplices are hyperbolic. Thus we obtain a hyperbolic metric on $M$. In the second case, some simplex is flipped hyperbolic. Then we obtain an HS-structure on $M$ by gluing these geometric tetrahedra in HS-geometry. Note that all vertices of $T$ are in the hyperbolic part of this HS-structure. \subsection{Normal spheres in HS-structures} \label{ssc:normal} Continuing the proof of Theorem \ref{tm:main}, we consider here an HS-structure $h$ on $M$, along with a triangulation $T$ with all vertices of $T$ in the hyperbolic part of $h$. Suppose moreover that the de Sitter part $M_{dS}$ for $h$ is non-empty. Let $M_0$ be a connected component of $M_{dS}$. Then $M_0$ is geodesically complete, so it is isometric either to the de Sitter space $dS^3$ or to its quotient by the antipodal map (see \cite{mess,mess-notes}). Therefore any plane in the tangent space to $M_0$ at a point is tangent to a (unique) totally geodesic space-like plane in $M_0$, which is homeomorphic either to $S^2$ (in the first case) or to ${\mathbb R} P^2$ (in the second case). Each of those totally geodesic surfaces is a normal surface in the triangulation $T$ of $M$.
This simple argument shows that each connected component of the de Sitter part of $h$ corresponds to a normal surface in $(M,T)$. \subsection{Normal spheres for Euclidean critical points of $V$} In this section we consider the same question as in \S \ref{ssc:normal}, about normal surfaces in $(M,T)$, but for critical points of $V$ for which all simplices are of Euclidean type. The arguments are somewhat similar but are less geometric and more combinatorial, because the geometric structures on the simplices cannot be glued to obtain a geometric structure of Euclidean type on $M$. We have seen in \S \ref{ssc:volume} that to each flipped (resp. doubly flipped) Euclidean simplex $s$ in $T$ can be associated a spherical triangle (resp. a quadrilateral). The edges of this triangle (resp. quadrilateral) are associated to the 2-faces of $T$ which have exactly two edges of length $i\pi$. Each such face bounds two simplices which are both either flipped or doubly flipped. It follows that the triangles (resp. quadrilaterals) can be glued along their edges to obtain a closed surface $\Sigma$ (which in general is not connected) -- however this gluing cannot in general be isometric for the spherical metrics since the lengths of the edges do not match. Moreover, the vertices of the triangulation of $\Sigma$ correspond to the edges of $T$ of length $i\pi$. \begin{remark} The angles of the triangles (resp. quadrilateral) at each vertex sum up to $2\pi$. \end{remark} \begin{proof} The angles of the triangles (resp. quadrilateral) adjacent to each vertex of $\Sigma$ are equal to the angles of the simplices of $T$ at the corresponding edge of length $i\pi$. Those angles sum up to $2\pi$ by definition of an angle structure on $T$. \end{proof} \begin{cor} Each connected component of $\Sigma$ is homeomorphic either to the sphere or to the projective plane. The sum of the areas of the faces of $\Sigma$ is an integer multiple of $2\pi$. 
\end{cor} \begin{proof} Let $\Sigma_0$ be a connected component of $\Sigma$, let $F_0$ be the set of its 2-faces, and let $V_0$ be the set of its vertices. Given $f\in F_0$ and $v\in V_0$, we write $v\simeq f$ if $v$ is adjacent to $f$, in this case we call $\theta_{f,v}$ the angle of $f$ at $v$. Let $a(f)$ be the area of the face $f$ of $\Sigma$. For each face $f\in F_0$ of $\Sigma_0$, we have by the Gauss-Bonnet formula $$ \sum_{v\in V_0, v\simeq f} (\pi-\theta_{f,v}) = 2\pi - a(f)~. $$ Summing over the faces of $\Sigma_0$ yields that $$ \sum_{f\in F_0}\left( \sum_{v\in V_0, v\simeq f} (\pi-\theta_{f,v})\right) = 2\pi \# F_0 - \sum_{f\in F_0} a(f)~. $$ The number of wedges in the triangulation of $\Sigma_0$ is twice the number of edges, which we denote by $\# E_0$. Therefore $$ \sum_{f\in F_0}\sum_{v\in V_0, v\simeq f} \pi = 2\pi \# E_0~. $$ Moreover the angles of the faces at each vertex sum up to $2\pi$, so that $$ \sum_{f\in F_0}\sum_{v\in V_0, v\simeq f} \theta_{f,v} = 2\pi \# V_0~. $$ Using the definition of the Euler characteristic, we obtain that $$ \sum_{f\in F_0} a(f) = 2\pi \# V_0 - 2\pi \# E_0 + 2\pi \# F_0 = 2\pi \chi(\Sigma_0)~, $$ and both parts of the corollary follow immediately. \end{proof} \begin{cor} At a critical point of $V$ where all simplices are of Euclidean type, $V$ is an integer multiple of $2\pi^2$. \end{cor} \begin{proof} Lemma \ref{lm:vol-euclidean} shows that the volume of each flipped (resp. doubly flipped) simplex is equal to $\pi$ times the area of the corresponding triangle (resp. quadrilateral) in $\Sigma$. So the total volume is $\pi$ times the area of $\Sigma$, so that it is a non-negative integer multiple of $2\pi^2$. \end{proof} The proof of Theorem \ref{tm:main} is obtained by putting together the results of this section. 
\section{Further questions} The main point presented here is that extended hyperbolic structures have a natural role when constructing geometric structures on manifolds by maximization of volume over triangulated manifolds. This leads to a number of questions, for which answers would presumably help make progress on the understanding of geometric structures on 3-manifolds. \begin{q} If $M$ is a connected sum of several hyperbolic 3-manifolds, does $M$ support an HS-structure? \end{q} Another, more general question, is whether the constructions considered here can be extended to encompass angle structures with some ideal vertices. This would mean allowing angle structures on simplices for which the sum of the angles at a vertex is equal to, rather than less than, $2\pi$. Our hope is that such ideal vertices would permit critical points of the volume to realize torus decompositions of non-atoroidal 3-manifolds. Another possibility, adding some flexibility to the construction, would be to allow for vertices in the de Sitter part of the extended hyperbolic space. Another natural question is of course to understand the critical points of $V$ on the boundary of $AS(T)$, hopefully showing that those boundary critical points correspond to collapsings. A last, more technical question, is whether existence of a critical point of $V$ on $AS(T)$ for which all simplices are of Euclidean type has topological consequences on $M$. For instance, if all simplices are Euclidean (rather than only of Euclidean type), does it follow that $M$ admits a Euclidean metric or more generally is $M$ a connected sum of Seifert fibered spaces? This is not obvious since the angles of the simplices add up to $2\pi$ at the edges of $T$, but the edge lengths do not match so that the faces of the simplices cannot be isometrically glued. \newcommand{\etalchar}[1]{$^{#1}$} \def$'${$'$}
1,108,101,565,868
arxiv
\section{Introduction} For many years there have been hints that the number of muons in ultrahigh energy cosmic ray (UHECR) air showers is larger than predicted by hadronic interaction models, e.g., \cite{HiRes-MIAmuons}. Most recently, the Pierre Auger Observatory~\cite{augerNIM15} compared the muon number in highly inclined events to predictions using the two leading LHC-tuned hadronic event generators (HEGs) for air showers, QGSJet-II-04~\cite{QII-04,QII} and EPOS-LHC~\cite{EPOS-LHC,EPOS}. The observed number of muons for $10^{19}$ eV primaries was found \cite{augerHorizMuons15} to be 30\%-80\% higher than the models predict assuming the primary composition inferred from the depth-of-shower-maximum distribution for each given model~\cite{augerXmaxMeas14,augerXmaxInterp14}, but the significance of the inferred muon excess is limited due to the uncertainty in the absolute energy calibration. For a given primary energy and mass, the number of muons is sensitive to hadronic interactions. Typically about 25\% of the final state energy in each hadronic interaction is carried by $\pi^{0}$'s, which immediately decay to two photons and thus divert energy from the hadronic cascade, which is the main source of muons, to the electromagnetic (EM) cascade. The hadronic cascade terminates when the energy of charged pions drops low enough that they decay before interacting, $\mathcal O(100$ GeV). If the average fraction of EM energy per interaction were increased or decreased, or there were more or fewer generations of hadronic interactions in the cascade (which depends on the primary mass and properties of the final states such as multiplicity), the muon ground signal would be lower or higher. Therefore, a significant discrepancy between observed and predicted muon ground signal would indicate that the description of hadronic interactions is inaccurate, assuming that the composition can be properly understood. 
\begin{figure}[ht] \centering \includegraphics[width=\linewidth]{fig1} \caption{Top: The measured longitudinal profile of an illustrative air shower with its matching simulated showers, using QGSJet-II-04 for proton (red solid) and iron (blue dashed) primaries. Bottom: The observed and simulated ground signals for the same event (p: red squares, dashed-line, Fe: blue triangles, dot-dash line) in units of vertical equivalent muons; curves are the lateral distribution function (LDF) fit to the signal.} \label{figFDSDComp} \end{figure} There has been excellent recent progress in composition determination~\cite{augerXmaxMeas14,augerXmaxInterp14,augerTAwg}, which provides a valuable ``prior" for modeling individual showers. Here we complement that progress with a new, more powerful approach to the muon analysis which removes the sensitivity to the absolute energy calibration. It is applicable to the entire data set of hybrid events: those events whose longitudinal profile (LP) is measured by the Pierre Auger Observatory's fluorescence detector (FD) \cite{augerFD,augerNIM15} at the same time the ground signal is measured with its surface detector (SD) \cite{augerSD,augerNIM15}. The ground signal of an individual shower of a CR of given energy and mass, depends primarily on the zenith angle and the depth-of-shower-maximum, $X_{\rm max}$, because together these determine the path length and thus attenuation of the electromagnetic and muonic components at ground. In order to most simply characterize a possible discrepancy between the predicted and observed properties of the air shower, we introduce an energy rescaling parameter, $R_{E} $, to allow for a possible shift in the FD energy calibration, and a multiplicative rescaling of the hadronic component of the shower by a factor $R_{\rm had}$. 
$R_{E}$ rescales the total ground signal of the event approximately uniformly, while $R_{\rm had}$ rescales only the contribution to the ground signal of inherently hadronic origin, which consists mostly of muons. Because the EM component of the shower is more strongly attenuated in the atmosphere than the muonic component, and the path length in the atmosphere varies as a function of zenith angle, $R_{E}$ and $R_{\rm had}$ can be separately determined by fitting a sufficiently large sample of events covering a range of zenith angles. In this analysis we test the consistency of the observed and predicted ground signal \emph{event by event}, for a large sample of events covering a wide range of $X_{\rm max}$ and zenith angles. By selecting simulated events which accurately match the observed LP of each event, we largely eliminate the noise from shower-to-shower fluctuations in the ground signal due to fluctuations in $X_{\rm max}$, while at the same time maximally exploiting the relative attenuation of the EM and muonic components of the shower. The LP and lateral distribution of the ground signal of an illustrative event are shown in Fig.~\ref{figFDSDComp}, along with a matching proton and iron simulated event; the ground signal size is measured in units of vertical equivalent muons (VEM), the calibrated unit of SD signal size~\cite{augerSDVEMCal}. Figure \ref{figFDSDComp} (bottom) illustrates a general feature of the comparison between observed and simulated events: the ground signal of the simulated events is systematically smaller than the ground signal in the recorded events. Elucidating the nature of the discrepancy is the motivation for the present study. The data we use for this study are the 411 hybrid events with $10^{18.8} < E < 10^{19.2}$ eV and zenith angle $0^{\circ}-60^{\circ}$ recorded between 1 January 2004 and 31 December 2012, which satisfy the event quality selection criteria in Refs. \cite{sdfdCalICRC, sdfdCalICRC11}. 
We thus concentrate on a relatively narrow energy range such that the mass composition changes rather little~\cite{augerXmaxMeas14,augerXmaxInterp14}, while having adequate statistics. This energy range corresponds to an energy of 110 to 170 TeV in the center-of-mass reference frame of the UHECR and air nucleon, far above the LHC energy scale. Figure \ref{figSecTR} shows the ratio of S(1000), the ground signal size at 1000 m from the shower core~\cite{augerNIM15}, for the events in our sample relative to that predicted for simulated events with matching zenith angle, depth-of-shower-maximum ($X_{\rm max}$) and calorimetric FD energy, for QGSJet-II-04~\cite{QII-04} and EPOS-LHC~\cite{EPOS-LHC}. For each HEG, the analysis is done using the composition mix which reproduces the observed $X_{\rm max}$ distribution~\cite{augerXmaxMeas14,augerXmaxInterp14}; we also show the result for pure protons for comparison. The discrepancy between a measured and simulated S(1000) evident in Fig.~\ref{figSecTR} is striking, at all angles and for both HEGs, and for both the mixed composition and pure proton cases. The zenith angle dependence of the discrepancy is the key to allowing $R_E$ and $R_{\rm had}$ to be separated. As seen in Fig. \ref{components}, the ground signal from the hadronic component is roughly independent of zenith angle, whereas that of the EM component falls with sec($\theta)$, so that to reproduce the rise seen in Fig.~\ref{figSecTR}, the hadronic component must be increased with little or no modification of the EM component. This will be quantified below. The analysis relies on there being no significant zenith-angle-dependent bias in the determination of the SD and FD signals. The accuracy of the detector simulations as a function of zenith angle in the $0^{\circ}-60^{\circ}$ range of the study here, and hence the absence of a zenith angle dependent bias in the SD reconstruction, has been extensively validated with muon test data~\cite{tanktests}. 
The absence of zenith angle dependence in the normalization of the FD signal follows from the zenith angle independence of $E_{\rm FD} /E_{\rm SD}$ of individual hybrid events. \begin{figure}[t] \centering \includegraphics[width=\linewidth]{fig2} \caption{The average ratio of S(1000) for observed and simulated events as a function of zenith angle, for mixed or pure proton compositions.} \label{figSecTR} \end{figure} \section{Production of Simulated Events} The first step of the analysis is to generate a set of Monte Carlo (MC) events, to find simulated events matching the LPs of the data events. The MC air-shower simulations are performed using the SENECA simulation code~\cite{Seneca}, with FLUKA~\cite{fluka1} as the low-energy HEG. Simulation of the surface detector response is performed with GEANT4~\cite{geant4} within the software framework \mbox{$\overline{\textrm{Off}}$\hspace{.05em}\protect\raisebox{.4ex}{$\protect\underline{\textrm{line}}$}}\xspace ~\cite{offline} of the Auger Observatory. We produce showers matching each data event, with both HEGs and for all four primary cosmic-ray types (proton, helium, nitrogen, and iron nuclei), as follows: \\ $\bullet$ Repeatedly generate showers with the measured geometry and calorimetric energy of the given data event, reconstructing the LP and determining the $X_{\rm max}$ value until 12 showers having the same $X_{\rm max}$ value as the real event (within the reconstruction uncertainty) have been produced, or stopping after 600 tries. For data events whose $X_{\rm max}$ cannot be matched with all primary types, the analysis is done using only those primaries that give 12 events at this stage, in 600 tries \cite{Footnote1}.\\ $\bullet$ Repeat the simulation of these 12 showers at very high resolution, and select the 3 which best reproduce the observed longitudinal profile based on the $\chi^2$-fit. 
For each of the 3 selected showers, do a full surface detector simulation and generate SD signals for comparison with the data. From these detailed simulations of 3 showers that match the full LP of the data event, determine the hadronic component of the simulated ground signal and the shower-to-shower variance. The choices of 12 and 3 showers in the two stages above assure, respectively, that i) the LPs of the final simulated data set fit the real data with a $\chi^2$ distribution that is comparable to that found in a Gaisser-Hillas fit to the data itself, and ii) that the variance within the simulated events for a given shower is smaller than the shower-to-shower fluctuations in real events. More than $10^7$ showers must be simulated to create the analysis library of well-fitting simulated showers for the 411 hybrid events of the data set. A high-quality fit to the LP is found for all events, for at least one primary type. \section{Quantifying the Discrepancy} The history of all muons and EM particles ($e^{\pm}$ and $\gamma$'s) reaching the ground is tracked during simulation, following the description in Ref. \cite{univ2011}. Most muons come from $\pi^\pm$ or K decay and most EM particles from $\pi^0$ decay. The portion of EM particles that are produced by muons through decay or radiative processes, and by low-energy $\pi^0$'s, are attributed to the hadronic signal, $S_{\rm had}$; muons that are produced through photoproduction are attributed to the electromagnetic signal, $S_{EM}$. The relative importance of the different components varies with zenith angle, as illustrated in Fig.~\ref{components}. Once $S_{EM}$ and $S_{\rm had}$ are known for a given shower $i$, with assumed primary mass $j$, the rescaled simulated S(1000) can be written as: \begin{equation} S_{\rm resc}(R_{E},R_{\rm had})_{i,j} \equiv R_{E} ~ {S_{EM, i,j}} + R_{\rm had} ~R_{E}^{\alpha} ~{S_{{\rm had}, i,j}}. 
\label{Srs} \end{equation} \begin{figure}[t] \centering \includegraphics[width=\linewidth]{fig3} \caption{The contributions of different components to the average signal as a function of zenith angle, for stations at 1 km from the shower core, in simulated 10 EeV proton air showers illustrated for QGSJet-II-04.} \label{components} \end{figure} The linear scaling of the EM contribution with $R_E$ is obvious, as is the factor $R_{\rm had}$ for the hadronic contribution. The factor $R_{E}^{\alpha}$ reflects the fact that the hadronic signal increases slower than linearly with energy, since higher energy events require more stages in the shower cascade before the pions have low enough energy to decay to muons rather than re-interact, and at each stage, energy is removed from the hadronic cascade. The value of $\alpha$ is a prediction of the HEG and depends also on mass; in practice both EPOS and QGSJet-II simulations find $\alpha \approx 0.9$, relatively independently of composition~\cite{alpha}. We investigated the sensitivity of our conclusions to the possibility that $\alpha$ predicted by the models is incorrect, and find its potential effect is small enough to be ignored for the present analysis~\cite{Footnote2}. The best fit values of $R_{E}$ and $R_{\rm had}$ are determined by maximizing the likelihood function $\prod_{i} P_{i}$, where the index $i$ runs over each event in the data set and the contribution of the $i$th event is \begin{equation} \label{P_i} P_{i} = \sum_{j} \frac{p_{j}\left(X_{{\rm max}, i}\right) }{\sqrt{2\pi}\sigma_{i,j}} ~ {\rm exp}\left[-\frac{\left(S_{\rm resc}(R_{E},R_{\rm had})_{i,j}-S(1000)_{i}\right)^2}{2~\sigma_{i,j}^2}\right]. \end{equation} The index $j$ labels the different possible primaries (p, He, N and Fe), and $p_{j}\left(X_{{\rm max}, i}\right)$ is the prior on the probability that an event with $X_{{\rm max}, i}$ has mass $j$, given the mass fractions $f_j$ in the interval $10^{19\pm0.2}$ eV (see Ref. 
\cite{augerXmaxMeas14} for the fit to the observed $X_{\rm max}$ distribution for each HEG): \begin{equation} p_j(X_{\rm max}) = f_j \, \mathcal{P}_j(X_{\rm max}) \,/ \,\Sigma_{j'} f_{j'} \, \mathcal{P}_{j'} (X_{\rm max}), \end{equation} where $\mathcal{P}_j(X_{\rm max})$ is the probability density of observing $X_{\rm max}$ for primary type $j$, for the given HEG. The variance entering Eq.~(\ref{P_i}) includes (a) measurement uncertainty of typically 12\%, from the uncertainty in the reconstruction of S(1000), the calorimetric energy measurement, and the uncertainty in the $X_{\rm max}$ scale, as well as (b) the variance in the ground signals of showers with matching LPs due to shower-to-shower fluctuations (ranging from typically $16$\% for proton-initiated showers to 5\% for iron-initiated showers) and (c) the uncertainty in separating $S_{\mu}$ and $S_{EM}$ in the simulation, and from the limited statistics of having only three simulated events (typically 10\% for proton-initiated showers and 4\% for iron-initiated showers). \\ \section{Results and Discussion} \begin{table} \centering \caption{$R_E$ and $R_{\rm had}$ with statistical and systematic uncertainties, for QGSJet-II-04 and EPOS-LHC.} \label{tabFit} \begin{tabular}{lcc} \hline\hline Model & $R_{E}$ & $R_{\rm had}$ \\ \hline QII-04 p & ~~$1.09 \pm 0.08 \pm 0.09$ & ~~$1.59 \pm 0.17 \pm 0.09$ \\ QII-04 Mixed & ~~$1.00 \pm 0.08 \pm 0.11$ & ~~$1.61 \pm 0.18 \pm 0.11$ \\ EPOS p & ~~$1.04 \pm 0.08 \pm 0.08$ & ~~$1.45 \pm 0.16 \pm 0.08$ \\ EPOS Mixed & ~~$1.00 \pm 0.07 \pm 0.08$ & ~~$1.33 \pm 0.13 \pm 0.09$ \\ \hline\hline \end{tabular} \end{table} Table \ref{tabFit} gives the values of $R_{E}$ and $R_{\rm had}$ which maximize the likelihood of the observed ground signals, for the various combinations of HEGs and compositions considered. 
The systematic uncertainties in the reconstruction of $X_{\rm max}$, $E_{\rm FD}$ and S(1000) are propagated through the analysis by shifting the reconstructed central values by their one-sigma systematic uncertainties. Figure \ref{figContour} shows the one-sigma statistical uncertainty ellipses in the $R_{E}-R_{\rm had}$ plane; the outer boundaries of propagating the systematic errors are shown by the gray rectangles. The values of $R_{\rm had}$ needed in the models are comparable to the corresponding muon excess detected in highly inclined air showers~\cite{augerHorizMuons15}, as is expected because at high zenith angle the nonhadronic contribution to the signal (shown with red curves in Fig.~\ref{components}) is much smaller than the hadronic contribution. However the two analyses are not equivalent because a muon excess in an inclined air shower is indistinguishable from an energy rescaling, whereas in the present analysis the systematic uncertainty of the overall energy calibration enters only as a higher-order effect. Thus the significance of the discrepancy between data and model prediction is now more compelling, growing from 1.38 (1.77) sigma to 2.1 (2.9) sigma, respectively, for EPOS-LHC (QGSJet II-04), adding statistical and systematic errors from Fig. 6 of Ref. \cite{augerHorizMuons15} and Table \ref{tabFit}, in quadrature. The signal deficit is smallest (the best-fit $R_{\rm had}$ is the closest to unity) with EPOS-LHC and mixed composition. This is because, for a given mass, the muon signal is $\approx15 $\% larger for EPOS-LHC than QGSJet-II-04~\cite{pierogEPOSvsQII}, and in addition the mean primary mass is larger when the $X_{\rm max}$ data are interpreted with EPOS rather than with QGSJet-II~\cite{augerXmaxInterp14}. Within the event ensemble used in this study, there is no evidence of a larger event-to-event variance in the ground signal for fixed $X_{\rm max}$ than predicted by the current models. 
This means that the muon shortfall cannot be attributed to an exotic phenomenon producing a very large muon signal in only a fraction of events, such as could be the case if micro-black holes were being produced at a much-larger-than-expected rate~\cite{feng+BH01,faUHECR13}. \begin{figure}[t] \centering \includegraphics[width=\linewidth]{fig4} \caption{Best-fit values of $R_{E}$ and $R_{\rm had}$ for QGSJet-II-04 and EPOS-LHC, for pure proton (solid circle/square) and mixed composition (open circle/square). The ellipses and gray boxes show the 1-$\sigma$ statistical and systematic uncertainties.} \label{figContour} \end{figure} \section{Summary} We have introduced a new method to study hadronic interactions at ultrahigh energies, which minimizes reliance on the absolute energy determination and improves precision by exploiting the information in individual hybrid events. We applied it to hybrid showers of the Pierre Auger Observatory with energies 6-16 EeV ($E_{\rm CM}$ = 110 to 170 TeV) and zenith angle $0^\circ-60^\circ$, to quantify the disparity between state-of-the-art hadronic interaction modeling and observed UHECR atmospheric air showers. We considered the simplest possible characterization of the model discrepancies, namely an overall rescaling of the hadronic shower, $R_{\rm had}$, and we allow for a possible overall energy calibration rescaling, $R_{E}$. No energy rescaling is needed: $R_{E} = 1.00\pm 0.10$ for the mixed composition fit with EPOS-LHC, and $R_{E} = 1.00\pm 0.14$ for QGSJet II-04, adding systematic and statistical errors in quadrature. This uncertainty on $R_{E}$ is of the same order of magnitude as the 14\% systematic uncertainty of the energy calibration~\cite{sdfdCalICRC}. We find, however, that the observed hadronic signal in these UHECR air showers is significantly larger than predicted by models tuned to fit accelerator data. 
The best case, EPOS-LHC with mixed composition, requires a hadronic rescaling of $R_{\rm had} = 1.33\pm0.16$ (statistical and systematic uncertainties combined in quadrature), while for QGSJet II-04, $R_{\rm had} = 1.61\pm 0.21$. It is not yet known whether this discrepancy can be explained by some incorrectly modeled features of hadron collisions, possibly even at low energy, or may be indicative of the onset of some new phenomenon in hadronic interactions at ultrahigh energy. Proposals of the first type include a higher level of production of baryons~\cite{pierogEPOSvsQII} or vector mesons~\cite{drescher07} (see Ref. \cite{engel15} for a recent review of the many constraints to be satisfied), while proposals for possible new physics are discussed in Refs. \cite{stringPerc12,faUHECR13,afICRC13}. The discrepancy between models and nature can be elucidated by extending the present analysis to the entire hybrid data set above $10^{18.5}$ eV, to determine the energy dependence of $R_E$ and $R_{\rm had}$. In addition, the event by event analysis introduced here can be generalized to include other observables with complementary sensitivity to hadronic physics and composition, e.g., muon production depth~\cite{MPD}, risetime~\cite{risetime} and slope of the LDF. AugerPrime, the anticipated upgrade of the Pierre Auger Observatory~\cite{augerprime}, will significantly improve our ability to investigate hadronic interactions at ultrahigh energies, by separately measuring the muon and EM components of the ground signal. \section*{Acknowledgments} \begin{sloppypar} The successful installation, commissioning, and operation of the Pierre Auger Observatory would not have been possible without the strong commitment and effort from the technical and administrative staff in Malarg\"ue. 
\end{sloppypar} \begin{sloppypar} We are very grateful to the following agencies and organizations for financial support: \end{sloppypar} \begin{sloppypar} Comisi\'on Nacional de Energ\'\i{}a At\'omica, Agencia Nacional de Promoci\'on Cient\'\i{}fica y Tecnol\'ogica (ANPCyT), Consejo Nacional de Investigaciones Cient\'\i{}ficas y T\'ecnicas (CONICET), Gobierno de la Provincia de Mendoza, Municipalidad de Malarg\"ue, NDM Holdings and Valle Las Le\~nas, in gratitude for their continuing cooperation over land access, Argentina; the Australian Research Council; Conselho Nacional de Desenvolvimento Cient\'\i{}fico e Tecnol\'ogico (CNPq), Financiadora de Estudos e Projetos (FINEP), Funda\c{c}\~ao de Amparo \`a Pesquisa do Estado de Rio de Janeiro (FAPERJ), S\~ao Paulo Research Foundation (FAPESP) Grants No.\ 2010/07359-6 and No.\ 1999/05404-3, Minist\'erio de Ci\^encia e Tecnologia (MCT), Brazil; Grant No.\ MSMT-CR LG13007, No.\ 7AMB14AR005, and the Czech Science Foundation Grant No.\ 14-17501S, Czech Republic; Centre de Calcul IN2P3/CNRS, Centre National de la Recherche Scientifique (CNRS), Conseil R\'egional Ile-de-France, D\'epartement Physique Nucl\'eaire et Corpusculaire (PNC-IN2P3/CNRS), D\'epartement Sciences de l'Univers (SDU-INSU/CNRS), Institut Lagrange de Paris (ILP) Grant No.\ LABEX ANR-10-LABX-63, within the Investissements d'Avenir Programme Grant No.\ ANR-11-IDEX-0004-02, France; Bundesministerium f\"ur Bildung und Forschung (BMBF), Deutsche Forschungsgemeinschaft (DFG), Finanzministerium Baden-W\"urttemberg, Helmholtz Alliance for Astroparticle Physics (HAP), Helmholtz-Gemeinschaft Deutscher Forschungszentren (HGF), Ministerium f\"ur Wissenschaft und Forschung, Nordrhein Westfalen, Ministerium f\"ur Wissenschaft, Forschung und Kunst, Baden-W\"urttemberg, Germany; Istituto Nazionale di Fisica Nucleare (INFN),Istituto Nazionale di Astrofisica (INAF), Ministero dell'Istruzione, dell'Universit\'a e della Ricerca (MIUR), Gran Sasso Center for Astroparticle 
Physics (CFA), CETEMPS Center of Excellence, Ministero degli Affari Esteri (MAE), Italy; Consejo Nacional de Ciencia y Tecnolog\'\i{}a (CONACYT) No.\ 167733, Mexico; Universidad Nacional Aut\'onoma de M\'exico (UNAM), PAPIIT DGAPA-UNAM, Mexico; Ministerie van Onderwijs, Cultuur en Wetenschap, Nederlandse Organisatie voor Wetenschappelijk Onderzoek (NWO), Stichting voor Fundamenteel Onderzoek der Materie (FOM), Netherlands; National Centre for Research and Development, Grants No.\ ERA-NET-ASPERA/01/11 and No.\ ERA-NET-ASPERA/02/11, National Science Centre, Grants No.\ 2013/08/M/ST9/00322, No. 2013/08/M/ST9/00728 and No.\ HARMONIA 5 -- 2013/10/M/ST9/00062, Poland; Portuguese national funds and FEDER funds within Programa Operacional Factores de Competitividade through Funda\c{c}\~ao para a Ci\^encia e a Tecnologia (COMPETE), Portugal; Romanian Authority for Scientific Research ANCS, CNDI-UEFISCDI partnership projects Grants No.\ 20/2012 and No.\ 194/2012, Grants No. 1/ASPERA2/2012 ERA-NET, No.\ PN-II-RU-PD-2011-3-0145-17 and No. PN-II-RU-PD-2011-3-0062, the Minister of National Education, Programme Space Technology and Advanced Research (STAR), Grant No.\ 83/2013, Romania; Slovenian Research Agency, Slovenia; Comunidad de Madrid, FEDER funds, Ministerio de Educaci\'on y Ciencia, Xunta de Galicia, European Community 7th Framework Program, Grant No.\ FP7-PEOPLE-2012-IEF-328826, Spain; Science and Technology Facilities Council, United Kingdom; Department of Energy, Contracts No. DE-AC02-07CH11359, No.\ DE-FR02-04ER41300, No.\ DE-FG02-99ER41107 and No. DE-SC0011689, National Science Foundation, Grants No.\ 0450696 and No.\ 1212528, and The Grainger Foundation, USA; NAFOSTED, Vietnam; Marie Curie-IRSES/EPLANET, European Particle Physics Latin American Network, European Union 7th Framework Program, Grant No.\ PIRSES-2009-GA-246806; and UNESCO. \end{sloppypar}
1,108,101,565,869
arxiv
\section{Introduction} \label{sect:intro} Over the last 8 billion years a large fraction of low-mass (M$_{\star}\la$\,10$^{9}$\,M$_{\odot}$) galaxies are still seen rapidly assembling most of their present-day stellar mass \citep{Cowie1996,Perez-Gonzalez2008}. Tracing the spectrophotometric properties of these vigorous star-forming \textit{dwarf} galaxies (SFDGs) out to $z\sim$1 is essential not only to study how they evolve through cosmic time, but also to understand the physical mechanisms driving the first stages of stellar mass build-up and chemical enrichment. To this end, key insights can be obtained from the tight relations found between stellar mass, metallicity and star formation rate (SFR). However, the shape and normalization of these relations at different redshifts are still poorly constrained at their low-mass end. While in the local Universe the mass-metallicity relation (MZR) has been extended down to 10$^8$\,M$_{\odot}$ \citep[e.g.][]{Andrews2013}, at intermediate and high redshifts, dwarf galaxies are strongly underrepresented \citep[e.g.][]{Henry2013}. These SFDGs are usually identified by their blue colors, high surface brightness and strong emission-lines. They include a rare population of extreme emission-line galaxies (EELGs) with the largest nebular content and lowest metal abundances \citep[e.g.][]{Kniazev2004,Papaderos2008,Hu2009,Atek2011,Morales-Luis2011}. Due to their high equivalent widths (EWs), an increasing number of EELGs are being discovered and characterized by deep spectroscopic surveys out to $z\sim$1 \citep[e.g.][]{Hoyos2005,Ly2014,Amorin2014a} and beyond \citep[e.g.][]{vdWel2011,Maseda2014}. In this \textit{Letter} we report the discovery of a sample of 31 EELGs at $0.2\la z \la 0.9$ identified from the \textit{VIMOS Ultra-Deep Survey} \citep[VUDS;][]{LeFevre2014}. 
We study their physical properties as part of a larger, ongoing study aimed at investigating the evolution of SFDGs out to $z\sim$\,1 using very deep spectroscopy \citep[e.g.][]{Amorin2014a}. The sensitivity of our VUDS spectra, detecting emission lines as faint as $\sim$1.5$\times$\,10$^{-18}$\,erg s$^{-1}$ cm$^{-2}$, makes it possible e.g., to derive $T_e$-based metallicities for a fraction of such faint galaxies. Thus, the present sample extends previous studies of star-forming (SF) galaxies at similar redshifts in size and limiting magnitude \citep{Henry2013,Ly2014}, allowing us to study in greater detail the LZR and MZR at $z<1$ two decades below 10$^{9}$M$_{\odot}$ with galaxies showing a wide range of properties, including a number of extremely metal-poor galaxies. Throughout this paper we adopt a standard $\Lambda$-CDM cosmology with $h$ = 0.7, $\Omega_m$ = 0.3 and $\Omega_\Lambda$ = 0.7. \section{Observations and sample selection} \label{sect:observations} \begin{figure}[t] \centering \includegraphics[angle=0,width=9.cm]{Fig1.eps} \caption{HST $F814W$-band imagery of EELGs in the COSMOS and ECDF fields covered by VUDS. Each postage stamp is 2$^{"}$ on a side. }\label{morphology} \end{figure} The \textit{Vimos Ultra Deep Survey} is a deep spectroscopic legacy survey of $\sim$\,10$^4$ galaxies carried out using VIMOS at ESO-VLT \citep{LeFevre2003}. This survey is aimed at providing a complete census of the SF galaxy population at $2 \la z \la 7$, covering $\sim$\,1\,deg$^2$ in three fields: COSMOS, ECDFS, and VVDS-2h. The VIMOS spectra consist of 14h integrations in the LRBLUE and LRRED grism settings, covering a combined wavelength range $3650<\lambda<9350$\AA, with a spectral resolution R$\sim$230. Data reduction, redshift measurement, and assessment of the reliability flags are described in detail in the survey and data presentation paper \citep{LeFevre2014}. 
The targets of VUDS have been primarily selected to have photometric redshifts $z_p > 2.4$ for either of the primary and secondary peaks of the PDF. A number of random targets purely magnitude selected to $23 \leq I_{\rm AB} \leq 25$ have been added to fill empty areas on observed slit masks. As a consequence, we identify a number of targets with spectroscopic redshift $z_s <2$. Many of these targets are galaxies with prominent optical emission lines, such as [\relax \ifmmode {\mbox O\,{\scshape ii}}\else O\,{\scshape ii}\fi]$\lambda$\,3727 or [\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi]$\lambda$\,5007, that artificially boost the observed magnitudes in the stellar spectral energy distributions (SED). For this \textit{Letter} a representative sample of 31 EELGs (12 from COSMOS, 11 from VVDS-2h, and 8 from ECDFS) with mean $I_{\rm AB}$\,$\sim$24.5 mag was identified from an early version of VUDS data containing $\sim$40\% of the final sample. We first consider primary and secondary target galaxies with very reliable spectroscopic redshift (98\% and 100\% confidence level), at $z\leq 0.93$. We then select galaxies with at least three emission lines detected, [\relax \ifmmode {\mbox O\,{\scshape ii}}\else O\,{\scshape ii}\fi], [\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi], and \relax \ifmmode {\mbox H}\beta\else H$\beta$\fi, and EW[\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi]$>$\,100\AA. The first criterion ensures the derivation of gas-phase metallicities and the second allows us to select SFDGs with the highest \textit{specific} SFR \citep[e.g.][]{Atek2011,Ly2014,Amorin2014a}. While our EELGs look unresolved in ground-based images precluding a full morphological analysis, morphological information can be obtained for a subset of 16 EELGs that have been observed by the HST-ACS in the $F814W$ ($I$) band. 
As illustrated in Fig.~\ref{morphology}, EELGs include galaxies with both round and irregular shapes, showing angular sizes $<$\,1$^{"}$. Using the automated method presented in \citet{Tasca2009} for the EELGs imaged by the ACS we derive circularized half-light radii, $r_{50}$\,$=$\,$R_{50}$\,$(b/a)^{0.5}$\,$\sim$\,0.4-0.8 kpc thus confirming their extreme compactness. In most cases, we find these EELGs with no clear signs of ongoing mergers or very close companions. \section{Physical properties of VUDS EELGs} \label{sect:analysis} Deep VUDS spectra for the sample of EELGs are presented in Fig.~A.1\footnote{\label{note2}Only available in the electronic edition of the journal}. Long exposure times allow us to detect in most cases a remarkably faint continuum ($\sim$\,5$\times$\,10$^{-20}$erg\,s$^{-1}$\,cm$^{2}$\,\AA$^{-1}$, 1$\sigma$) and very faint lines, such as [\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi]$\lambda$4363 or [\relax \ifmmode {\mbox N\,{\scshape ii}}\else N\,{\scshape ii}\fi]$\lambda$6584. In Table~1\footnote{\label{note1}Tables 1 and 2 are only available in electronic form at the CDS via http://cdsweb.u-strasbg.fr/cgi-bin/qcat?J/A+A/} we present line fluxes and uncertainties for the most relevant detected emission lines, which were performed manually using the IRAF task {\sl splot} following \citet{Amorin2012}. Reddening corrections were performed using the Balmer decrement, whenever available, and adopting the \citet{Calzetti2000} extinction law. For those EELGs with \relax \ifmmode {\mbox H}\alpha\else H$\alpha$\fi/\relax \ifmmode {\mbox H}\beta\else H$\beta$\fi \ or \relax \ifmmode {\mbox H}\beta\else H$\beta$\fi/\relax \ifmmode {\mbox H}\beta\else H$\gamma$\fi\ measurements the median reddening is $E(B-V)^{\rm med}_{\rm gas}$\,$=$\,0.26 ($\sigma=0.14$), in good agreement with previous studies for EELGs \citep[e.g.,][]{Dominguez2013,Ly2014,Amorin2014a}. 
In those cases where $E(B-V)_{\rm gas}$ cannot be measured through \relax \ifmmode {\mbox H}\alpha\else H$\alpha$\fi/\relax \ifmmode {\mbox H}\beta\else H$\beta$\fi \ or \relax \ifmmode {\mbox H}\beta\else H$\beta$\fi/\relax \ifmmode {\mbox H}\gamma\else H$\gamma$\fi,
Our EELGs, however, are located near the limits between SF and AGN regions in Fig.~\ref{diagnostics} due to their high ionization conditions, as suggested by their high [\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi]/[\relax \ifmmode {\mbox O\,{\scshape ii}}\else O\,{\scshape ii}\fi] ratios (Fig.~\ref{diagnostics}$a$). In the most extreme case, [\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi] shows EW of $\sim$\,1700\AA, while the [\relax \ifmmode {\mbox O\,{\scshape ii}}\else O\,{\scshape ii}\fi] line is only barely detected. Moreover, in three EELGs, we tentatively detect ($\sim$2.5$\sigma$) He{\sc ii}$\lambda$\,4686\AA\ emission, suggesting the presence of very young, hot stars. Being rare at $z<1$ \citep[e.g.,][]{Jaskot2013,Nakajima2013,Amorin2014a}, these EELGs show ionization parameters ($\log(q_{\rm ion})$\,$\ga$\,8\,cm\,s$^{-1}$) comparable to some low-luminosity high redshift galaxies \citep[e.g.,][]{Fosbury2003,Amorin2014b}. In seven EELGs we detect ($\geq$\,2$\sigma$) the intrinsically faint $T_{e}-$sensitive auroral line [\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi]$\lambda$4363\AA. For these galaxies we derive metallicity using the direct method \citep{Hagele2008}. In addition, we derive metallicities for the entire sample using the $R23(\equiv$(\relax \ifmmode {\mbox O\,{\scshape ii}}\else O\,{\scshape ii}\fi$+$\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi)/\relax \ifmmode {\mbox H}\beta\else H$\beta$\fi) parameter and the calibration of \citet{McGaugh1991}. Following \citet[][]{Perez-Montero2013} we apply the linear relations detailed in \citet{Lamareille2006} to make these R23 metallicities consistent with those derived using the direct method. In order to break the degeneracy of $R23$ (i.e., to choose between the lower or upper branch) we use two additional indicators. 
For EELGs at $z\la$\,0.45 we choose the branch that best matches the metallicity obtained from the \mbox{\it N2}($\equiv$\relax \ifmmode {\mbox N\,{\scshape ii}}\else N\,{\scshape ii}\fi/\relax \ifmmode {\mbox H}\alpha\else H$\alpha$\fi) parameter and the calibration by \citet{Perez-Montero2009}, while for EELGs at $z\ga$\,0.45 we choose the branch that best matches the metallicity from the calibrations based on the [\relax \ifmmode {\mbox Ne\,{\scshape iii}}\else Ne\,{\scshape iii}\fi], [\relax \ifmmode {\mbox O\,{\scshape ii}}\else O\,{\scshape ii}\fi], and [\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi] line ratios of \citet{Maiolino2008}. The difference between direct and strong-line metallicity estimations for the seven galaxies with [\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi]$\lambda$4363\AA\ is $<$\,0.2 dex. We find the metallicity of our EELG spanning a wide range of subsolar values (7.5$\la$12$+\log$(O/H)$\la$\,8.3), including four extremely metal-poor galaxies ($Z\la$\,0.1$Z_{\odot}$). \subsection{Stellar properties from multiwavelength SED fitting} Stellar masses and rest-frame absolute magnitudes of EELGs were derived by fitting their stellar SEDs. In short, we fit \citet{Bruzual2003} stellar population synthesis models to the broad-band photometry -- from UV to NIR -- of each galaxy using chi-square minimization {following \citet{Castellano2014}. Magnitudes are previously corrected from the contribution of prominent optical emission lines following \citet{Amorin2014a}, while models assume stellar metallicities that best agree with the observed gas-phase metallicity. } We adopt a \citet{Chabrier2003} IMF, \citet{Calzetti2000} extinction law and assume a standard declining exponential star formation history. 
As a result, we find the sample of EELGs in VUDS spanning a range of low luminosities, $-14.5$\,$\la$\,$M_{\rm AB}(B)$\,$\la$\,$-18.8$, and low stellar masses, 6.9$\la$\,$\log($M$_{\star}$/M$_{\odot}) \la$\,8.6. \begin{figure}[t!] \centering \includegraphics[angle=0,width=8.3cm]{Fig4.eps} \caption{The SFR stellar mass plane of low-mass SFDGs. Solid and dashed lines show the so-called main sequence of galaxies at different redshifts and its extrapolation to low-mass regime, respectively, according to \citet{Whitaker2012}. Dotted lines indicate constant sSFR from 10$^{-10}$\,yr$^{-1}$ (bottom) to 10$^{-6}$\,yr$^{-1}$ (upper). Colors indicate redshift bins with mean values $\langle$\,$z$\,$\rangle$\,$=$\,0 (black), $\langle$\,$z$\,$\rangle$\,$=$\,0.25 (blue), $\langle$\,$z$\,$\rangle$\,$=$\,0.4 (green), $\langle$\,$z$\,$\rangle$\,$=$\,0.6 (orange) and $\langle$\,$z$\,$\rangle$\,$=$0.8 (red). Asterisks show VUDS EELGs with EW$_{\rm rest}$(\relax \ifmmode {\mbox O\,{\scshape iii}}\else O\,{\scshape iii}\fi)\,$>$\,200\AA\ and {EW$_{\rm rest}$(\relax \ifmmode {\mbox H}\beta\else H$\beta$\fi)\,$>$\,60\AA.}} \label{M-SFR} \end{figure} \section{The relation between mass, metallicity, and ongoing SFR of low-mass galaxies out to $z\sim$1} \label{sect:discussion} In Fig.~\ref{M-SFR} we show the SFR-mass diagram for the EELGs in VUDS and from the literature. Star formation rates are derived from the extinction-corrected \relax \ifmmode {\mbox H}\alpha\else H$\alpha$\fi\ or \relax \ifmmode {\mbox H}\beta\else H$\beta$\fi\ luminosities using the calibration of \citet{Kennicutt1998} and assuming a \citet{Chabrier2003} IMF. At a given redshift, our EELGs show SFRs and stellar masses a factor of $\sim$10 lower than similar samples from the literature. However, nearly all EELGs shown in Fig.~\ref{M-SFR} are well above the extrapolation to low stellar mass of the main sequence of galaxies \citep{Whitaker2012} at a given $z$. 
The EELGs in VUDS show enhanced \textit{specific} SFRs (sSFR$\sim$10$^{-9}$-10$^{-7}$\,yr$^{-1}$) and SFR surface densities (median $\Sigma_{\rm SFR}=$ SFR$/2\pi r^2_{50} =$\,0.35 ($\sigma =$\,0.19)\,M$_{\odot}$\,yr$^{-1}$\,kpc$^{-2}$), comparable to more luminous galaxy-wide starbursts at similar and higher redshifts \citep{Ly2014,Amorin2014a,Amorin2014b}. \begin{figure}[t!] \centering \includegraphics[angle=0,width=7.6cm]{Fig5a_rev.eps}\\\vspace{2mm} \includegraphics[angle=0,width=7.6cm]{Fig5b_rev.eps}\\\vspace{2mm} \includegraphics[angle=0,width=7.6cm]{Fig5c_rev.eps}\\\vspace{2mm} \includegraphics[angle=0,width=7.6cm]{Fig5d_rev.eps}\\ \caption{{($a$) Luminosity-metallicity and ($b$) mass-metallicity relations for VUDS EELGs and SFDGs from the literature. Metallicity differences with respect to the extrapolation to low stellar mass of the FMR by \citet[][]{Andrews2013} and \citet[][]{Mannucci2011} are shown in $(c)$ and $(d)$, respectively. Dashed lines indicate 1$\sigma$ deviations for these relations. Colors and symbols are as in Fig.~\ref{M-SFR}. The data have been homogenized to the Chabrier IMF and the same strong-line metallicity calibration presented in Section~3.1.} } \label{MZR} \end{figure} In Fig.~\ref{MZR} we study the LZR and MZR traced by EELGs in VUDS and other low-mass galaxies at $0<z<1$. The EELGs {extend} the LZR down to $M_{\rm AB}(B) \sim -14.5$ and the MZR down to M$_{\star} \sim$\,10$^{7}$M$_{\odot}$, which means $\sim$1 dex lower than previous studies \citep[e.g.,][]{Henry2013}, thus increasing substantially the number of low-mass galaxies under study, especially at $z$\,$\ga$\,0.5. Despite the relatively large scatter, VUDS EELGs appear to follow the LZR and MZR of more luminous and massive SFDGs. In particular, we find most EELGs in broad agreement with the local ($z<0.3$) LZR of \citet{Guseva2009} and MZR of \citet{Andrews2013}, which have been derived from galaxies with {$T_e$-based metallicities. 
There is nevertheless a tendency for EELGs with larger EWs to be more metal-poor at a given luminosity, stellar mass, and redshift.} These galaxies are those with the highest sSFR, i.e., those with the largest deviations from the main sequence of star formation at a given $z$, shown in Fig.~\ref{M-SFR}. While they follow more reliably the LZR traced by extremely metal-poor galaxies \citep[e.g.,][]{Kewley2007,Hu2009}, they tend to lie below the local MZR, similarly to other extreme galaxies \citep[see, e.g., the \textit{green peas}][]{Amorin2010}. Part of the above apparent dependence of the MZR on SFR can be explained in terms of the fundamental metallicity relation \citep[FMR;][]{Mannucci2010}, which suggests that galaxies with higher sSFR tend to be more metal-poor at a given stellar mass. As shown in Fig.~\ref{MZR}, the position of the VUDS EELGs appears broadly consistent with the extrapolation to low masses of the FMR, independently of the parametrization and metallicity scale adopted. We notice, however, that the scatter in the FMR for EELGs ($\sigma \sim$\,0.20) seems slightly larger than expected for magnitude-selected samples in the local universe. Overall, the above results are consistent with a picture where the most extreme SFDGs are very gas-rich galaxies experiencing an early stage of a galaxy-wide starburst, possibly fed by recent accretion of metal-poor gas \citep[e.g.,][]{Amorin2010,SanchezAlmeida2014}. In this picture, at least part of the scatter in the above scaling relations could be produced by differences in the accretion and star formation histories. Figure~\ref{MZR} also suggests that the shape of the MZR can be very sensitive to selection effects in its very low-mass end. 
Gas-rich dwarfs with prominent emission lines, enhanced sSFR and low metallicities may be overrepresented with respect to the global population of SFDGs in magnitude-selected spectroscopic samples at these redshifts, making the shape of the MZR at low mass not entirely representative of main sequence galaxies. Clearly, a thorough study using the deepest spectroscopy available for a statistical significant complete sample of SFDGs is much needed to test this hypothesis. Forthcoming analysis of VUDS galaxies at $z<1$ using the complete database will enable us to scrutinize in detail the underexplored low-mass universe at $z<1$. \begin{acknowledgements} We thank the anonymous referee for helpful comments that helped to improve this manuscript. We thank the ESO staff for their continuous support for the VUDS survey, particularly the Paranal staff conducting the observations and Marina Rejkuba and the ESO user support group in Garching. This work is supported by funding from the European Research Council Advanced Grant ERC-2010-AdG-268107-EARLY and by INAF Grants PRIN 2010, PRIN 2012 and PICS 2013. RA and AF acknowledge the FP7 SPACE project “ASTRODEEP” (Ref.No: 312725), supported by the European Commission. AC, OC, MT and VS acknowledge the grant MIUR PRIN 2010--2011. DM gratefully acknowledges LAM hospitality during the initial phases of the project. This work is based on data products made available at the CESAM data center, Laboratoire d'Astrophysique de Marseille. This work partly uses observations obtained with MegaPrime/MegaCam, a joint project of CFHT and CEA/DAPNIA, at the Canada-France-Hawaii Telescope (CFHT) which is operated by the National Research Council (NRC) of Canada, the Institut National des Sciences de l'Univers of the Centre National de la Recherche Scientifique (CNRS) of France, and the University of Hawaii. 
This work is based in part on data products produced at TERAPIX and the Canadian Astronomy Data Centre as part of the Canada-France-Hawaii Telescope Legacy Survey, a collaborative project of NRC and CNRS. \end{acknowledgements} \bibliographystyle{aa}
1,108,101,565,870
arxiv
\section{Introduction} Driver inattention is a dangerous phenomenon that can arise because of various reasons: distractions, drowsiness due to fatigue, less reaction time due to speeding, and intoxication. The consequences of inattentive driving can severely affect the driver's safety even under normal road conditions, and can be devastating in terms of life-loss and/or long-lasting injuries. According to NHTSA's latest revelations \cite{NHTSA}, in 2019, 3142 lives were claimed by distracted driving, 795 lives were claimed by drowsy driving, 9378 deaths were due to speeding, and 10,142 deaths were due to drunk driving, all in the United States alone. Therefore, several types of driver-assist systems have been developed and deployed in modern vehicles to mitigate inattentiveness. However, traditional driver-assist technologies are static and not personalized, which are insufficient to handle the situations in futuristic transportation systems with mostly connected and/or autonomous vehicles. For example, several deadly accidents have been reported where the Tesla driving assistants were working normally but the drivers were inattentive \cite{Tesla, Tesla2}. As per SAE standard J3016 \cite{SAE}, the state-of-the-art vehicles mostly fall under Levels 2/3, which continue to demand significant driver attention (e.g. Tesla autopilot \cite{Tesla3}), especially in uncertain road and weather conditions. Therefore, there is a strong need to design dynamic, data-driven driver-alert systems which present effective interventions in a strategic manner based on its estimates of the driver's attention level and physical conditions. However, the design of strategic interventions to mitigate the ill effects of driver inattention is quite challenging due to three fundamental reasons. 
Firstly, the \emph{driver may not follow the vehicle's recommendations} (i) if the driver is inattentive, (ii) if the driver does not trust the vehicle's recommendations, and/or (iii) if the recommendation signal is not accurate enough to steer driver's choices (e.g. the driver may not stop the vehicle because of a false alarm). Secondly, the \emph{persuasive effectiveness of vehicle's recommendations is technically difficult to evaluate} due to its complex/unknown relationship with the driver's (i) attention level \cite{koopman2017autonomous}, (ii) own judgment/prior of road conditions \cite{woide2019methodical}, and (iii) trust on the vehicle's recommendation system \cite{choi2015investigating}. In addition, it is difficult to mathematically model and estimate these three terms \cite{nishigaki2019driver,zyner2017long,abe2006alarm}. Finally, there is strong evidence within the psychology literature that \emph{human decisions exhibit several anomalies to traditional decision theory}. Examples include deviations from expected utility maximization such as Allais paradox \cite{tversky1992advances}, Ellsberg paradox \cite{ellsberg1961risk}, violations of transitivity and/or independence between alternatives \cite{busemeyer1993decision}; and deviations from classical Kolmogorov probability theory such as conjunction fallacy \cite{franco2009conjunction}, disjunction fallacy \cite{young2007potential}, and violation of sure thing principle \cite{khrennikov2009quantum}. There have been a few relevant efforts in the recent literature where both the driver and the driver-assist system interact in a game theoretic setting. These efforts can be broadly classified into two types: (i) the \emph{direct method} where the system uses its on-board AI to directly control the vehicle, and (ii) the \emph{indirect method} where the system indirectly controls the vehicle via relying on the driver to make decisions. 
On the one hand, Flad \emph{et al.} proposed a direct method that models driver steering motion as a sequence of motion primitives so that the aims and steering actions of the driver can be predicted and then the optimal torque can be calculated \cite{7929390}. Another example that proposes a direct method is by Na and Cole in \cite{na2014game}, where four different paradigms were investigated: (i) decentralized, (ii) non-cooperative Nash, (iii) non-cooperative Stackelberg, and (iv) cooperative Pareto, to determine the most effective method to model driver reactions in collision avoidance systems. Although direct methods can mimic driver actions, they certainly do not consider the driver's cognition state (in terms of preferences, biases and attention) and no intervention was designed/implemented to mitigate inattention. On the other hand, indirect methods have bridged this gap via considering driver's cognition state into account. Lutes \emph{et al.} modeled driver-vehicle interaction as a Bayesian Stackelberg game, where the on-board AI in the vehicle (leader) presents binary signals (no-alert/alert) based on which the driver (follower) makes a binary decision (continue/stop) regarding controlling the vehicle on a road \cite{lutes2020perfect}. This work and \cite{lutes2020perfect} share the same setting of unknown road condition and binary actions of two players, and also introduce a non-negative exponent parameter in the overall driver’s utility to capture his/her level of attention. The difference is that \cite{lutes2020perfect} still follows the traditional game theory framework of maximizing payoffs while this work extends the traditional framework in which the players do not necessarily maximize payoffs. Schwarting \emph{et al.} integrated Social Value Orientation (SVO) into autonomous-vehicle decision making. Their model quantifies the degree of an agent’s selfishness or altruism in order to predict the social behavior of other drivers. 
They modeled interactions between agents as a best-response game wherein each agent negotiates to maximize their own utilities \cite{schwarting2019social}. However, all the human players in the game of the above research are still assumed to be rational players maximizing utilities, even though the utilities are modified to capture attention level or social behavior, whether by a non-negative exponent parameter or by SVO. The present work bridges this gap by directly considering the driver as an agent who does not seek to maximize payoff, but instead uses a quantum-cognition based decision process to make decisions. Note that most of the past literature focused on addressing each of these challenges independently. The main contribution of this paper is that we address all the three challenges jointly in our driver-vehicle interaction setting. In Section \ref{section:formulation}, we propose a novel strategic driver-vehicle interaction framework where all the aforementioned challenges are simultaneously addressed in a novel game-theoretic setting. We assume that the vehicle constructs recommendations so as to balance a prescribed trade-off between information accuracy and persuasive effectiveness. On the other hand, we model driver decisions using an open quantum cognition model that considers driver attention as model parameter and incorporates the driver prior regarding road condition into the initial state. In Section \ref{section:lindblad}, we present a closed-form expression for the cognition matrix in the driver's open quantum cognition model. Given that the agent rationalities are fundamentally different from each other (vehicle being a utility-maximizer, and driver following an open quantum cognition model), we also propose a novel equilibrium notion, inspired by Nash equilibrium, and compute both pure and mixed equilibrium strategies for the proposed driver-vehicle interaction game in Sections \ref{section:pure} and \ref{section:mix} respectively. 
Finally, we analyze the impact of driver inattention on the equilibrium of the proposed game. \section{Strategic Driver-Vehicle Interaction Model} \label{section:formulation} In this section, we model the strategic interaction between a driver-assist system (car) and an inattentive driver as a one-shot Bayesian game. We assuming that the physical conditions of the road are classified into two states, namely, \emph{safe} (denoted as $S$) and \emph{dangerous} (denoted as $D$). The vehicle can choose one of the two signaling strategies: alert the driver (denoted as $A$), or no-alert (denoted as $N$) based on its belief about the road state. Meanwhile, based on the driver's belief about the road state and his/her own mental state (which defines driver's type), the driver chooses to either \emph{continue} driving (denoted as $C$), or \emph{stop} the vehicle (denoted as $S$). Note that although the letter $S$ is used to denote both road state being safe and driver decision being stop, the reader can easily decipher the notation's true meaning from context. Depending on the true road state, we assume that the vehicle (row player) and the driver (column player) obtain utilities as defined in Table \ref{tab:tab1}. When the road is dangerous, we expect the car to alert the driver. If the car does not alert, it will get a low payoff. Furthermore, we assume this low payoff depends on the driver's action. If the driver stops, the payoff is only slightly low because no damage or injury is incurred. If the driver continues to drive, the payoff is very low because damage or injury is incurred. When the road is safe, the correct action for the car is not to alert. If the car does not alert, it will get a high payoff. This high payoff depends on the driver's action. If the driver stops, the payoff is only slightly high because it does not help the driver and an unnecessary stop is waste of time and energy. 
If the driver continues to drive, the reward is very high because everything is fine. \begin{table}[htbp] \centering \begin{tabular}{r l} \begin{tabular}{r|c|c|} \multicolumn{1}{r}{} & \multicolumn{1}{c}{C} & \multicolumn{1}{c}{S} \\ \cline{2-3} N & $a_{1,s}$, $a_{2,s}$ & $b_{1,s}$, $b_{2,s}$ \\ \cline{2-3} A & $c_{1,s}$, $c_{2,s}$ & $d_{1,s}$, $d_{2,s}$ \\ \cline{2-3} \end{tabular} & \begin{tabular}{ r|c|c| } \multicolumn{1}{r}{} & \multicolumn{1}{c}{C} & \multicolumn{1}{c}{S} \\ \cline{2-3} N & $a_{1,d}$, $a_{2,d}$ & $b_{1,d}$, $b_{2,d}$ \\ \cline{2-3} A & $c_{1,d}$, $c_{2,d}$ & $d_{1,d}$, $d_{2,d}$ \\ \cline{2-3} \end{tabular} \end{tabular} \caption{Utilities of the car and the driver when the road is safe (left) and dangerous (right)} \label{tab:tab1} \end{table} In this paper, we assume that both the car and the driver does not know the true road state. While the car relies on its observations from on-board sensors and other extrinsic information sources (e.g. nearby vehicles, road-side infrastructure) and its on-board machine learning algorithm for road judgment to construct its belief $q \in [0,1]$ regarding the road state being safe, we assume that the driver constructs a belief $p \in [0,1]$ regarding the road state being safe based on what he/she sees and his/her prior experiences. Furthermore, as in the case of a traditional decision-theoretic agent, we assume that the car seeks to maximize its expected payoff. If $p_C$ is the probability with which the driver chooses $C$, then the expected payoff for choosing $N$ and $A$ at the car are respectively given by \begin{equation} \begin{array}{lcl} U_N(p_C) & = & p_C \Big[ a_{1, s} q + (1-q) a_{1, d} \Big] \\[1.5ex] && \qquad + \ (1- p_C) \Big[ b_{1, s} q + (1-q) b_{1, d} \Big] \end{array} \label{Eqn: Car's Utilities - N} \end{equation} and \begin{equation} \begin{array}{lcl} U_A(p_C) & = & p_C \Big[ c_{1, s}q + (1-q) c_{1, d} \Big] \\[1.5ex] && \qquad + \ (1- p_C) \Big[ d_{1, s}q + (1-q) d_{1, d} \Big]. 
\end{array} \label{Eqn: Car's Utilities - A} \end{equation} The calculation of $p_C$ is complicated by the fact that the driver exhibits bounded rationality. Fortunately, the bounded rationality can be characterized by the open quantum system cognition model, as described below. \subsection{Driver's Open Quantum Cognition Model} \label{subsec: oq} In this subsection, we present the basic elements of the open quantum system cognition model \cite{martinez2016quantum}, and how it is applied to model driver behavior. The cognitive state of the agent is described by a mixed state or density matrix $\rho$, which is a statistical mixture of pure states. Formally it is a Hermitian, non-negative operator, whose trace is equal to one. Under the Markov assumption (the evolution $\mathcal{E}$ can be factorized as $\mathcal{E}_{t_2,t_0}=\mathcal{E}_{t_2,t_1}\mathcal{E}_{t_1,t_0}$ given a sequence of instants $t_0$, $t_1$, $t_2$), one can find the most general form of this time evolution based on a time local master equation $d\rho/dt=\mathcal{L}[\rho]$, with $\mathcal{L}$ a differential superoperator (it acts over operators) called Lindbladian, which is defined as follows. 
\begin{defn}[\cite{rivas2012open}] The Lindblad-Kossakowski equation for any open quantum system is defined as \begin{equation}\label{eq:lindblad} \begin{array}{l} \displaystyle \frac{d\rho}{dt} = -i(1-\alpha)[H,\rho] + \displaystyle \alpha\sum_{m,n} \gamma_{m,n} \Big[ L_{m,n}\rho L^\dagger_{m,n} \\[1ex] \displaystyle \qquad \qquad \qquad \qquad \qquad \qquad -\frac{1}{2}\left\{L^\dagger_{m,n}L_{m,n}, \ \rho \right\} \Big], \end{array} \end{equation} where \begin{itemize} \setlength{\itemsep}{1ex} \item $H$ is the Hamiltonian of the system, \item $[H,\rho] = H\rho-\rho H $ is the commutation operation between the Hamiltonian $H$ and the density operator $\rho$, \item $\gamma_{m,n}$ are $(m,n)^{th}$ entry of some positive semidefinite matrix (denoted as $C$), \item $L_{m,n}$ is a set of linear operators, \item $\left\{ L^\dagger_{m,n}L_{m,n}, \ \rho \right\} = L^\dagger_{m,n}L_{m,n}\rho+\rho L^\dagger_{m,n}L_{m,n}$ denotes the anticommutator. The superscript $\dagger$ represents the adjoint (transpose and complex conjugate) operation. \end{itemize} \label{Defn: Lindblad-Kossakowski} \end{defn} In this paper, we set \begin{equation} L_{(m,n)}=\ket{m}\bra{n} \end{equation} as defined in \cite{martinez2016quantum}, where, for any $m$, $\ket{m}$ is a column vector whose $m$th entry is 1 and the other entries are 0. Note that $\bra{n}$ is obtained by transposing $\ket{n}$ and then taking its complex conjugate. Thus $\bra{n}$ is a row vector whose $n$th entry is 1 and 0 otherwise. The second term on the right side of Equation \eqref{eq:lindblad} contains the dissipative term responsible for the irreversibility in the decision-making process \cite{martinez2016quantum}, weighted by the coefficient $\alpha$ such that the parameter $\alpha\in [0,1]$ interpolates between the von Neumann evolution $(\alpha=0)$ and the completely dissipative dynamics $(\alpha=1)$. Furthermore, the term $\gamma_{(m,n)}$ is the $(m,n)$-th entry in the cognitive matrix $C(\lambda,\phi)$. 
This cognitive matrix $C(\lambda,\phi)$ is formalized as the linear combination of two matrices $\Pi(\lambda)$ and $B$, which are associated to the profitability comparison between alternatives and the formation of beliefs, respectively \cite{martinez2016quantum}: \begin{equation}\label{eq:cognition matrix} \begin{array}{lcl} C(\lambda,\phi) & = & \begin{bmatrix} \gamma_{(1,1)} & \cdots & \gamma_{(1,N)} \\ \vdots & \ddots & \vdots \\ \gamma_{(N,1)} & \cdots & \gamma_{(N,N)} \end{bmatrix} \\[6ex] & = & (1-\phi) \cdot \Pi^T(\lambda) \ + \ \phi \cdot B^T, \end{array} \end{equation} where $\phi\in[0,1]$ is a parameter assessing the relevance of the formation of beliefs during the decision-making process, $\Pi(\lambda)$ is the transition matrix where $(i,j)$-th entry $\pi_{ij}(\omega_l)$ is the probability that the decision maker switches from strategy $s_i$ to $s_j$ for a given state of the world $\omega_l$, and $B$ matrix allows the driver to introduce a change of belief about the state of the world in the cognitive process by jumping from one connected component associated to a particular state of the world $\omega_k\in\Omega$ to the connected component associated to another one $\omega_l\in\Omega$, while keeping the action $s_i$ fixed. The superscript $T$ denotes the transpose matrix. Finally, the dimension of the square matrix $C(\lambda,\phi)$, i.e. $N$, can be inferred from the detailed discussion given below. 
\begin{figure*}[!t] \centering \begin{equation} \begin{array}{lcl} \Pi(\lambda) & = & \quad \begin{bmatrix} \mu(\lambda) & 1-\mu(\lambda) & 0 & 0 & 0 & 0 & 0 & 0 \\ \mu(\lambda) & 1-\mu(\lambda) & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & \nu(\lambda) & 1-\nu(\lambda) & 0 & 0 & 0 & 0 \\ 0 & 0 & \nu(\lambda) & 1-\nu(\lambda) & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & \xi(\lambda) & 1-\xi(\lambda) & 0 & 0 \\ 0 & 0 & 0 & 0 & \xi(\lambda) & 1-\xi(\lambda) & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & o(\lambda) & 1-o(\lambda) \\ 0 & 0 & 0 & 0 & 0 & 0 & o(\lambda) & 1-o(\lambda) \end{bmatrix}. \end{array} \label{eq:PI} \end{equation} \end{figure*} At the driver, the world state primarily consists of two components: (i) the road condition, and (ii) the car's action, i.e., the set of world states of the driver is $\Omega=\{SN, SA, DN, DA\}$ where the first letter represents road condition and the second letter represents car action. The utilities of the driver for choosing a strategy at a world state are as follows: \begin{equation*} \begin{array}{lclclcl} u(C|SN) & = & a_{2,s}, & \ & u(S|SN) & = & b_{2,s}, \\[1ex] u(C|SA) & = & c_{2,s}, & \ & u(S|SA) & = & d_{2,s}, \\[1ex] u(C|DN) & = & a_{2,d}, & \ & u(S|DN) & = & b_{2,d}, \\[1ex] u(C|DA) & = & c_{2,d}, & \ & u(S|DA) & = & d_{2,d}. \end{array} \end{equation*} We choose the basis of the road-car-driver system spanning the space of states to be \begin{equation} \begin{array}{l} \Big\{ \ket{e_1},\ket{e_2},\ket{e_3},\ket{e_4},\ket{e_5},\ket{e_6},\ket{e_7},\ket{e_8} \Big\} \\[2ex] = \Big\{ \ket{SNC}, \ \ket{SNS}, \ \ket{SAC}, \ \ket{SAS}, \ \ket{DNC}, \ \ket{DNS}, \\[1ex] \qquad \qquad \qquad \ket{DAC}, \ \ket{DAS} \Big\}. \end{array} \label{Eqn: Basis States} \end{equation} Next we define the transition matrix $\Pi(\lambda)$. 
If the utility of the decision maker by choosing strategy $s_i$ at the world state of $\omega_l$ is $u(s_i|\omega_l)$, the transition probability that the decision maker would switch to strategy $s_i$ at time step $k+1$ from strategy $s_j$ at time step $k$ is given in the spirit of Luce’s choice axiom \cite{luce1977choice,luce2012individual,yellott1977relationship}: \begin{equation} \label{eq:luce} \pi_{(s_j \rightarrow s_i |\omega_l)} = P(s_i|s_j,\omega_l)=\frac{u(s_i|\omega_l)^\lambda}{\displaystyle \sum_{j=1}^{N_S}u(s_j|\omega_l)^\lambda}, \end{equation} where the exponent $\lambda\geq 0$ measures the decision maker’s ability to discriminate the profitability among the different options. When $\lambda=0$, each strategy $s_i\in S$ has the same probability of being chosen ($1/N_S$), and when $\lambda\rightarrow\infty$ only the dominant alternative is chosen. There are two implications in this formulation of $P(s_i|s_j,\omega_l)$: (1) $u(s_i|\omega_l)\geq 0$ to avoid negative $P(s_i|s_j,\omega_l)$; (2) $P(s_i|s_j,\omega_l)$ only depends on the destination $s_i$ and does not depend on the starting point $s_j$. 
Below are the probabilities needed for the $\Pi$ matrix: \begin{equation*} \begin{array}{lr} \mu(\lambda) = \displaystyle \frac{a^\lambda_{2,s}}{a^\lambda_{2,s}+b^\lambda_{2,s}}, & \nu(\lambda) = \displaystyle \frac{c^\lambda_{2,s}}{c^\lambda_{2,s}+d^\lambda_{2,s}}, \\[2ex] \xi(\lambda) = \displaystyle \frac{a^\lambda_{2,d}}{a^\lambda_{2,d}+b^\lambda_{2,d}}, & o(\lambda) = \displaystyle \frac{c^\lambda_{2,d}}{c^\lambda_{2,d}+d^\lambda_{2,d}}, \end{array} \end{equation*} where \begin{itemize} \setlength{\itemsep}{1ex} \item $\mu(\lambda)$ is the probability that driver picks $C$ when he/she assumes that road state is $S$ and the car chooses $N$, \item $\nu(\lambda)$ is the probability that driver will pick $C$ when he/she assumes that road state is $S$ and the car chooses $A$, \item $\xi(\lambda)$ is the probability that driver will pick $C$ when he/she assumes that road state is $D$ and the car chooses $N$, \item $o(\lambda)$ is the probability that driver will pick $C$ when he/she assumes that road state is $D$ and the car chooses $A$. \end{itemize} Equation \eqref{eq:PI} puts all the terms together in a matrix form and demonstrates the physical meaning of the row and column labels in $\Pi(\lambda)$. The $H$ matrix in Equation \eqref{eq:lindblad} is set as in \cite{martinez2016quantum}. When an element of $\Pi(\lambda)$ is nonzero, the element of $H$ in the same position is 1; otherwise it is zero. 
Thus, the $H$ matrix is \begin{equation}\label{eq:H} \begin{array}{lcl} H & = & \begin{bmatrix} 1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ 1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 1 & 1 & 0 & 0 & 0 & 0 \\ 0 & 0 & 1 & 1 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 1 & 1 & 0 & 0 \\ 0 & 0 & 0 & 0 & 1 & 1 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 1 & 1 \\ 0 & 0 & 0 & 0 & 0 & 0 & 1 & 1 \end{bmatrix} \\[11ex] & = & \begin{bmatrix} 1 & 1 \\1 & 1\end{bmatrix}\oplus\begin{bmatrix} 1 & 1 \\1 & 1\end{bmatrix}\oplus\begin{bmatrix} 1 & 1 \\1 & 1\end{bmatrix}\oplus\begin{bmatrix} 1 & 1 \\1 & 1\end{bmatrix}. \end{array} \end{equation} In this paper, we set $\phi=0$ for the following two reasons: (1) Since the world state of the driver is mainly the action of the car and the action of the car is known when calculating the equilibrium, the driver does not need to form such a belief; (2) We are considering a one-shot game and we can assume the road condition does not change in one game, i.e., we are only considering short-time dynamic. The $B$ matrix is zeroed out and its content is not described here. Thus $C=\Pi$ and we set $\gamma_{m,n}=C_{m,n}$ in Equation \eqref{eq:lindblad}. \subsection{Pure and Mixed Strategy Equilibria} \label{section:eq} For the sake of simplicity, let us denote the car as Agent 1, and the driver as Agent 2 without any loss of generality. Since the car seeks to maximize its expected payoff given that the driver chooses a strategy $s_2 \in \{C,S\}$, it is natural that the car's final response $FR_1(s_2)$ is its best response that maximizes its expected payoff given in Equations \eqref{Eqn: Car's Utilities - N} and \eqref{Eqn: Car's Utilities - A}, i.e., $$FR_1(s_2) = BR_1(s_2) \left( \triangleq \displaystyle \max_{s_1 \in \{ N,A \} } U_{s_1}(s_2) \right).$$ On the contrary, driver's decisions are governed by the open quantum system model. 
If we denote the steady-state solution of Equation \eqref{eq:lindblad} as $OQ_{pure}(s_1;\alpha,\lambda)$ for a given car's strategy $s_1\in\{A,N\}$, the final response of the driver is defined as $$FR_2(s_1)=OQ_{pure}(s_1;\alpha,\lambda),$$ where $\alpha$ and $\lambda$ are driver's model parameters in Equations \eqref{eq:lindblad} and \eqref{eq:luce} respectively. Then the (pure-strategy) equilibrium of this game is defined as follows. \begin{defn} A strategy profile $(s_1^*,s_2^*)\in\{A,N\}\times\{C,S\}$ is a \textbf{pure strategy equilibrium} if and only if $s_1^*=BR_1(s_2^*)$ and $s_2^*=OQ_{pure}(s_1^*;\alpha,\lambda)$. \label{Defn: Pure Strategy Equilibrium} \end{defn} On the contrary, the concept of mixed strategy equilibrium is actually more natural to the open-quantum-system model since the solution tells the probability of taking various actions instead of indicating a particular action. The open quantum system model directly gives a mixed strategy. Let the mixed strategy of the driver be denoted as $\sigma_2 = (p_C,1-p_C)$ where $p_C$ is the probability that the driver chooses to continue. Similarly, let the car's mixed strategy be denoted as $\sigma_1 = (p_A, 1-p_A)$, where $p_A$ is the probability that the car chooses to alert. Then, a mixed strategy profile is denoted as $(\sigma_1, \sigma_2)$. In such a mixed strategy setting, the car's final response is its best mixed-strategy response, i.e. $$FR_1(\sigma_2)=BR_1(\sigma_2).$$ Similarly, the final response of the driver is obtained from the steady-state solution of Eq.~\eqref{eq:lindblad}, i.e. $$FR_2(\sigma_1)=OQ_{mix}(\sigma_1;\alpha,\lambda).$$ Then the mixed-strategy equilibrium of this game is defined as follows. \begin{defn} A strategy profile $(\sigma_1^*,\sigma_2^*)$ is a \textbf{mixed-strategy equilibrium} if and only if $\sigma_1^* = BR_1(\sigma_2^*)$ and $\sigma_2^* = OQ_{mix}(\sigma_1^*;\alpha,\lambda)$. 
\label{Defn: Mixed Strategy Equilibrium} \end{defn} \begin{figure}[!t] \centering \includegraphics[width=0.49\textwidth]{illustrate.png} \caption{Illustration of the car-driver interaction game} \label{Fig: illustrate} \end{figure} Note that the above equilibrium notions presented in Definitions \ref{Defn: Pure Strategy Equilibrium} and \ref{Defn: Mixed Strategy Equilibrium} are novel and different from traditional equilibrium notions in game theory. This is because our game comprises two different players: (i) the car modeled as an expected utility maximizer, and (ii) the driver modeled using open quantum cognition equation, as is illustrated in Figure \ref{Fig: illustrate}. However, our equilibrium notions are both inspired by the traditional definition of Nash equilibrium, and are defined using players' final responses as opposed to best responses in the Nash sense. By doing so, we can easily expand traditional equilibrium notions to any strategic setting where heterogeneous entities interact in a competitive manner. \section{Driver's Final Response} \label{section:lindblad} Note that the dependent variable $\rho$ in Equation \eqref{eq:lindblad} is a matrix. In order to obtain the analytical solution, we vectorize $\rho$ by stacking its columns one on another to obtain vector $\vv{\rho}$. Thus, the vectorized version of Definition \ref{Defn: Lindblad-Kossakowski} is as follows. 
\begin{defn} The vectorized form for Lindblad-Kossakowski equation is given by \begin{equation} \label{eq:vectorize} \displaystyle \frac{d\vv{\rho}}{dt} = \left[ -i(1-\alpha)\vv{H}+\alpha\vv{L} \right] \vv{\rho}, \end{equation} where $I_N$ is the $N\times N$ identity matrix, \begin{equation}\label{eq:vecH} \vv{H} = H\otimes I_N-I_N\otimes H^T, \end{equation} \begin{equation} \vv{L} = \displaystyle \sum_{m,n} \gamma_{m,n}\Lambda_{m,n} \end{equation} \begin{equation}\label{eq:Lambda} \Lambda_{m,n} = L_{m,n}\otimes L^*_{m,n} - \Phi_{m,n}, \end{equation} \begin{equation}\label{eq:Phi} \Phi_{m,n} = \displaystyle \frac{1}{2} \Big( L^\dagger_{m,n}L_{m,n}\otimes I_N +I_N\otimes(L^\dagger_{m,n}L_{m,n})^* \Big), \end{equation} with the superscript * representing taking the complex conjugate of all entries. \label{Defn: Lindblad-Kossakowski vectorized} \end{defn} In the driver-car game presented in Section \ref{section:formulation}, note that we have $N=8$ basis states as stated in Equation \eqref{Eqn: Basis States}. We will first derive the sparse structure of $\vv{H}$ in Lemma \ref{Lemma: Lemma 1}. Note that the symbol $\oplus$ means direct-sum while the symbol $\otimes$ means tensor-product. The following two simple examples show their difference. 
\begin{equation*} \begin{bmatrix} a & b \\ c & d \end{bmatrix} \oplus \begin{bmatrix} e & f \\ g & h \end{bmatrix} = \begin{bmatrix} a & b & 0 & 0 \\ c & d & 0 & 0 \\ 0 & 0 & e & f \\ 0 & 0 & g & h \end{bmatrix} \end{equation*} \begin{equation*} \begin{bmatrix} a & b \\ c & d \end{bmatrix} \otimes \begin{bmatrix} e & f \\ g & h \end{bmatrix} = \begin{bmatrix} ae & af & be & bf \\ ag & ah & bg & bh \\ ce & cf & de & df \\ cg & ch & dg & dh \end{bmatrix} \end{equation*} \begin{lma} If the Hamiltonian $H$ of the 8-dimensional Lindblad-Kossakowski equation is defined as \begin{equation*} H = \begin{bmatrix} 1 & 1 \\1 & 1\end{bmatrix}\oplus\begin{bmatrix} 1 & 1 \\1 & 1\end{bmatrix}\oplus\begin{bmatrix} 1 & 1 \\1 & 1\end{bmatrix}\oplus\begin{bmatrix} 1 & 1 \\1 & 1\end{bmatrix}, \end{equation*} then its vectorized form $\vv{H}$ is given by \begin{equation} \vv{H}=J\oplus J\oplus J\oplus J, \end{equation} where \begin{equation*} J = \begin{bmatrix} X\oplus X\oplus X\oplus X & I_8 \\ I_8 & X\oplus X\oplus X\oplus X \end{bmatrix} \end{equation*} with $X= \begin{bmatrix} 0 & -1 \\ -1 & 0 \end{bmatrix}$. \label{Lemma: Lemma 1} \end{lma} \begin{proof} By Equation \eqref{eq:vecH}, we only need to calculate $H\otimes I_N$ and $I_N\otimes H^T$. Noting $N=8$, we have \begin{equation*} H\otimes I_N=\begin{bmatrix} I_8 & I_8 \\I_8 & I_8\end{bmatrix}\oplus\begin{bmatrix} I_8 & I_8 \\I_8 & I_8\end{bmatrix}\oplus\begin{bmatrix} I_8 & I_8 \\I_8 & I_8\end{bmatrix}\oplus\begin{bmatrix} I_8 & I_8 \\I_8 & I_8\end{bmatrix} \end{equation*} and \begin{equation*} I_N\otimes H^T=I_N\otimes H=K\oplus K\oplus K\oplus K \end{equation*} where \begin{equation*} K=\begin{bmatrix} \mathbf{1}\oplus\mathbf{1}\oplus\mathbf{1}\oplus\mathbf{1}& 0 \\0 & \mathbf{1}\oplus\mathbf{1}\oplus\mathbf{1}\oplus\mathbf{1}\end{bmatrix} \end{equation*} with \textbf{1} the $2\times 2$ matrix whose elements are all 1. Subtracting $I_N\otimes H^T$ from $H\otimes I_N$ blockwise then leads to the claimed $J$. 
\end{proof} \begin{remark} \normalfont The condition of Lemma 1 is just setting the Hamiltonian of the Lindblad-Kossakowski equation as in Equation \eqref{eq:H}. $\vv{H}$ is a sparse block diagonal matrix with four blocks, each being $J$. $J$ is a sparse matrix consisting of four blocks where the off-diagonal blocks are identity matrices and the diagonal blocks are again block diagonal matrices. Such a special structure results from stacking the columns of the all-one matrices. \end{remark} Theorem 1 presents the special sparse structure of $\vv{L}$. To prove Theorem 1, Lemma \ref{Lemma: Lemma 2} is needed. Lemma \ref{Lemma: Lemma 2} gives the sparse structure of $\Lambda_{m,n}$. \begin{lma}\label{Lemma: Lemma 2} The $(M,N)^{th}$ entry of the matrix $\Lambda_{m,n}$ with $m \neq n$ is given by \begin{equation} \Lambda_{m,n}(M,N) = \begin{cases} -\displaystyle \frac{1}{2}, & \text{ if } M = N = 8(n-1)+k \\ & \text{ or } M = N = 8(k-1)+n, \\ & k \in \{1, 2, \cdots, 8 \} \setminus \{n\} \\[2ex] -1 & \text{ if } M = N = 9n-8 \\[2ex] 1 & \text{ if } M = 9m-8, N = 9n-8 \\[2ex] 0 & \text{ otherwise} \end{cases} \end{equation} The $(M,N)^{th}$ entry of the matrix $\Lambda_{m,n}$ with $m=n$ is given by \begin{equation} \Lambda_{m,n}(M,N) = \begin{cases} -\displaystyle \frac{1}{2}, & \text{ if } M = N = 8(n-1)+k \\ & \text{ or } M = N = 8(k-1)+n, \\ & k \in \{1, 2, \cdots, 8 \} \setminus \{n\} \\[2ex] 0 & \text{ otherwise} \end{cases} \end{equation} \end{lma} \begin{proof} $L_{m,n}=\ket{m}\bra{n}$ is a real matrix, so $L^*_{m,n}=L_{m,n}$ and $L^\dagger_{m,n}=L^T_{m,n}$. Since only the $(m,n)$ entry of $L_{m,n}$ is 1 and the others are 0, $L_{m,n}\otimes L^*_{m,n}$ is a $64\times 64$ matrix with all entries zero except the $(8(m-1)+m,8(n-1)+n)$ entry, which is 1. Note that $m$ and $n$ range from 1 to 8. 
Since the $(n, n)$ entry of $L^\dagger_{m,n}L_{m,n}$ is 1 and the other entries are 0, $L^\dagger_{m,n}L_{m,n}\otimes I_8$ is a 64$\times$64 matrix whose entries are all zero except the $[8(n-1)+1]$th to the 8$n$th diagonal entries (which are 1), and $I_8\otimes(L^\dagger_{m,n}L_{m,n})^*$ is a 64$\times$64 matrix whose entries are all zero except the $(M,M)$ entries (which are 1) with $M=8(k-1)+n$, $k=1,2,...,8$ for each fixed $n$. Thus by Equation \eqref{eq:Phi}, $\Phi_{m,n}$ is a 64$\times$64 matrix whose entries are all zero except the $(M,M)$ entries with $M=8(n-1)+k$ or $M=8(k-1)+n$, $k=1,2,...,8$ for each fixed $n$. The $(M,M)$ entries are 1/2 when $8(n-1)+k\neq 8(k-1)+n$ and 1 when $8(n-1)+k=8(k-1)+n$. By Equation \eqref{eq:Lambda}, subtracting $\Phi_{m,n}$ from $L_{m,n}\otimes L^*_{m,n}$ leads to the claimed result: When $m\neq n$, there is no cancellation of nonzero entries between $\Phi_{m,n}$ and $L_{m,n}\otimes L^*_{m,n}$. When $m=n$, only the $(9n-8,9n-8)$ entry of $L_{m,n}\otimes L^*_{m,n}$ is nonzero (which is 1). The $(9n-8,9n-8)$ entry of $\Phi_{m,n}$ is also 1. Thus the resultant only has 14 nonzero entries. \end{proof} \begin{remark} \normalfont Note that $\Lambda_{m,n}$ does not mean the $(m,n)$ entry of $\Lambda$. $\Lambda_{m,n}$ is itself a matrix. There are 64 such matrices and they will be weighted by $\gamma_{m,n}$ and summed. The $(M,N)$ entry of $\Lambda_{m,n}$ depends on $m$, $n$, $M$, and $N$. $\Lambda_{m,n}$ is very sparse. The nonzero entries can only take $\pm1$ and $-1/2$ since the building blocks $L_{m,n}$ and $I_N$ only have 1 as nonzero entry value. Given $m$ and $n$, the $(M,N)$ entries with $M=N=8(n-1)+k$ or $M=N=8(k-1)+n$ are special since either $L_{m,n}\otimes L^*_{m,n}$ or $\Phi_{m,n}$ takes nonzero values at these entries. \end{remark} Next we will multiply the $\Lambda_{m,n}$ obtained in Lemma \ref{Lemma: Lemma 2} with $\gamma_{m,n}$ and sum over all $m$ and $n$ to obtain $\vv{L}$ in Theorem \ref{Theorem: Theorem 1}. 
\begin{thrm}\label{Theorem: Theorem 1} Let the coefficients $\gamma_{m,n}$ in the 8-dimensional Lindblad-Kossakowski equation be the $(m,n)$ entries of the matrix (ref. to Equation \eqref{eq:PI}) \begin{equation*} C = \Pi(\lambda) = M[\mu(\lambda)]\oplus M[\nu(\lambda)]\oplus M[\xi(\lambda)]\oplus M[o(\lambda)], \end{equation*} where $M[a]$ is of the form \begin{equation*} M[a]= \begin{bmatrix} a & a \\ 1-a & 1-a \end{bmatrix}. \end{equation*} Then, the $(M,N)^{th}$ entries of $\vv{L}$ within the vectorized Lindblad-Kossakowski equation (ref. to Def. \ref{Defn: Lindblad-Kossakowski vectorized}) with $M=N$ are given by \begin{equation*} \vv{L}_{M,N} =\begin{cases} -\displaystyle\frac{1}{2} \ (C_{n+1,n}+C_{l+1,l}), & n\neq l, n \text{ is odd} \\[1ex] -\ C_{n+1,n}, & n=l, n \text{ is odd} \\[1ex] -\displaystyle\frac{1}{2} \ (C_{n-1,n}+C_{l-1,l}), & n\neq l, n \text{ is even} \\[1ex] -\ C_{n-1,n}, & n=l, n \text{ is even} \end{cases} \end{equation*} where $n = \lfloor \frac{M-1}{8}+1 \rfloor$ and $l= (M-1) \mod 8 + 1$, and the $(M,N)^{th}$ entries of $\vv{L}$ with $M \neq N$ are given by $$ \vv{L}_{M,N} = \begin{cases} C_{n+1,n}, & M = 9n+1, N = 9n-8, n =1,3,5,7 \\[1ex] C_{n-1,n}, & M = 9n-17, N = 9n-8, n =2,4,6,8 \\[1ex] 0, & \text{otherwise} \end{cases} $$ \end{thrm} \begin{proof} Interested readers may refer to Appendix \ref{sec: Proof of theorem 1}. \end{proof} \begin{remark} \normalfont $\vv{L}_{M,N}$ depends on $M$ and $N$. The expression of $\vv{L}_{M,N}$ must consist of entries of $C$. Theorem 1 just reveals explicitly these relations. The entries of $C$ appearing in the expression of $\vv{L}_{M,N}$ are $C_{n,n}$ and $C_{n\pm1,n}$ where $n = \lfloor \frac{M-1}{8}+1 \rfloor$ or $n= (M-1) \mod 8 + 1$. Such relations arise due to vectorization (stacking columns). Dividing by 8 and mod 8 appear since each column to be stacked is 8-dimensional. 
Despite summation over all $m$ and $n$, at most two entries of $C$ appear in $\vv{L}_{M,N}$ since $C=\Pi(\lambda)$ is itself sparse. \end{remark} Next we will combine the $\vv{H}$ obtained in Lemma \ref{Lemma: Lemma 1} and the $\vv{L}_{m,n}$ obtained in Theorem \ref{Theorem: Theorem 1} to obtain $-i(1-\alpha)\vv{H}+\alpha\vv{L}$ in Corollary \ref{Corollary: Corollary 1}. \begin{cor}\label{Corollary: Corollary 1} If the coefficients $\gamma_{m,n}$ of the 8-dimensional Lindblad-Kossakowski equation are set as the $(m,n)$ entries of \begin{equation*} C=\Pi(\lambda)=M(\mu(\lambda))\oplus M(\nu(\lambda))\oplus M(\xi(\lambda))\oplus M(o(\lambda)), \end{equation*} where $M(a)$ is a matrix in the form of \begin{equation*} M(a)=\begin{bmatrix} a & a \\1-a & 1-a\end{bmatrix}, \end{equation*} then \begin{equation*} -i(1-\alpha)\vv{H}+\alpha\vv{L} = A_1\oplus A_2\oplus A_3\oplus A_4 \end{equation*} where \begin{equation*} A_i=\begin{bmatrix} B_{i1}\oplus B_{i2}\oplus B_{i3}\oplus B_{i4} & -i(1-\alpha)I_8+\alpha E_i \\-i(1-\alpha)I_8+\alpha D_i & B_{i5}\oplus B_{i6}\oplus B_{i7}\oplus B_{i8}\end{bmatrix}. \end{equation*} $D_i$ and $E_i$ are 8$\times$8 matrices (matching the $I_8$ blocks they are added to). They both have only one nonzero entry. The nonzero entries are taken from the cognition matrix $C$: \begin{equation*} \begin{array}{ccc} D_1(2,1)=C_{2,1}, & E_1(1,2)=C_{1,2}, & D_2(4,3)=C_{4,3}, \\[2ex] E_2(3,4)=C_{3,4}, & D_3(6,5)=C_{6,5}, & E_3(5,6)=C_{5,6}, \\[2ex] D_4(8,7)=C_{8,7}, & E_4(7,8)=C_{7,8}. 
& \end{array} \end{equation*} The $B_{ij}$'s are 2$\times$2 matrices: \begin{equation*} B_{ii} = \displaystyle F_i-\frac{\alpha}{2}\begin{bmatrix} C_{2i,2i-1}-C_{2i-1,2i-1} & 0 \\0 & C_{2i-1,2i}+C_{2i,2i}\end{bmatrix}, \end{equation*} \begin{equation*} B_{i(i+4)}=G_i-\frac{\alpha}{2}\begin{bmatrix} C_{2i-1,2i-1}+C_{2i,2i-1} & 0 \\0 & C_{2i-1,2i}-C_{2i,2i}\end{bmatrix}, \end{equation*} \begin{equation*} B_{ij}=F_i-\frac{\alpha}{2}\begin{bmatrix} C_{2j-1,2j-1}+C_{2j,2j-1} & 0 \\0 & C_{2j-1,2j}+C_{2j,2j}\end{bmatrix}, \end{equation*} \begin{equation*} B_{i(j+4)}=G_i-\frac{\alpha}{2}\begin{bmatrix} C_{2j-1,2j-1}+C_{2j,2j-1} & 0 \\0 & C_{2j+1,2j}+C_{2j,2j}\end{bmatrix} \end{equation*} for $i\neq j$, $i=1,2,3,4$, $j=1,2,3,4$, where \begin{equation*} F_i=i(1-\alpha)\begin{bmatrix} 0 & 1 \\1 & 0\end{bmatrix}-\frac{\alpha}{2}(C_{2i-1,2i-1}+C_{2i,2i-1})I_2, \end{equation*} \begin{equation*} G_i=i(1-\alpha)\begin{bmatrix} 0 & 1 \\1 & 0\end{bmatrix}-\frac{\alpha}{2}(C_{2i-1,2i}+C_{2i,2i})I_2. \end{equation*} \end{cor} \begin{remark} \normalfont The Lindblad-Kossakowski equation itself is not a cognition model since its coefficients $\gamma_{m,n}$ are quite general. The open quantum cognition model is built by setting the $\gamma_{m,n}$ as $(m,n)$ entry of the cognition matrix $C$. The condition in Corollary 1 is just setting $\phi=0$ in Equation \eqref{eq:cognition matrix} and using the $\Pi(\lambda)$ prescribed in Equation \eqref{eq:PI}. This is exactly the scenario of the car-driver game. \end{remark} \begin{remark} \normalfont The vectorized operator $-i(1-\alpha)\vv{H}+\alpha\vv{L}$ of the vectorized Lindblad-Kossakowski equation is a block diagonal matrix with four blocks. The four blocks have very similar structures. Each block is actually quite sparse since each block is a block matrix with totally four sub-blocks and the two off-diagonal sub-blocks are almost identity matrices (only one entry is different). 
\end{remark} \section{Pure-strategy equilibrium} \label{section:pure} The diagonal elements of the steady-state solution $\rho$ of Equation \eqref{eq:lindblad} are just $Pr(SNC), Pr(SNS), \cdots, Pr(DAS)$. Then we can calculate the probability for the driver to continue as \begin{equation}\label{eq:PrCsum} Pr(C) = Pr(SNC) + Pr(SAC) + Pr(DNC) + Pr(DAC). \end{equation} Let $p$ be the probability that the driver judges the road to be safe before knowing the car's action and $U_2$ be the utility function of the driver. In this paper, we model driver's pure strategy $s_2$ as the output of the open quantum cognition model with parameters $\alpha$ and $\lambda$, taking the pure strategy of the car $s_1$ as input: \begin{equation} s_2 = OQ_{pure}(s_1;\Theta) = \begin{cases} C, & \text{ if } Pr(C) \geq 0.5, \\[2ex] S, & \text{ if } Pr(C) < 0.5. \end{cases} \end{equation} where $\Theta=(\alpha,\lambda,p,U_2)$ is the parameter tuple of the open quantum model. \begin{remark} \normalfont In this paper, we use $Pr(C)$ in two different ways to obtain pure and mixed strategy equilibria. We obtain a pure strategy at the driver by employing a hard threshold on $Pr(C)$ (in our case, Continue if $Pr(C) \geq 0.5$, Stop otherwise). By treating $Pr(C)$ as the driver's mixed strategy in Section \ref{section:mix}, we will obtain the mixed-strategy equilibrium. \end{remark} We set the initial density matrix as $\rho_0=\ket{\Psi_0}\bra{\Psi_0}$, where \begin{equation*} \ket{\Psi_0}=\sqrt{p/2}(\ket{e_3}+\ket{e_4})+\sqrt{(1-p)/2}(\ket{e_7}+\ket{e_8}) \end{equation*} when the car action is A and \begin{equation*} \ket{\Psi_0}=\sqrt{p/2}(\ket{e_1}+\ket{e_2})+\sqrt{(1-p)/2}(\ket{e_5}+\ket{e_6}) \end{equation*} when the car action is N, with $\ket{e_i}$ prescribed in Subsection \ref{subsec: oq}. The calculation of the generalized pure-strategy equilibrium is similar to that of the Nash equilibrium. We simply replace the best response with the final response. We loop over the car strategies. 
In the loop, the car strategy is the input of the open quantum model and a driver strategy is the output. If the car strategy is the best response with the outputted driver strategy, then the strategy profile is outputted as pure-strategy equilibrium. Algorithm \ref{algo_pure} lists the procedures of calculating the pure-strategy equilibrium. \begin{figure}[!t] \centering \begin{subfigure}{0.24\textwidth} \centering \includegraphics[width=\textwidth]{priorAlpha2.png} \caption{Driver-Agnostic car} \end{subfigure}% \begin{subfigure}{0.24\textwidth} \centering \includegraphics[width=\textwidth]{pureAlpha2.png} \caption{Driver-Conscient car} \end{subfigure}% \caption{Equilibrium points of the driver-car games with a driver-agnostic car and with a driver-conscient car (i.e., assumes that the driver uses open quantum model with $\lambda= 10, \alpha=0.2$ to make decisions) under various prior beliefs.} \label{fig:pureAlpha} \end{figure} \begin{table}[!t] \centering \begin{tabular}{c} \begin{tabular}{r|c|c|} \multicolumn{1}{r}{} & \multicolumn{1}{c}{C} & \multicolumn{1}{c}{S} \\ \cline{2-3} N & $a_{1,s} = 85$, $a_{2,s} = 85$ & $b_{1,s} = 75$, $b_{2,s} = 50$ \\ \cline{2-3} A & $c_{1,s} = 40$, $c_{2,s} = 85$ & $d_{1,s} = 50$, $d_{2,s} = 50$ \\ \cline{2-3} \end{tabular} \\[5ex] \begin{tabular}{ r|c|c| } \multicolumn{1}{r}{} & \multicolumn{1}{c}{C} & \multicolumn{1}{c}{S} \\ \cline{2-3} N & $a_{1,d} = 25$, $a_{2,d} = 25$ & $b_{1,d} = 30$, $b_{2,d} = 60$ \\ \cline{2-3} A & $c_{1,d} = 75$, $c_{2,d} = 25$ & $d_{1,d} = 85$, $d_{2,d} = 85$ \\ \cline{2-3} \end{tabular} \end{tabular} \caption{Utilities used in our numerical results when the road is safe (above) and dangerous (below)} \label{tab:tab1-sim} \end{table} Furthermore, in our numerical evaluation, we assume the utilities at both the car and the driver as shown in Table \ref{tab:tab1-sim}. 
In addition to the case of a driver-conscient car, we consider a benchmark case where the car does not care about the driver and makes decisions solely based on its prior, i.e., alert if $q<0.5$ and does not alert if $q\geq0.5$. In this benchmark case, the final response of the car is independent of the driver's strategy. The equilibrium points of the driver-car games with a driver-agnostic car and with a driver-conscient car (driver making decisions according to open quantum model with $\lambda= 10, \alpha=0.2$) under various prior beliefs on road condition are shown in Fig. \ref{fig:pureAlpha}. When both the driver and car are sure of safety, the equilibrium is $(N, C)$. When both the driver and car are sure of danger, the equilibrium is $(A, S)$. When the driver is sure of safety but the car is sure of danger, the equilibrium is $(A, C)$. When the driver is sure of danger but the car is sure of safety, the equilibrium is $(N, S)$. The division line is not $p = q = 0.5$. (S, A) has the largest area. When the car is driver-agnostic, the border between Not Alert and Alert in the equilibrium plot is always $q=0.5$ regardless of the equilibrium strategy of the driver. When the car is driver-conscient, the border between Not Alert and Alert depends on the equilibrium strategy of the driver (or equivalently, road prior of the driver): the border is located close to $q=0.7$ when $p\leq0.50$ and the border is located close to $q=0.52$ when $p\geq0.52$. 
\IncMargin{1em} \begin{algorithm}[!t] \SetKwData{Left}{left} \SetKwData{This}{this} \SetKwData{Up}{up} \SetKwFunction{Union}{Union}\SetKwFunction{FindCompress}{FindCompress} \SetKwInOut{Input}{input}\SetKwInOut{Output}{output} \Input{Parameters: $\alpha,\lambda$; prior about road safety of the driver: $p$; prior about road safety of the car: $q$; Utility function of the car: $U_1(s_1,s_2,r)$ where $r=S$ means safe and $r=D$ means dangerous; Utilities of the driver: $U_2$.} \Output{Pure-strategy equilibrium: $S^*$} \BlankLine \tcp{Empty set means no equilibrium} $S^*=\emptyset$\; \For{$s_1\;\mathbf{in}\;\{N,A\}$}{ $\Bar{s}_1 = \mathrm{element\;of\;}\{N,A\}\setminus\{s_1\}$\; $s_2 = OQ_{pure}(s_1;\alpha,\lambda)$\; $u = qU_1(s_1,s_2,S)+(1-q)U_1(s_1,s_2,D)$\; $\Bar{u} = qU_1(\Bar{s}_1,s_2,S)+(1-q)U_1(\Bar{s}_1,s_2,D)$\; \If{$u\geq\Bar{u}$}{ $S^*=S^*\cup\{s_1,s_2\}$\; } } \caption{Calculating the pure-strategy equilibrium of the car-driver game} \label{algo_pure} \end{algorithm} \DecMargin{1em} The equilibrium points of the driver-car game with $\lambda$ = 0, 1, 2, 3, 4, 10 and $\alpha$ = 0.8 under various prior beliefs on road condition are shown in Fig. \ref{fig:pureLambda} (C: Continues, S: Stop, A: Alert, N: Not Alert). When $\lambda$ drops from 10 to 4, the border between S and C shifts from left to right. When $\lambda$ drops from 4 to 3, the border between (S, A) and (C, A) shifts from left to right and a region with two equilibrium points appears inside the region of (C, N). The two equilibria are (C, N) and (S, A). When $\lambda$ drops from 4 to 3 and from 3 to 2, the border between (S, A) and (C, A) shifts from left to right and the region with two equilibrium points enlarges with the border shift. When $\lambda$ drops from 2 to 0, the driver can no longer distinguish the utilities. The $(C, N)$ region is merged into the $(S, N)$ region and the two-equilibrium region is merged into the $(S, A)$ region. 
The border between $(S, A)$ and $(C, A)$ shifts from left to right and a new no-equilibrium region appears inside the previous $(C, N)$ region. \begin{figure*}[htp] \centering \begin{subfigure}{0.25\textwidth} \centering \includegraphics[width=\textwidth]{qiziLamda10.png} \caption{$\lambda=10$} \end{subfigure}% \begin{subfigure}{0.25\textwidth} \centering \includegraphics[width=\textwidth]{qiziLamda3.png} \caption{$\lambda=3$} \end{subfigure}% \begin{subfigure}{0.25\textwidth} \centering \includegraphics[width=\textwidth]{qiziLamda1.png} \caption{$\lambda=1$} \end{subfigure}% \caption {Equilibrium points of the driver-car game with $\alpha=0.8$ under various prior beliefs on road condition} \label{fig:pureLambda} \end{figure*} \begin{remark} \normalfont When $\lambda=0$, the driver cannot distinguish the utilities at all and is completely random, so the concept of final response does not apply. The type of pure-strategy equilibrium strongly aligns with the priors of the driver and the car. The desired equilibria are $(C, N)$ and $(S, A)$, where the driver's action is in harmony with car's action. \end{remark} \begin{remark} \normalfont Since Fig. \ref{fig:pureAlpha} and Fig. \ref{fig:pureLambda} are plotted over $(p,q)$ axes, we can find out which type of equilibrium is most common. With the prescribed utilities, the most common pure-strategy equilibrium is $(S, A)$. This is the most favorable equilibrium, since following the car's recommendation in the dangerous road can save life. \end{remark} \begin{remark} \normalfont As the driver's ability to distinguish utilities weakens ($\lambda$ decreases), $(S, A)$ becomes more likely. This means that the driver follows the car's advice diligently especially when he/she is incapable of making decisions on a dangerous road. 
\end{remark} \section{Mixed-strategy equilibrium} \label{section:mix} When calculating the mixed-strategy equilibrium, $p$ and $p_A$ appear in the initial state of the open quantum model since the mixed-strategy of the car is completely determined by $p_A$ (ref. to Subsection \ref{section:eq}). Theorem 2 will give a closed-form expression of $Pr(C)$ by solving the vectorized Lindblad-Kossakowski equation (ref. to Definition \ref{Defn: Lindblad-Kossakowski vectorized}). \begin{figure*} \begin{thrm} Let the initial density matrix be given as $\rho_0=\ket{\Psi_0}\bra{\Psi_0},$ where \begin{equation*} \ket{\Psi_0}=\sqrt{p(1-p_A)/2}(\ket{e_1}+\ket{e_2})+\sqrt{pp_A/2}(\ket{e_3}+\ket{e_4}) +\sqrt{(1-p)(1-p_A)/2}(\ket{e_5}+\ket{e_6}) +\sqrt{(1-p)p_A/2}(\ket{e_7}+\ket{e_8}). \end{equation*} The probability that the driver chooses to continue is \begin{equation*} \displaystyle Pr(C) = \frac{2(1-\alpha)^2}{c} + \frac{\alpha^2}{c}r + rh(t) +\Big(\frac{1}{2}-\frac{2(1-\alpha)^2}{c}\Big)e^{-\alpha t} \cos \left[ 2(1-\alpha)t \right] -\frac{\alpha(1-\alpha)}{c}e^{-\alpha t} \sin \left[ 2(1-\alpha)t \right], \end{equation*} where $c=\alpha^2+4(1-\alpha)^2$, $r = p (1-p_A) C_{1,2}+ p p_A C_{3,4} + (1-p)(1-p_A)C_{5,6} + (1-p)p_A C_{7,8}$, and \begin{equation*} h(t) = \displaystyle \frac{\alpha}{c}e^{-\alpha t} \Big\{ 2(1-\alpha)\sin \left[ 2(1-\alpha)t \right] - \alpha \cos \left[2(1-\alpha)t \right] \Big\}. \end{equation*} \vspace{-2ex} \label{Thrm: Pr(C)} \end{thrm} \begin{proof} Interested readers may refer to Appendix \ref{sec: Proof of Pr(C)}. \end{proof} \vspace{-2ex} \end{figure*} \begin{remark} \normalfont The output of the open quantum model, $Pr(C)$, presented in Theorem \ref{Thrm: Pr(C)} consists of both transient and stationary parts. The transient part consists of sine and cosine multiplied by exponential decay. Thus, there always exists a steady state when the exponential decay rate $\alpha > 0$.
When $\alpha = 0$ or $\lambda = 0$, $Pr(C)=0$ and the driver is completely random and absent-minded. Furthermore, a driver with a higher $\alpha$ can make a decision faster. Thus $\alpha$ also represents the brain power and attentiveness of the driver. On the other hand, the parameter $\lambda$ appears only within the $C_{ij}$ terms, which are linearly weighted by monomial terms of $p$ and $p_A$. This is essentially prior probability multiplied by likelihood, or initial probability multiplied by transition probability. Viewing $\alpha$ and $p_A$ as constants, the steady-state $Pr(C)$ is a linear function of the driver's prior $p$. \end{remark} For brevity, the mixed-strategy equilibrium is denoted as ($p_A^*, p_C^*$). If the car knows that the driver will play $p_C^*$, then its expected payoffs from playing alert and no alert must be equal; otherwise, it either chooses to alert only or chooses not to alert only and does not need to mix between them. Thus we have \begin{equation} \begin{split} p_C^* [a_{1, s}q + a_{1, d}(1-q)] + (1- p_C^*)[b_{1, s}q + b_{1, d}(1-q)] \\= p_C^* [c_{1, s}q + c_{1, d}(1-q)] + (1- p_C^*)[d_{1, s}q + d_{1, d}(1-q)]. \end{split} \end{equation} Solving for $p_C^*$, we obtain \begin{equation} p_C^* = \frac{q\Delta_s + (1-q)\Delta_d}{q (\Delta_s +c_{1, s} -a_{1, s})+ (1-q) (\Delta_d +c_{1, d} -a_{1, d} )}. \label{Eqn: p_C^*} \end{equation} where $\Delta_s = b_{1, s}-d_{1, s}$ and $\Delta_d = b_{1, d}-d_{1, d}$. For the sake of illustration, we consider the bi-matrix game presented in Table \ref{tab:tab1}. Upon substituting the utility values in Table \ref{tab:tab1-sim} for this example in Equation \eqref{Eqn: p_C^*}, we obtain \begin{equation} \begin{array}{lcl} p_C^* & = & \displaystyle \frac{11-16q}{3q+1}. \end{array} \end{equation} Note that the above $p_C^*$ may be outside [0, 1]. If so, there is no mixed-strategy equilibrium. In order for $p_C^*$ to lie within [0, 1] under the prescribed utilities, $q$ must lie within [10/19, 11/16].
This is a very narrow range of $q$. Given $p_C^*$, the car can assign any $p_A$ to A because A and N give the same payoff. Next, we need to search for the $p_A$ that produces $p_C^*$. Such a $p_A$ is just the desired $p_A^*$. Since $p_C^*$ is completely determined by $q$, it is more convenient to plot $p_A^*$ versus $p$ and $p_C^*$. $p_A^*$ versus $p$ and $p_C^*$ with various $\lambda$s is shown in Fig. \ref{fig:mix}. The mixed-strategy equilibria only exist in a narrow band extending from a low-$p_C^*$-low-$p$ region to a high-$p_C^*$-high-$p$ region. There may not exist a mixed-strategy equilibrium for a given $q$, but there always exists one for a given $p$. When $p_C^*$ and $p$ increase, the band gets narrower. Within the band, the gradient of $p_A^*$ is perpendicular to the band, i.e., $p_A^*$ increases when $p$ increases and $p_C^*$ decreases simultaneously. When $\lambda$ decreases, the band gets flatter and the band first widens and then narrows. As $\alpha$ decreases, the band gets flatter and narrower. \begin{figure*}[htp] \centering \begin{subfigure}{0.25\textwidth} \centering \includegraphics[width=\textwidth]{mixLambda3.png} \end{subfigure}% \begin{subfigure}{0.25\textwidth} \centering \includegraphics[width=\textwidth]{mixLambda1.png} \end{subfigure}% \begin{subfigure}{0.25\textwidth} \centering \includegraphics[width=\textwidth]{mixAlpha6.png} \end{subfigure}% \begin{subfigure}{0.25\textwidth} \centering \includegraphics[width=\textwidth]{mixAlpha4.png} \end{subfigure} \caption {Existence of $p_A^*$ on the plane of $p$ and $p_C^*$ for different values of $\lambda$ and $\alpha$} \label{fig:mix} \end{figure*} \begin{remark} \normalfont In the case of mixed equilibria, when the driver is attentive, the equilibrium strategy $P^*_C$ is well aligned with her prior (higher $p$, higher $P^*_C$) as shown in Figure \ref{fig:mix}.
However, when the driver gradually loses her attention ($\alpha$ or $\lambda$ decreases), $P^*_C$ steadily approaches to 0.5 regardless of $p$. This means that the driver becomes uncertain to choose $C$ or $S$ at equilibrium, when she is inattentive. \end{remark} \section{Conclusion and Future Work} \label{section:conclusion} In this paper, we developed a strategic driver-assist system based on a novel vehicle-driver interaction game. While the car is modeled as an expected utility maximizer, the driver is characterized by open-quantum cognition model which models his/her attentiveness ($\alpha$) as well as sensitivity to the utilities ($\lambda$). Based on a novel equilibrium concept proposed to solve any general human-system interaction game, we showed that both the car and the driver employ a threshold-based rule on their respective priors regarding the road state at equilibrium. Through numerical results, we also demonstrated how these thresholds vary under different settings based on road conditions and agent behavior. Specifically, in our proposed framework, we showed that an inattentive driver $(\lambda \leq 1)$ would stop the car in about 65\% of all possible belief profile settings, and at least in 77\% of belief profiles settings when the car alerts the driver. On the contrary, if there were no driver-assist system in the car, an inattentive driver would have stopped the car only in 50\% of all possible belief profiles settings (the region where $p < 0.5$), and in about 38.5\% of all scenarios if the car were to alert the driver using our driver-assist system. At the same time, our proposed driver-assist system has improved persuasive ability by taking into account driver behavior, in addition to its inferences regarding the road state. This improvement in performance was demonstrated by the increase in threshold on a driver-conscient car's belief, as opposed to that of a driver-agnostic car. 
Furthermore, we also proved that there always exists a mixed strategy equilibrium for any given driver's prior, but only within a small range of the car's prior values. As the driver loses attention, we demonstrated that the mixed strategy of the driver at equilibrium drifts towards uniformly distributed probabilistic decisions. In the future, we will investigate repeated interaction games where the car can learn the driver's model parameters over multiple iterations. Furthermore, we will also incorporate the $B$ matrix in the Lindblad model to account for the effects of mental deliberation in resolving conflicts between his/her own prior and the car's signal. \section{Acknowledgment} Dr. S. N. Balakrishnan passed away just before this paper was ready for submission. However, Dr. Balakrishnan participated in every aspect of the research related to this paper. Therefore, we decided to still keep Dr. Balakrishnan as a co-author of this paper. \bibliographystyle{unsrt}
1,108,101,565,871
arxiv
\section*{\sc introduction} \indent \indent Currently nearly 400 000 variable stars are listed in the Variable Stars Index (VSX, {\it http://aavso.org/vsx}). In the General Catalogue of Variable Stars (Samus, et al., 2007-2016 \cite{samus}), the on-line version of which is available at {\it http://www.sai.msu.su/gcvs/gcvs/}, there are currently 52011 objects with official GCVS names. Among them, there are 10845 objects classified as the eclipsing ones, distributed among the subtypes as 5294 (EA), 3018 (EB), 1434 (EW), 1099 (E). Only few dozens of these objects were studied carefully, using not only photometric, but also spectral and (rarely) polarimetrical observations. For such rare objects, the "physical" modelling is possible with a determination of radii, masses, temperatures. The "standard" approach is so-called "Wilson -- Devinney" model \cite{wd71}, \cite{wd94}, which was realized in some famous programs "Binary Maker" \cite{bm}, Phoebe \cite{phoebe} and in the set of programs elaborated by S.Zo{\l}a et al. \cite{zola1}, \cite{zola2}. Different problems of the physical modelling were described e.g. by \cite{cher1993}, \cite{kopal1959}, \cite{kallrath2009}. For the majority of stars, there are only photometric observations, often obtained with one (or no) filter, thus the "physical" modelling is not possible because of unknown temperatures of the components and their mass ratio. 
In this case, only "phenomenological" modelling is available, which characterizes a smaller number of parameters, which describe the light curve, namely, the period $P,$ the initial epoch $T_0,$ brightness at primary maximum $m_{max}$ and primary minimum $m_{min},$ the duration of the eclipse $D.$ Additionally, the section "remarks" in the GCVS lists the brightness at the secondary minimum and (if different from $m_{max}$) at the secondary maximum, as well as the phase shift of the secondary minimum with respect to the phase 0.5 (significant in the case of elliptical orbits) \cite{samus}, \cite{tsessevich}. Typically the values of brightness and phases are determined using local approximations of observations in intervals, which include the extrema (either maximum, or minimum) (e.g. \cite{andr2005}). Some methods used the trigonometrical polynomial approximation of the complete light curve \cite{ruc1993}. Also there is a set of studies based on the "simplified physical" model, which suggests spherically symmetrical components with uniform brightness distribution \cite{shulberg},\cite{tk2013},\cite{malkov2007}. Recently, more accurate methods based on special shapes (patterns) of the minima have been actively used \cite{andr2010},\cite{andr2012}, \cite{mikulas2015}. Another algorithm to determine the statistically optimal degree $s$ of the trigonometrical polynomial (sometimes called the "restricted Fourier series") is based on the minimization of the r.m.s. estimate of the accuracy of the smoothing function at the arguments of observations \cite{a1994}, \cite{andr2003}. This method was effectively applied also for pulsating Mira-type variables \cite{kuda1996}. In this paper, we compare previously used approximations with the one using the special shape (pattern) and study behavior of the test function in the parameter space.
For illustration, we have used $n=1000$ values of the phenomenological "NAV" function (\cite{andr2012}) with fixed parameters, which is a model for the light curve of an EA-type eclipsing binary. \section*{\sc the methods of calculations: trigonometric polynomial} There were oversimplified models, which could be effective for automatic classification of numerous newly discovered variable stars using the surveys, e.g. the "EA" catcher with a parabolic shape of minima of equal width and different depth \cite{andr2000}. More recently, Papageorgiou et al. \cite{papa2014} proposed a model of parabolic shape either for the out-of-eclipse parts (phases (0.1 -- 0.4) and (0.6 -- 0.9) of the light curve), or for the eclipses (fixed phases (-0.2 -- 0.2), (0.3 -- 0.7)). The corresponding curve is shown in Fig.~\ref{fig1}. One may note a reasonably good approximation out of eclipses, but a bad approximation at the phases of minima because of an overestimated eclipse width. \begin{figure}[!h] \centering \epsfig{file = image1.pdf,width = 0.5\linewidth} \caption{The model light curve and its approximation by parabola at the intervals of phases centered on minima and maxima, as proposed by (Papageorgiou et al., 2014)}\label{fig1} \end{figure} \begin{figure}[!h] \centering \epsfig{file = image2.pdf,width = 0.5\linewidth} \caption{Trigonometrical polynomial approximations of the phenomenological light curve. The degree $s$ is shown by numbers near corresponding curves.}\label{fig2} \end{figure} Moreover, the light curve is not continuous. In the much earlier "EA" catcher \cite{andr2000}, the smoothing function was continuous, and the width (as well as the phase shift) was determined using non-linear least squares fitting (see examples of this and other functions in \cite{andr2016a}, \cite{andr2016b}, \cite{andr2016c}).
The approximation of the extrema of variable stars using the algebraic polynomial of the statistically optimal degree was realized in the software \cite{b2003}, \cite{andrandr2015}. The separate case of abrupt decline and inclined parts of the light curve, when the analytical function are obviously bad approximations, was discussed by \cite{andrandr2014}. The fixed width in the method \cite{papa2014} leads to systematic differences between the observations and the approximation. Next approximation is a trigonometrical polynomial \begin{eqnarray} x_c(\phi)&=& C_1+\sum_{j=1}^s(C_{2j}\cos(2\pi j\phi)+C_{2j+1}\sin(2\pi j\phi))\nonumber\\ &=&C_1+\sum_{j=1}^s R_j\cos(2\pi j(\phi-\phi_{0j})), \label{eq1} \end{eqnarray} where $\phi$ is the phase, $\phi_{0j}$ are initial phases corresponding to the maximum of the wave with the $j^{th}$ term of the sum, and $R_j$ are corresponding semi-amplitudes. The coefficients $C_\alpha$ $(\alpha=1..m=1+2s)$ are determined using the least squares method. The approximations, which use the trigonometric polynomial of different degrees $s$, is shown in Fig.~\ref{fig2}. One may note an expected refinement of the approximation with an increasing $s.$ The coefficients $C_{2j}$ are shown in Fig.~\ref{fig3}. They describe terms with a cosine function, so the "symmetrical" part of the light curve. 
For even $j,$ the absolute values are typically larger, what is explained by a similarity in depth of the primary and secondary minima, as the coefficients with even $j$ approximate a mean light curve with a double frequency, and the coefficients with odd $j$ approximate the difference: \begin{eqnarray} \frac{x_c(\phi)+x_c(\phi+0.5)}{2}&=& C_1+\sum_{k=1}^{s/2}(C_{4k}\cos(4\pi k\phi)+C_{4k+1}\sin(4\pi k\phi)),\\ \frac{x_c(\phi)-x_c(\phi+0.5)}{2}&=& \sum_{k=1}^{s/2}(C_{4k-2}\cos(2\pi (2k-1)\phi)+C_{4k-1}\sin(2\pi(2k-1)\phi)).\nonumber \label{eq2} \end{eqnarray} \begin{figure}[!h] \centering \epsfig{file = image3.pdf,width = 0.5\linewidth} \caption{Dependence of coefficients $C_j$ on $j$.}\label{fig3} \end{figure} \begin{figure}[!h] \centering \epsfig{file = image4.pdf,width = 0.5\linewidth} \caption{Dependence of the mean squared error estimate $\sigma[x_c]$ of the approximation for an additional noise with a r.m.s value of 0 (bottom), 0.01 (up) , 0.001 (middle).}\label{fig4} \end{figure} Additionally, if the O'Connell effect is practically absent (what is the case for the majority of objects) \cite{papa2014}, the terms with sine vanish, and one gets only sums of terms with cosines. At this dependence, the coefficients tend to zero, but too slowly. E.g. the last coefficient exceeding an arbitrary limiting value of 0.001 occurs at $j=64,$ so the corresponding number of parameters $m=1+2\cdot 64=129$ is extremely large. For the determination of the statistically optimal value of $s,$ different criteria may be used (e.g. \cite{a1994}, \cite{andr2003}). The first is based on the Fischer's criterion, which assumes uncorrelated observational errors obeying the normal distribution. For our data set (which contains computed values without any noise), this criterion is not applicable, as the deviations between the data and the approximation are systematic and not random. For real stars, we used this criterion as well (e.g. \cite{andr2016b},\cite{andr2003}). 
The second criterion is based on minimization of the r.m.s. accuracy estimate $\sigma[x_c]$ of the approximation $x_c(\phi)$ at the arguments of observations $\phi_k$: \begin{eqnarray} \sigma^2[x_c]&=&\frac{m}{n}\sigma^2_{0m},\nonumber\\ \sigma^2_{0m}&=&\frac{\Phi_m}{n-m},\\ \Phi_m&=&\sum_{k=1}^{n}w_k\cdot(x_k-x_c(\phi_k))^2.\nonumber \label{eq3} \end{eqnarray} Here $\sigma_{0m}$ is a "unit weight error", $\Phi_m$ as a "test" ("target") function to be minimized in the parameter space. The dependence of $\sigma[x_c]$ on the number of parameters $m$ is shown in Fig.~\ref{fig4}. In fact, it may be split into two almost monotonical sequencies for even and odd degrees of the trigonometric polynomial (as a consequence of the separate dependencies of the coefficients described above). The bottom sequence show a systematic decrease with $s,$ so formally the degree of the trigonometric polynomial should be extremely large, close to $n/2,$ i.e. the approximation tends to an interpolating function. This is because the data are precisely described by a function. In a real situation, the statistical errors are present, leading to qualitative and quantitative changes. For an illustration, we have suggested an additional observational noise with a standard error of (arbitrarily) 0.001 and 0.01. The resulting dependencies show a broad, but distinct minimum in Fig.~\ref{fig4} at $s=132$ and $s=34,$ respectively. The additional noise shifts the position of the minimum of the dependence of the r.m.s. value of the accuracy of the approximation towards smaller values, leading to the systematic shifts. Anyway, the degree is very large, leading to a significant number of coefficients, which are not statistically significant. 
\section*{\sc the methods of calculations: the NAV algorithm} To decrease the number of the parameters, Andronov \cite{andr2010}, \cite{andr2012} proposed the following approximation, which was called "the NAV" ("New Algol Variable") algorithm: \begin{eqnarray} x_c(\phi)&=& G_1+G_2\cos(2\pi\phi)+G_3\sin(2\pi\phi)\nonumber\\ && +G_4\cos(4\pi\phi)+G_5\sin(4\pi\phi)\\ && +G_6H(\phi-C_4, C_1,C_2)+G_7H(\phi-C_4+0.5, C_1,C_3),\nonumber \label{eq11} \end{eqnarray} where the shape (pattern) is localized to the phase interval \begin{equation} H(\zeta,C_1,\beta)=\left\{ \begin{array}{ll} V(z)=(1-|z|^\beta)^{3/2}, & {\rm if}\ |z|<1\\ 0, & {\rm if}\ |z| \geq 1 \end{array} \right. \label{eq12} \end{equation} where $z=\zeta/C_1,$ and $C_1$ is the eclipse half-width $(=D/200$, if using the eclipse full width in per cent, as defined in the GCVS \cite{samus} and required for the classification). The second-order trigonometrical polynomial is typically sufficient to describe the effects of reflection, ellipticity and asymmetry (O'Connell effect), and the $H$-functions describe the shapes of the minima, with a parameter $\beta,$ which is generally different for the primary ($\beta_1=C_2$) and secondary ($\beta_2=C_3$) minima. Generally, there may be a shift $\phi_0=C_4$. Phenomenological modeling of multi-color observations of a newly discovered eclipsing binary 2MASS J18024395 + 4003309 = VSX J180243.9+400331 is presented in \cite{a2015}. \begin{figure}[!h] \centering \epsfig{file = image5.pdf,width = 1.0\linewidth} \caption{Dependencies of the light curves (intensity vs. phase) on the parameters $C_8=D/2$ (left) and $C_9 = \beta_1$ (right). The relative shift in intensity between subsequent curves is 0.1.
The thick line shows a best fit curve.}\label{fig5} \end{figure} In Fig.~\ref{fig5}, we show dependencies of the best fit approximation with one parameter changing in a range, while other "non-linear" parameters $(C_1..C_4)$ are set to the best fit values, whereas the "linear" parameters $(G_1..G_7)$ are determined using the least squares subroutine. The central thick line coincides with our artificial data, which were used for an illustration. It is clearly seen that the change of one of the "non-linear" parameters leads to changes in the "linear" parameters and thus the approximation. In Fig.~\ref{fig6}, we show the "levels" - the lines of equal values of $\Phi_m$ at the two-parameter diagrams. They resemble deformed ellipses and show only a slight inclination close to the best fit point (marked by an arrow). The most drastic changes of the light curve are due to variations of the phase shift $C_4=\phi_0.$ There are only global minima of the function, except for the dependence with a phase shift $C_4.$ Such structure of the test function leads to the following algorithm of determination of the global minimum - at first, "brute force" determination of the minimum at a grid of values of $C_1..C_4$ with a further iterations using the differential corrections. However, if using the starting point at some middle point, the iterations may converge to a local minimum instead of the global one. \section*{\sc conclusions} The approximations with special pattern (also called "shape" or "profile") to fit the minima have much better quality of convergence of the smoothing curve with the data points. In this paper, we studied the dependence of the test function on four "non-linear parameters" $C_1..C_4,$ whereas the "linear" parameters $G_1..G_7$ are determined using the method of the least squares. 
The "NAV" ("New Algol Variable") algorithm is an effective tool presenting a good pattern for the minima, which may be improved by using an additional parameter, which describes its shape. \begin{figure}[!h] \centering \epsfig{file = image6.pdf,width = 1.0\linewidth} \caption{Lines of equal levels of the test-function for different pairs of the parameters. Arrows show the best fit point.}\label{fig6} \end{figure}
1,108,101,565,872
arxiv
\section{Introduction} \subsection{Backgrounds for this series} \subsubsection{$\mu$-cscK metric} Let $X$ be a compact K\"ahler manifold and $L$ be a K\"ahler class of an ample $\mathbb{Q}$-line bundle. (In this first article, $X$ is smooth. We may take transcendental $L$ in many arguments, but we assume rationality in order to adjust our framework to non-archimedean pluripotential theory. ) For a parameter $\lambda \in \mathbb{R}$ and a Hamiltonian real holomorphic vector field $\xi := \mathrm{Im} (\partial^\sharp \theta_\xi) = J \nabla \theta_\xi/2$, a K\"ahler metric $\omega \in L$ is called a \textit{$\mu^\lambda$-cscK metric with respect to $\xi$} if $L_\xi \omega = 0$ and the \textit{$\mu^\lambda_\xi$-scalar curvature} \[ s^\lambda_\xi (\omega) := s (\omega) + \Delta \theta_\xi - |\partial^\sharp \theta_\xi|^2 - \lambda \theta_\xi \] is constant. This curvature notion naturally comes up from the moment map picture on K\"ahler--Ricci soliton \cite{Ino1, Ino2}. Indeed, K\"ahler--Ricci soliton is equivalent to $\mu^{2\pi \lambda}$-cscK metric in the K\"ahler class $-\lambda^{-1} K_X$ on a Fano manifold $X$ (cf. \cite{Ino1}). The framework on $\mu$-cscK encloses both the frameworks on K\"ahler--Ricci solitons and cscK metrics. We refer \cite{Ino2} for foundational aspects of $\mu$-cscK metrics and \cite{Lah1, Lah2} for more general aspects. In the study \cite{Ino2}, we introduced the following \textit{$\mu$-volume functional}: \[ \log \mathrm{Vol}^\lambda (\xi) := \bar{s}^\lambda_\xi + \lambda \log \int_X e^{\theta_\xi} \omega^n. \] The functional is defined on the Lie algebra $\mathfrak{k}$ of a compact subgroup $K \subset \mathrm{Aut} (X, L)$ and is independent of the choice of $K$-invariant metric $\omega \in L$. For Fano manifold, the functional is equivalent to Tian--Zhu's volume functional \cite{TZ1} appeared in the study of K\"ahler--Ricci soliton. This series is mainly devoted to a further exploration of the $\mu$-volume functional. 
We study a natural extension of (the minus log of) the $\mu$-volume functional defined on the space of non-archimedean metrics ($\supset$ test configurations), which we call the \textit{non-archimedean $\mu$-entropy}. The extension is introduced based on the equivariant cohomological nature of this $\mu$-volume functional. When our computation has equivariant cohomological background, it is convenient to compute with the moment map $\mu_\xi = -\theta_\xi/2$ rather than with the $\bar{\partial}$-potential $\theta_\xi$. It is thus convenient to assign some terminology for $\mu^\lambda_{-\xi/2}$-cscK metric. To avoid confusion, we call it \textit{$\check{\mu}^\lambda_\xi$-cscK metric}. A \textit{$\mu^\lambda$-cscK metric} is a $\mu^\lambda_\xi$/$\check{\mu}^\lambda_\xi$-cscK metric for some $\xi$. \subsubsection{Digression: phase transition and extremal limit} Throughout this series, we study $\mu$-cscK metrics for a fixed parameter $\lambda$, especially for $\lambda \le 0$. It is observed in \cite{Ino2} that there are some interesting phenomena when varying $\lambda$, which inspires us to interpret the parameter $-\lambda$ as ``(empirical) temperature''. Though it is off the topic of this series, we briefly describe it here as it is a fascinating aspect. With this heuristic interpretation, the situation of this series can be understood as ``isothermal process''. (Alternatively, one may regard $\lambda^{-1}$ as ``(absolute) temperature''. The smaller $\lambda$, the higher the temperature. Negative temperature is hotter than any positive $\lambda > 0$, in view of the statistical role of the inverse temperature.)
Even on the simplest variety $\mathbb{C}P^1$, there appears a $\mu^\lambda_\xi$-cscK metric with respect to a non-trivial state $\xi \neq 0$ once the temperature $-\lambda$ gets across the ``phase transition point'' $-8\pi/\int_{\mathbb{C}P^1} \omega$. Such new \textit{non-trivial} $\mu^\lambda$-cscK metrics are unique modulo $\mathrm{Aut} (\mathbb{C}P^1)$ (in this case), while the Fubini--Study metric gives a \textit{trivial} $\mu^\lambda$-cscK metric. For this example, the non-trivial state $\xi \neq 0$ is breaking the symmetry: $\{ g \in \mathrm{Aut} (\mathbb{C}P^1) ~|~ g_* \xi = \xi \} = \mathbb{C}^\times$. In particular, the associated non-trivial possible state $\xi \neq 0$ tangent to a maximal compact group $SU (2) \subset \mathrm{Aut} (\mathbb{C}P^1)$ form a sphere $S^2 \subset \mathfrak{su} (2)$ as an orbit of the adjoint action of $SU (2)$. In view of the $\mu$-volume functional, the nontrivial $\mu^\lambda_\xi$-cscK metric turns into a new ``stable state'', while the Fubini--Study metric transforms into ``supercooled state''. \begin{figure}[h] \begin{center} \includegraphics[width=9cm]{muimage.pdf} \end{center} \caption{A typical image of the behavior of the minus log of the $\mu$-volume functional on $\mathfrak{u} (1) \subset \mathfrak{aut} (X, L)$. } \end{figure} Conversely, when the ``temperature'' is sufficiently high $-\lambda \gg 0$, the possible states $\xi$ associated to some $\mu^\lambda$-cscK metrics are uniquely determined for each ``temperature'' $-\lambda$ (cf. \cite[Theorem B (3)]{Ino2}), and is characterized as the minimizer of the $\mu$-volume functional. As ``temperature'' heats up $-\lambda \to + \infty$, the rescaled state $\lambda \xi$ converge to the extremal vector field $\xi_{\mathrm{ext}}$ (cf. \cite[Theorem D]{Ino2}). This implies the $\mu^\lambda$-scalar curvature converge to the extremal scalar curvature as $\lambda \to -\infty$: \[ s^\lambda_{\xi_\lambda} (\omega) ~ \longrightarrow ~ s (\omega) - \theta_{\xi_{\mathrm{ext}}}. 
\] Based on this observation, we showed the following in \cite{Ino2} and \cite{Lah1, Ino3}, respectively. \begin{itemize} \item If there is an extremal metric in $L$, we can construct by perturbation a family $\{ \omega_\lambda \}_{\lambda \ll 0}$ of $\mu^\lambda$-cscK metrics in $L$ converging to the extremal metric as $\lambda \to -\infty$. \item Conversely, if there are $\mu^\lambda$-cscK metrics in $L$ (or just $\mu^\lambda$K-semistable) for every $- \infty < \lambda \ll 0$, then $(X, L)$ is relatively K-semistable. \end{itemize} Therefore, we can understand extremal metric (resp. relative K-stability) as the limit of $\mu^\lambda$-cscK metrics (resp. $\mu^\lambda$K-stability). We conjecture the following uniqueness for ``high temperature'' case $-\lambda \ge 0$, which fails for $-\lambda \ll 0$ as we already noted. \begin{conj} \label{uniqueness} For $\lambda \le 0$, $\mu^\lambda$-cscK metrics are unique modulo the automorphism group. \end{conj} The main difficulty is that the $\mu$-volume functional is not convex in general, even when $\lambda \le 0$. As a partial evidence of the conjecture, we currently know the following. \begin{itemize} \item \cite{Lah2}: For a fixed $\lambda \in \mathbb{R}$ and $\xi$, $\mu^\lambda_\xi$-cscK metrics are unique modulo $\mathrm{Aut}_\xi (X, L)$. \item \cite{Ino2}: If $\mu^\lambda_\xi$-cscK metric exists for $\lambda \le 0$, then $\xi$ is a local minimizer of the $\mu^\lambda$-volume functional. Moreover, there are only finitely many local minimizers in the Lie algebra $\mathfrak{k}$ of any compact group $K \subset \mathrm{Aut} (X, L)$. \end{itemize} In this article, we will see $\xi$ is a global minimizer of the $\mu^\lambda$-volume functional, if $\mu^\lambda_\xi$-cscK metric exists for $\lambda \le 0$. \subsubsection{Equivariant intersection} Equivariant intersection is a basic language for describing $\mu$K-stability and our $\mu$-entropy of test configurations. 
It also brings us transparent understanding on some known results in K-stability (cf. \cite{Ino3, Leg}). The idea of using localization can be traced back to Futaki \cite{Fut}, although we apply localization in a slightly different way from his original work. We briefly explain the concept below. We refer \cite{EG1, GS, GGK} and the appendix of \cite{Ino3} for more information. Let $X$ be a complex $n$-dimensional compact complex space. For a $T$-equivariant (locally finite) homology class $D^T \in H^{\mathrm{lf}, T}_{2n-2} (X; \mathbb{R})$ and a $T$-equivariant cohomology class $L_T \in H^2_T (X; \mathbb{R})$, we define the (absolute) \textit{equivariant intersection} $(D^T. L_T^{n+k-1}) \in S^k \mathfrak{t}^\vee$ by the equivariant push-forward to the point: \[ (D^T. L_T^{n+k-1}) := p_* (D^T \frown L_T^{n+k-1}) \in H^{\mathrm{lf}, T}_{-2k} (\mathrm{pt}) = H^{2k}_T (\mathrm{pt}) = S^k \mathfrak{t}^\vee. \] We can regard $(D^T. L_T^{n+k-1})$ as a polynomial function on $\mathfrak{t}$ of degree $k$. We denote by $(D^T. L_T^{n+k-1}; \xi)$ the value of the polynomial at $\xi \in \mathfrak{t}$. When $T = \mathbb{C}^\times$, we write $(D^{\mathbb{C}^\times}. L_{\mathbb{C}^\times}^{n+k-1}; \tau. \eta)$ as $(D^{\mathbb{C}^\times}. L_{\mathbb{C}^\times}^{n+k-1}; \tau)$ for $\tau \in \mathbb{R}$ and the generating vector $\eta \in N \subset \mathfrak{t}$ corresponding to the identity $\mathrm{id}: \mathbb{C}^\times \to \mathbb{C}^\times$. By definition, equivariant intersection is the intersection on the infinite dimensional Borel construction $X \times_T ET$. We can identify the equivariant intersection with the usual finite dimensional (relative) intersection in the following way. Take a basis $\{ \chi_i \}$ of the character lattice $M$ of $T$ and denote by $\mathbb{C}_{\chi_i}$ the representations corresponding to $\chi_i$. 
Put $E_l T := \prod_{i=1}^{\mathrm{rk} T} (\mathbb{C}_{\chi_i}^{l+1} \setminus 0)$ for $l \gg k$, which is a finite dimensional approximation of the classifying space $ET$. We consider the $T$-action on $E_l T$ induced by $\chi_i$, and denote by $X \times_T E_l T$ the quotient $(X \times E_l T)/T$ with respect to the diagonal action. By the construction of the equivariant locally finite homology, $D^T$ is identified with a locally finite homology class of degree $(2n-2) + 2 l \mathrm{rk} T$ on $X \times_T E_l T$. On the other hand, $L_T$ gives a degree $2$ cohomology class on $X \times_T E_l T$. Therefore we may identify $D^T \frown L_T^{n+k-1}$ with a locally finite homology class of degree $-2k + 2 l \mathrm{rk} T$ on $X \times_T E_l T$ and $(D^T. L_T^{n+k-1})$ with its push-forward along $X \times_T E_l T \to E_l T / T$, which lives in $H^{2k} (E_l T/T) = H^{2k}_T (\mathrm{pt})$ by the Poincare duality. This construction is well-defined by \cite{EG1}. Let $w = \sum_{k=0}^\infty a_k z^k$ be an entire holomorphic function on $\mathbb{C}$. Using the equivariant resolution of $X$ and the Cartan model of equivariant locally finite homology, we showed in \cite{Ino3} that the following infinite series is locally uniformly absolutely-convergent and thus gives an entire holomorphic function on $\mathfrak{t} \otimes \mathbb{C}$: \[ (D^T. w (L_T); \xi) := \sum_{k=0}^\infty a_k (D^T. L_T^k; \xi). \] In the study of $\mu$K-stability, we applied this to the case $w (t) = e^t$. In this first article, we compute equivariant intersection using equivariant differential form. See \cite{Ino3} and \cite{GS, GGK} for this treatment. \subsubsection{$\mu$K-stability} Let $\xi$ be a Hamiltonian real holomorphic vector field generating a closed torus $T = \overline{\exp (\mathbb{R}. \xi)}$. We denote its complexification by the same symbol $T$. 
A polarized scheme $(X, L)$ is called \textit{$\check{\mu}^\lambda_\xi$K-semistable} if $\check{\mathrm{Fut}}^\lambda_\xi (\mathcal{X}, \mathcal{L}) \ge 0$ for every $T$-equivariant (normal) test configuration $(\mathcal{X}, \mathcal{L})$. Here the \textit{$\mu$-Futaki invariant} $\check{\mathrm{Fut}}^\lambda_\xi (\mathcal{X}, \mathcal{L}) := D_\xi \check{\mu} (\mathcal{X}, \mathcal{L}) + \lambda D_\xi \check{\sigma} (\mathcal{X}, \mathcal{L})$ for a normal test configuration $(\mathcal{X}, \mathcal{L})$ is defined by the following equivariant intersection: \begin{align*} D_\xi \check{\mu} (\mathcal{X}, \mathcal{L}) &:= 2 \pi \frac{(K_{\bar{\mathcal{X}}/\mathbb{C}P^1}^T. e^{\bar{\mathcal{L}}_T}; \xi) \cdot (e^{L_T}; \xi) - (K_X^T. e^{L_T}; \xi) \cdot (e^{\bar{\mathcal{L}}_T}; \xi) }{(e^{L_T}; \xi)^2}, \\ D_\xi \check{\sigma} (\mathcal{X}, \mathcal{L}) &:= \frac{(\bar{\mathcal{L}}_T. e^{\bar{\mathcal{L}}_T}; \xi) \cdot (e^{L_T}; \xi) - (L_T. e^{L_T}; \xi) \cdot (e^{\bar{\mathcal{L}}_T}; \xi) }{(e^{L_T}; \xi)^2} - \frac{(e^{\bar{\mathcal{L}}_T}; \xi)}{(e^{L_T}; \xi)}. \end{align*} It is shown in \cite{Lah1} and is restated in \cite{Ino3} that if a smooth polarized manifold $(X, L)$ admits a $\check{\mu}^\lambda_\xi$-cscK metric, then $(X, L)$ is $\check{\mu}^\lambda_\xi$K-semistable. In \cite{Ino3}, we introduce an equivariant characteristic class $\bm{\mu}^\lambda = \bm{\mu} + \lambda \bm{\sigma}$ for family of polarized schemes. The $\mu$-Futaki invariant is understood as ``\textit{the derivative of the $\mu$-character at $\xi$ to the direction of the test configuration $(\mathcal{X}, \mathcal{L})$}''. We justified this slogan by introducing a differential operator $\mathcal{D}_\xi$ on equivariant cohomology and applying it to the $\mu$-character $\bm{\mu}^\lambda$. This idea applies not only to test configuration, but also to family of polarized schemes, which yields an analogue of CM line bundle in our $\mu$K-stability context. 
For a test configuration $(\mathcal{X}, \mathcal{L})$, the $\mu$-character $\bm{\mu}^\lambda_{\mathbb{C}^\times} (\mathcal{X}, \mathcal{L})$ lives in $\hat{H}_{\mathbb{C}^\times} (\mathbb{C}, \mathbb{R})$. Identifying $\hat{H}_{\mathbb{C}^\times} (\mathbb{C}, \mathbb{R})$ with the ring $\mathbb{R} \llbracket \eta \rrbracket$ of formal power series, we can regard $\bm{\mu}^\lambda_{\mathbb{C}^\times} (\mathcal{X}, \mathcal{L})$ as a formal power series. We can easily see by the method in \cite{Ino3} that the corresponding power series actually converges around the origin and extends to a real analytic function $\bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \bullet)$ on $\mathbb{R}$. In this series, we are interested in the function $\bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \bullet)$ restricted to the half line $[0, \infty)$. In this first article, we do not use such background, so we define later the functional $\bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \bullet)$ by equivariant intersection formula on the compactified space $\bar{\mathcal{X}}$, which can be compared to this original description by localization. \subsection{Outline of this article} The story of this series is reminiscent of the pioneering works by Chi Li \cite{Li1} on the normalized volume in the context of Sasaki--Einstein metric, by He \cite{He}, Dervan--Sz\'ekelyhidi \cite{DS} and recent Han--Li \cite{HL2} in the context of K\"ahler--Ricci soliton, and by Donaldson \cite{Don}, Dervan \cite{Der2} and Xia \cite{Xia} in the context of extremal metric. We firstly introduce main concepts of this article. \subsubsection{Perelman's $W$-entropy as a functional on the tangent bundle} Let $X$ be a compact K\"ahler manifold and $L$ be a K\"ahler class. We denote by $\mathcal{H} (X, L)$ the space of K\"ahler metrics in the K\"ahler class $L$. 
For a reference K\"ahler metric $\omega_{\mathrm{ref}} \in \mathcal{H} (X, L)$, we obtain an open embedding $\mathcal{H} (X, L) \hookrightarrow C^\infty (X)/\mathbb{R}$ by assigning the K\"ahler potential $\varphi$ of a metric $\omega_\varphi = \omega_{\mathrm{ref}} + \sqrt{-1} \partial \bar{\partial} \varphi$. For another choice of the reference $\omega'_{\mathrm{ref}} = \omega_\psi$, the coordinate change is just given by parallel translation $\varphi \mapsto \varphi - \psi$, so we can identify the tangent bundle $T \mathcal{H} (X, L)$ with the product $\mathcal{H} (X, L) \times C^\infty (X)/\mathbb{R}$ in the canonical way: \[ \mathcal{H} (X, L) \times C^\infty (X)/\mathbb{R} \xrightarrow{\sim} T \mathcal{H} (X, L): (\omega, f) \mapsto \frac{d}{dt}\Big{|}_{t=0} (\omega + t \sqrt{-1} \partial \bar{\partial} f). \] We use this canonical identification to compute variations of functionals on $T\mathcal{H} (X, L)$. We sometimes call $f \in T_\omega \mathcal{H} = C^\infty (X)/\mathbb{R}$ a \textit{momentum} and a pair $(\omega, f)$ a \textit{state}. (smack of abuse of language...) For a K\"ahler metric $\omega \in \mathcal{H} (X, L)$ and $f \in C^{0, 1} (X)$, the (normalized) \textit{$W$-entropy} is defined as \begin{equation} \check{W}^\lambda (\omega, f) := - \frac{\int_X \big{(} s (\omega) + |\partial^\sharp f|^2 - \lambda (n+f) \big{)} e^f \omega^n}{\int_X e^f \omega^n} - \lambda \log \int_X e^f \frac{\omega^n}{n!}. \end{equation} We can write as $\check{W}^\lambda = \check{W} + \lambda \check{S}$ by putting \begin{align} \check{W} (\omega, f) &:= - \frac{\int_X \big{(} s (\omega) + |\partial^\sharp f|^2 \big{)} e^f \omega^n}{\int_X e^f \omega^n}, \\ \check{S} (\omega, f) &:= \frac{\int_X (n+f) e^f \omega^n}{\int_X e^f \omega^n} - \log \int_X e^f \frac{\omega^n}{n!}. 
\end{align} As $\check{W}^\lambda (\omega, f+c) = \check{W}^\lambda (\omega, f)$, we may regard $\check{W}^\lambda$ as a functional on the tangent bundle $T \mathcal{H} (X, L)$ (by restricting to $C^\infty (X) \subset C^{0,1} (X)$). We recall our $W$-entropy is a scaling of Perelman's original definition \cite{Per}: \[ W (g, f, \tau) = \int_X (\tau (R (g) + |\nabla f|^2) - (2n - f)) e^{-f} \frac{\mathrm{vol}_g}{(4\pi \tau)^n} \] for $f$ normalized as $\int_X e^f \mathrm{vol}_g/(4\pi \tau)^n = 1$. Here $R (g)$ denotes the Riemannian scalar curvature for a general Riemannian metric and is $2 s (g)$ when $g$ is K\"ahler. While $\tau$ is assumed to be positive (and is regarded as a reverse time $\tau = T_0 -t$) in Perelman's study on Ricci flow, our $W$-entropy behaves well for $\tau^{-1} = \lambda \le 0$ in our study of $\mu$-cscK metrics, mainly due to the invertibility of the operator $\Delta - \nabla f - \lambda$. In our framework, we are interested in the functional on the space of K\"ahler metrics, not on the whole Riemannian metrics, to encounter with holomorphy. As in \cite{Per}, the \textit{$\mu$-entropy} is defined as \begin{equation} \bm{\check{\mu}}^\lambda (\omega) := \sup_{f \in C^{0,1} (X)} \check{W}^\lambda (\omega, f). \end{equation} We will see in section \ref{Perelman's W-functional and mu-entropy} that the supremum is indeed attained by a smooth function. The functional $\bm{\check{\mu}}^\lambda: \mathcal{H} (X, L) \to \mathbb{R}$ is lower semicontinuous by definition, while the smoothness, even the continuity, is not evident. In section \ref{mu-cscK metrics and Perelman's $W$-entropy}, we show the smoothness in the case $\lambda \le 0$. This functional is intensively studied especially for Fano manifold in many literatures including \cite{He, TZ2, TZZZ, DS}. We discuss this in section \ref{Observations, He}. In this article, we study this functional for a general polarized manifold. The functional plays a role analogous to the Calabi functional. 
We will see in section \ref{Observations, Calabi} that the $\mu$-entropy is indeed connected to the Calabi functional by the extremal limit $\lambda^{-1} \to 0$. The functional $\bm{\check{\mu}}^\lambda$ is bounded from below: \begin{equation} \bm{\check{\mu}}^\lambda (\omega) \ge \check{W}^\lambda (\omega, 0) = 2\pi \frac{(K_X. L^{\cdot n-1})}{L^{\cdot n}} + \lambda (n - \log \frac{(L^{\cdot n})}{n!}). \end{equation} This is contrast to the (weighted) Mabuchi/Ding functional as these are unbounded when (weighted) K/D-unstable. \subsubsection{Test configuration} Schemes are of finite type over $\mathbb{C}$ throughout this series. In this series, a \textit{test configuration} of a polarized scheme $(X, L)$ is a triple $(\mathcal{X}, \mathcal{L}; \tau)$ of the following data. \begin{itemize} \item $\mathcal{X}$ is a scheme endowed with a $\mathbb{C}^\times$-action, a proper flat surjective morphism $\varpi: \mathcal{X} \to \mathbb{C}$ which is $\mathbb{C}^\times$-equivariant with respect to the scaling action on $\mathbb{C}$, and a $\mathbb{C}^\times$-equivariant isomorphism $j_\circ : X \times \mathbb{C}^* \xrightarrow{\sim} \varpi^{-1} (\mathbb{C}^*)$. \item $\mathcal{L}$ is a $\mathbb{C}^\times$-equivariant relatively ample $\mathbb{Q}$-line bundle on $\mathcal{X}$ satisfying $j_\circ^* \mathcal{L} = p_X^* L$ on $X \times \mathbb{C}^*$. We often identify $\mathcal{L}$ with an element of the equivariant cohomology $H^2_{\mathbb{C}^\times} (\mathcal{X}; \mathbb{R})$. When we emphasize that $\mathcal{L}$ is a $\mathbb{C}^\times$-equivariant class, we write it as $\mathcal{L}_{\mathbb{C}^\times}$. \item $\tau$ is a non-negative real number. (scaling parameter) \end{itemize} In the non-archimedean pluripotential theory (cf. \cite{BHJ1, BJ1, BJ2}), a test configuration $(\mathcal{X}, \mathcal{L}; \tau)$ is understood as the rescaled non-archimedean metric $\phi_{(\mathcal{X}, \mathcal{L}), \tau}$ of $\phi_{(\mathcal{X}, \mathcal{L})}$. 
The scaling parameter $\tau$ was veiled in literatures as it can be dismissed thanks to the fact that `classical' invariants for test configuration, such as Donaldson--Futaki invariant and norms, are homogeneous with respect to $\tau$. In analytic viewpoint, these `classical' invariants are homogeneous because it is explained as the slope of some functionals \textit{on the space $\mathcal{H} (X, L)$ of K\"ahler potentials}. In our study, we are interested in $W$-entropy and its action funtional along geodesic rays. The $W$-functinal is defined \textit{on the tangent bundle $T \mathcal{H} (X, L)$ of the space of K\"ahler potentials}, not on $\mathcal{H} (X, L)$. This difference yields the non-linearlity of the $\mu$-entropy with respect to $\tau$. We denote by $\mathbb{C}_-$ the affine space $\mathbb{C}$ endowed with the reverse scaling $\mathbb{C}^\times$-action: $z.t = t^{-1} z$. We can compactify $\mathcal{X}$ by equivariantly gluing $X \times \mathbb{C}_-$ and $\mathcal{X}$ over $X \times \mathbb{C}^*$. Similarly we obtain a natural extension $\bar{\mathcal{L}} \to \bar{\mathcal{X}}$ of $\mathcal{L}$. We can shift the original $\mathbb{C}^\times$-action on $\mathcal{L}$ by a weight $m \in \mathbb{Z}$ of $\mathbb{C}^\times$. Under this shifting, the extension $\bar{\mathcal{L}}$ is replaced with $\bar{\mathcal{L}} + m [\mathcal{X}_0]$. Since $\bar{\mathcal{L}}$ is relatively ample, we can take $m$ so that $\bar{\mathcal{L}} + m [\mathcal{X}_0] = \bar{\mathcal{L}} + \varpi^* \mathcal{O} (m)$ is ample. As this shifting will not affect the interested invariant $\bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \tau)$, we sometimes assume $\bar{\mathcal{L}}$ is ample by shifting. (We must pay attention that the moment map associated to $\bar{\mathcal{L}}$ also shifts, so that we are not free to choose the normalization of the moment map when we make use of the ampleness of $\bar{\mathcal{L}}$. 
) We call a test configuration $(\mathcal{X}, \mathcal{L}; \tau)$ \textit{normal (resp. smooth)} if $\mathcal{X}$ is normal (resp. smooth) and \textit{snc smooth} if it is smooth and the central fibre $\mathcal{X}_0$ is supported on an snc divisor on $\mathcal{X}$. We say a test configuration $(\mathcal{X}, \mathcal{L}; \tau)$ \textit{dominates} another test configuration $(\mathcal{X}', \mathcal{L}'; \tau)$ if the canonical birational map $j'_\circ \circ j_\circ: \mathcal{X} \dashrightarrow \mathcal{X}'$ extends to a morphism $\mathcal{X} \to \mathcal{X}'$. The \textit{trivial configuration} is the product $(X \times \mathbb{C}, L \times \mathbb{C})$ endowed with the scaling $\mathbb{C}^\times$-action $(x, \tau). t = (x, \tau t)$. For a normal test configuration $(\mathcal{X}, \mathcal{L}; \tau)$, we denote by $(\mathcal{X}_d, \mathcal{L}_d; \tau)$ the normalization of the base change of $(\mathcal{X}, \mathcal{L})$ along $z^d: \mathbb{C} \to \mathbb{C}$. We have $\phi_{(\mathcal{X}_d, \mathcal{L}_d), \tau} = \phi_{(\mathcal{X}, \mathcal{L}), d \tau}$ for the associated non-archimedean metrics. \subsubsection{$\mu$-entropy of test configuration} For a normal test configuration $(\mathcal{X}, \mathcal{L}; \tau)$, we define its \textit{$\mu^\lambda$-entropy} $\bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \tau)$ by \begin{align} \bm{\check{\mu}} (\mathcal{X}, \mathcal{L}; \tau) &:= 2 \pi \frac{(K_X. e^L) - \tau. (K_{\bar{\mathcal{X}}/\mathbb{C}P^1}^{\mathbb{C}^\times}. e^{\bar{\mathcal{L}}_{\mathbb{C}^\times}}; \tau)}{(e^L) - \tau. (e^{\bar{\mathcal{L}}_{\mathbb{C}^\times}}; \tau)}, \\ \bm{\check{\sigma}} (\mathcal{X}, \mathcal{L}; \tau) &:= \frac{(L. e^L) - \tau. (\bar{\mathcal{L}}_{\mathbb{C}^\times}. e^{\bar{\mathcal{L}}_{\mathbb{C}^\times}}; \tau)}{(e^L) - \tau. (e^{\bar{\mathcal{L}}_{\mathbb{C}^\times}}; \tau)} - \log \Big{(} (e^L) - \tau. 
(e^{\bar{\mathcal{L}}_{\mathbb{C}^\times}}; \tau) \Big{)}, \\ \bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \tau) &:= \bm{\check{\mu}} (\mathcal{X}, \mathcal{L}; \tau) + \lambda \bm{\check{\sigma}} (\mathcal{X}, \mathcal{L}; \tau). \end{align} By equivariant localization (cf. \cite{GGK, Ino3}), we can write these invariants as \begin{align*} \bm{\check{\mu}} (\mathcal{X}, \mathcal{L}; \tau) &= 2\pi \frac{(\kappa_{\mathcal{X}_0}. e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau)}{(e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau)}, \\ \bm{\check{\sigma}} (\mathcal{X}, \mathcal{L}; \tau) &= \frac{(\mathcal{L}|_{\mathcal{X}_0}. e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau)}{(e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau)} - \log (e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau), \end{align*} which relates the definition in this first article to the $\mu$-character introduced in \cite{Ino3} and that in the second article \cite{Ino4}. Here $\kappa_{\mathcal{X}_0} \in \mathrm{CH}^{\mathbb{C}^\times} (\mathcal{X}_0, \mathbb{Q})$ denotes the $\mathbb{C}^\times$-equivariant canonical Chow class of the central fibre, which can be defined for general scheme (cf. \cite{Ful, EG2} or \cite{Ino3}). In this first article, we only use these localized expression as a simplified expression of the equivariant intersection in the above definition. It is observed in \cite{Ino2, Ino3} that for the product configuration $(\mathcal{X}, \mathcal{L}) = (X_{\mathbb{C}, \xi}, L_{\mathbb{C}, \xi})$ associated to (the fundamental vector field $\xi$ of) a $\mathbb{C}^\times$-action on $X$, we have \[ \bm{\check{\mu}}^\lambda (X_{\mathbb{C}, \xi}, L_{\mathbb{C}, \xi}; \tau) = -\log \frac{\mathrm{Vol}^\lambda (-\tau \xi/2)}{(e^n. n!)^\lambda} = \check{W}^\lambda (\omega, \mu_{\tau \xi}). 
\] Since the central fibre is reduced, we have $\bm{\check{\mu}}^\lambda (X_{\mathbb{C}, d \xi}, L_{\mathbb{C}, d \xi}; \tau) = \bm{\check{\mu}}^\lambda ((X_{\mathbb{C}, \xi}, L_{\mathbb{C}, \xi})_d; \tau) = \bm{\check{\mu}}^\lambda (X_{\mathbb{C}, \xi}, L_{\mathbb{C}, \xi}; d \tau)$ for non-negative integer $d$. Thus for a torus action $(X, L) \circlearrowleft T$, we can define the functional $\bm{\check{\mu}}^\lambda (X, L; \bullet): \mathbb{Q} \otimes N \to \mathbb{R}$ by $p/q \otimes \xi \mapsto \bm{\check{\mu}}^\lambda (X_{\mathbb{C}, \xi}, L_{\mathbb{C}, \xi}; p/q)$, where $N \subset \mathfrak{t}$ is the lattice of one parameter subgroups. We can extend this functional to the whole $\mathfrak{t}$ continuously by putting \begin{equation} \label{mu-entropy for vector} \bm{\check{\mu}}^\lambda (X, L; \xi) := \check{W}^\lambda (\omega, \mu_\xi) = -\log \frac{\mathrm{Vol}^\lambda (-\xi/2)}{(e^n. n!)^\lambda} \end{equation} for general $\xi \in \mathfrak{t}$, using a $\xi$-invariant K\"ahler metric $\omega \in L$ and a moment map $\mu$ which satisfies $-d\mu_\xi = i_\xi \omega$. As observed in \cite{Ino2}, this is independent of the choice of the $\xi$-invariant K\"ahler metric $\omega \in L$. By this description, we obviously have $\bm{\check{\mu}}^\lambda (X, L; \xi) \le \bm{\check{\mu}}^\lambda (\omega)$ for any $\xi$-invariant K\"ahler metric $\omega \in L$. We will see this indeed holds for non-invariant general $\omega \in L$. \subsubsection{Main results of the first paper} Here we collect the main results of this article. The first result is about the characterization of $\mu^\lambda$-cscK metrics in terms of $W$-entropy. \begin{thm} The following are equivalent for a state $(\omega, f) \in T\mathcal{H} (X, L)$. \begin{itemize} \item The vector field $\partial^\sharp f$ is holomorphic and the metric $\omega$ is a $\mu^\lambda$-cscK metric with respect to $\xi = \mathrm{Im} \partial^\sharp f$. 
\item The state $(\omega, f)$ is a critical point of $\check{W}^\lambda: T \mathcal{H} (X, L) \to \mathbb{R}$. \end{itemize} \end{thm} When $\lambda \le 0$, we can reinterpret this variational result in terms of $\mu$-entropy. \begin{thm} \label{main theorem on mu-entropy} When $\lambda \le 0$, the $\mu$-entropy $\bm{\check{\mu}}^\lambda$ is smooth on $\mathcal{H} (X, L)$. In this case, the following are equivalent for a K\"ahler metric $\omega \in \mathcal{H} (X, L)$. \begin{enumerate} \item[(a)] $\omega$ is a $\mu^\lambda$-cscK metric. \item[(b)] $\omega$ is a minimizer of $\bm{\check{\mu}}^\lambda: \mathcal{H} (X, L) \to \mathbb{R}$. \item[(b')] $\omega$ is a critical point of $\bm{\check{\mu}}^\lambda: \mathcal{H} (X, L) \to \mathbb{R}$. \item[(b'')] There is $\xi$ such that $\bm{\check{\mu}}^\lambda (X, L; \xi) = \bm{\check{\mu}}^\lambda (\omega)$. \end{enumerate} \end{thm} We firstly give a proof for the implication (a) $\iff$ (b') $\iff$ (b'') $\Longleftarrow$ (b) in section \ref{mu-cscK metrics and Perelman's $W$-entropy}. The rest implication (a) $\Longrightarrow$ (b) is concluded in section \ref{W-functional along geodesic rays} as a consequence of the following. \begin{thm} For $\lambda \in \mathbb{R}$, we have \[ \sup_{(\mathcal{X}, \mathcal{L}), \tau \ge 0} \bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \tau) \le \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi), \] where $(\mathcal{X}, \mathcal{L}; \tau)$ runs over all test configurations. \end{thm} This inequality is analogous to Donaldson's lower bound \cite{Don} (resp. \cite{He, DS}) for Calabi functional (resp. $H$-functional). These are indeed related by extremal limit $\lambda \to - \infty$ as explained in section \ref{Observations, Calabi}. We will reformulate this result in the second article as we explain later. The first two theorems are proved in section \ref{mu-cscK metrics and Perelman's $W$-entropy}. 
The last theorem is proved in section \ref{Slope formula}, based on the monotonicity established in section \ref{Monotonicity}. We employ Berman--Berndtsson's subharmonicity argument \cite{BB} there. \subsubsection{Conventions for K\"ahlerian tensor calculus} Here we fix our convention for K\"ahlerian tensor calculus used throughout this series. Let $X$ be a complex manifold. We put $d^c := \frac{\sqrt{-1}}{2} (\bar{\partial} - \partial)$ so that it is a real operator satisfying $dd^c = \sqrt{-1} \partial \bar{\partial}$. Usually, $d^c$ is divided by $\pi$ or $2\pi$ from ours, though, we prefer to use our convention as this difference affects to the geodesic equation and hence to our computation on equivariant integration: for $a \in \mathbb{R}$, a (smooth) path of K\"ahler metrics $\omega + d (a. d^c) \phi_t$ is a geodesic iff $\ddot{\phi}_t - a. |\bar{\partial} \dot{\phi}_t|^2 = 0$. When we consider $(1,1)$-forms in a cohomology class $[\alpha]$, we often identify a smooth function $\varphi$ and the associated $(1,1)$-form $\alpha_\varphi = \alpha + \sqrt{-1} \partial \bar{\partial} \varphi = \alpha + dd^c \varphi$. We use the notation ``$\varphi \in C^\infty (X, \alpha)$'' to clarify this situation: ``we are now identifying $\varphi \in C^\infty (X)$ with the $(1,1)$-form $\alpha_\varphi$''. As usual, for a K\"ahler form $\omega$, we denote by $\mathcal{H} (X, \omega)$ the set of smooth functions satisfying $\omega_\varphi > 0$. We often denote by $L$ a K\"ahler class and by $\mathcal{H} (X, L)$ the set of K\"ahler metrics in $L$, which is sometimes implicitly assumed to be integral to simplify arguments. Just in order not to make a confusion, we avoid the following simple convention familiar in pluri-potential context: denote by $(1,1)$-form $dd^c \varphi$ for $\varphi$ representing a collection of local potentials $\varphi = \{ \varphi_\alpha \}_\alpha$ or a metric $h = e^{-\varphi}$ on a line bundle. 
Instead, we always fix a reference $\alpha$ and use the notation $dd^c_\alpha \varphi := \alpha_\varphi = \alpha + dd^c \varphi$ for $\varphi \in C^\infty (X, \alpha)$. For the complex vector fields \[ \partial^\sharp f = g^{i \bar{\jmath}} f_{\bar{\jmath}} \frac{\partial}{\partial z^i} = \frac{1}{2} (\nabla f - \sqrt{-1} J\nabla f), \quad \bar{\partial}^\sharp f = g^{i \bar{\jmath}} f_i \frac{\partial}{\partial \bar{z}^{\jmath}} = \frac{1}{2} (\nabla f + \sqrt{-1} J\nabla f), \] we frequently use the following fact: for $f \in C^\infty_\mathbb{R} (X)$ and $u, v \in C^\infty_{\mathbb{C}} (X)$, we have \begin{align*} \int_X (\bar{\Box} - \bar{\partial}^\sharp f) u. \bar{v} ~e^f \omega^n &= \int_X (\bar{\partial} u, \bar{\partial} v) e^f \omega^n = \int_X u. (\Box - \partial^\sharp f) \bar{v} ~e^f \omega^n, \end{align*} where $(, )$ denotes the hermitian metric associated to the metric $\omega$. A \textit{real holomorphic vector field} is a real vector field $\xi$ such that $\xi^J := J\xi + \sqrt{-1} \xi$ is holomorphic ($\Leftrightarrow L_\xi J =0$). Let $\mathfrak{h} (X)$ denote the space of real holomorphic vector fields. We put \begin{align} \mathfrak{h}_0 (X) &:= \{ \xi \in \mathfrak{h} (X) ~|~ \exists \theta \in C^\infty_{\mathbb{C}} (X) \text{ s.t. } \xi = \mathrm{Im} \partial^\sharp \theta \}, \\ \mathfrak{h}_c (X, L) &:= \{ \xi \in \mathfrak{h}_0 (X) ~|~ \xi \in \mathfrak{t} \text{ for a closed torus } T \subset \mathrm{Aut} (X, L) \}, \\ {^\nabla \mathfrak{isom}} (X, \omega) &:= \{ \xi \in \mathfrak{h}_0 (X) ~|~ \exists \theta \in C^\infty_{\mathbb{R}} (X) \text{ s.t } \xi = \mathrm{Im} \partial^\sharp \theta \}. \end{align} The subspaces $\mathfrak{h}_0 (X), {^\nabla \mathfrak{isom}} (X, \omega) \subset \mathfrak{h}$ are linear. On the other hand, $\mathfrak{h}_c (X, L)$ is not linear but just the orbit of a linear subspace by the conjugate action. 
Since ${^\nabla \mathfrak{isom}} (X, \omega) \subset \mathfrak{isom} (X, \omega)$, we have ${^\nabla \mathfrak{isom}} (X, \omega) \subset \mathfrak{h}_c (X, L)$. In this series, we take a vector from $\mathfrak{h}_c (X, L)$ unless we specify the domain. \subsubsection*{Acknowledgement} I wish to thank Tomoyuki Hisamoto for his interest and frequent helpful discussions since I had been in Kyoto. I would like to express my deep gratitude to Abdellah Lahdili for his interest and stimulating discussions, which motivates me greatly and accelerates this study. I am also grateful to Ruadha\'i Dervan, Akito Futaki, Chi Li and Yuji Odaka for helpful comments and valuable questions. \newpage \section{Perelman's $\mu$-entropy and $\mu$-cscK metrics} \subsection{Perelman's $W$-entropy and $\mu$-entropy} \label{Perelman's W-functional and mu-entropy} We firstly study $\check{W}^\lambda$ restricted to the fibre direction $T_\omega \mathcal{H} \subset T \mathcal{H}$. \subsubsection{Variational formula on critical momentum} We put \[ s^\lambda_f (\omega) := (s (\omega)+ \bar{\Box} f) + (\bar{\Box} f - |\partial^\sharp f|^2) -\lambda f \] for $f \in C^\infty (X)$ and \[ \bar{s}^\lambda_f (\omega) \int_X (s (\omega) +|\bar{\partial}^\sharp f|^2 -\lambda f) e^f \omega^n \Big{/} \int_X e^f \omega^n \] for $f \in C^{0,1} (X)$. We note $\bar{s}^\lambda_f (\omega) = \int_X s^\lambda_f (\omega) e^f \omega^n / \int_X e^f \omega^n$ for $f \in C^\infty (X)$. The following is firstly observed in \cite{Per} and in \cite{TZ2} for Fano manifold. \begin{lem} \label{Variational formula} Fix a smooth K\"ahler metric $\omega$ on $X$. We have the following. \begin{enumerate} \item A momentum $f \in C^{0,1} (X)$ is a critical point of $\check{W}^\lambda (\omega, \cdot)$ iff it is smooth and satisfies $s^\lambda_f (\omega) = \bar{s}^\lambda_f (\omega)$. 
\item If $f \in C^\infty (X)$ is a critical point of $\check{W}^\lambda (\omega, \cdot)$, then we have \[ \frac{d^2}{dt^2}\Big{|}_{t=0} \check{W}^\lambda (\omega, f + t u) = -\int_X (2|\partial^\sharp u|^2 - \lambda u^2) e^f \omega^n \Big{/} \int_X e^f \omega^n \] for every $u \in C^\infty (X)$ with $\int_X u e^f \omega^n = 0$, which is negative if $\lambda$ is less than the first eigenvalue of $\Delta - \nabla f$, especially when $\lambda \le 0$. \end{enumerate} \end{lem} \begin{proof} We compute \begin{align*} \frac{d}{dt}\Big{|}_{t=0} \check{W}^\lambda (\omega, f + t u) &= - \int_X \Big{(} 2(\bar{\partial}^\sharp f, \bar{\partial}^\sharp u) + (s(\omega) + |\bar{\partial}^\sharp f|^2 -\lambda f ) u \Big{)} e^f \omega^n \Big{/} \int_X e^f \omega^n \\ &\qquad+ \bar{s}^\lambda_f \int_X u e^f \omega^n \Big{/} \int_X e^f \omega^n, \end{align*} which can be arranged as \[ - \int_X (s^\lambda_f (\omega) - \bar{s}^\lambda_f (\omega)) u ~e^f \omega^n \Big{/} \int_X e^f \omega^n \] when $f$ is smooth, using integration by parts. For a less regular critical point $f \in C^{0,1} (X)$, we have \[ (s (\omega) + \bar{\Box} f) + (\bar{\Box} f - |\partial^\sharp f|^2) - \lambda f = \bar{s}^\lambda_f \] in the distributional sense. The elliptic bootstrap argument shows that $f$ is indeed smooth and satisfies $s^\lambda_f (\omega) = \bar{s}^\lambda_f (\omega)$ in the usual sense. For the second claim, we firstly compute \begin{align*} \frac{d}{dt}\Big{|}_{t=0} \bar{s}^\lambda_{f + t u} &= \left( \int_X (\bar{\Box} u - \lambda u) e^ f \omega^n + \int_X (s (\omega) + \bar{\Box} f - \lambda f) u ~ e^f \omega^n \right) \Big{/} \int_X e^f \omega^n - \bar{s}^\lambda_f \int_X u e^f \omega^n \Big{/} \int_X e^f \omega^n \\ &= \int_X (s^\lambda_f - \bar{s}^\lambda_f) u ~ e^f \omega^n \Big{/} \int_X e^f \omega^n - \lambda \int_X u e^f \omega^n \Big{/} \int_X e^f \omega^n. 
\end{align*} We exhibit the second variation for general $u$ as an independent interest: \begin{align*} -\frac{d^2}{dt^2}\Big{|}_{t=0} &\check{W}^\lambda (\omega, f + t u) = \frac{d}{dt}\Big{|}_{t=0} \int_X (s^\lambda_{f + t u} - \bar{s}^\lambda_{f + t u}) u ~ e^{f + t u} \omega^n \Big{/} \int_X e^{f + t u} \omega^n \\ &= \left( \int_X (2 \bar{\Box} u - 2 \bar{\partial}^\sharp f (u) - \lambda u) u ~e^f \omega^n + \int_X s^\lambda_f u^2 ~e^f \omega^n \right) \Big{/} \int_X e^f \omega^n \\ &\qquad - \int_X s^\lambda_f u ~e^f \omega^n \Big{/} \int_X e^f \omega^n \cdot \int_X u~ e^f \omega^n \Big{/} \int_X e^f \omega^n \\ &\quad - \frac{d}{dt}\Big{|}_{t=0} \bar{s}^\lambda_{f + t u} \int_X u ~e^f \omega^n \Big{/} \int_X e^f \omega^n \\ &\qquad - \bar{s}^\lambda_f \int_X u^2 ~e^f \omega^n \Big{/} \int_X e^f \omega^n + \bar{s}^\lambda_f \Big{(} \int_X u ~e^f \omega^n \Big{/} \int_X e^f \omega^n \Big{)}^2 \\ &= \int_X \Big{(} 2 |\bar{\partial}^\sharp u|^2 + (s^\lambda_f - \bar{s}^\lambda_f - \lambda) (u - \bar{u}_f)^2 \Big{)} ~e^f \omega^n \Big{/} \int_X e^f \omega^n \end{align*} where we put $\bar{u}_f = \int_X u e^f \omega^n / \int_X e^f \omega^n$. This proves the claim by the first result. \end{proof} \subsubsection{Maximal momentum} The following is due to \cite{Rot}, which covers the case $\lambda > 0$ but also works for the case $\lambda \le 0$ with a minor change. Here we draw the proof to clarify the difference and to make our arguments self-contained. The case $\lambda \le 0$ is slightly simpler than the case $\lambda > 0$. \begin{thm} For each $\lambda \in \mathbb{R}$ and $\omega \in \mathcal{H} (X, L)$, there exists a smooth function $f \in C^\infty (X)$ which attains the maximum of the functional $\check{W}^\lambda (\omega, \cdot): C^{0, 1} (X) \to \mathbb{R}$. In fact, every maximizer is smooth. 
\end{thm} \begin{proof} We consider the following functional: \[ \mathcal{L}^\lambda (u) := - \int_X \big{(} s (\omega) u^2 + 2 |\nabla u|^2 - \lambda n u^2 - \lambda u^2 \log u^2 \big{)} \omega^n \Big{/} \int_X u^2 \omega^n - \lambda \log \Big{(} \frac{1}{n!} \int_X u^2 \omega^n \Big{)}. \] We have $\check{W}^\lambda (\omega, f) = \mathcal{L}^\lambda (e^{f/2})$ and $\mathcal{L}^\lambda (c u) = \mathcal{L}^\lambda (u)$ for $c \in \mathbb{R}^\times$. We can see as follows that $\mathcal{L}^\lambda$ gives a continuous function on $L^2_1 (X) \setminus 0$. For $u, v \neq 0$, we have a measurable function $\theta: X \to [0,1]$ such that \[ u^2 \log u^2 - v^2 \log v^2 = \Big{(} 2 u_\theta + 4 u_\theta \log u_\theta \Big{)} (|u| - |v|) \] for $u_\theta = \theta |u| + (1- \theta) |v|$ by the mean value theorem and the measurable selection theorem. Then we have \begin{align*} \left| \int_X u^2 \log u^2 \omega^n - \int_X v^2 \log v^2 \omega^n \right| &\le \int_X \left| \Big{(} 2 u_\theta + 4 u_\theta \log u_\theta \Big{)} (|u| - |v|) \right| \omega^n \\ &\le \int_X \Big{(} 2 u_\theta + 4 \max (e^{-1}, C u_\theta^{1+\delta}) \Big{)} \Big{|}|u| - |v|\Big{|} \omega^n \\ &\le C' \max (e^{-1}, \| u \|_{L^{2+ 2\delta}}^{1+\delta}, \| v \|_{L^{2+ 2\delta}}^{1+\delta} ) \Big{\|} |u| - |v| \Big{\|}_{L^2}, \end{align*} where we use the convexity of $x^{1+\delta}$ and H\"older's inequality in the last line. It follows by the Sobolev embedding $L^2_1 \hookrightarrow L^{2n/(n-1)} \subset L^{2+2 \delta}$ that the functional $\mathcal{L}^\lambda$ is continuous on the sphere $\{ u \in L^2_1 (X) ~|~ \| u \|_{L^2} = 1 \}$, hence is continuous on $L^2_1 (X) \setminus 0$ by the scaling invariance. To obtain the claim, we firstly see that there exists a non-negative maximizer $u$ of $\mathcal{L}^\lambda$ and then show that $u$ is a strictly positive smooth function, by which we obtain a smooth maximizer $f = 2 \log u$ of $\check{W}^\lambda$. 
Firstly, the functional $\mathcal{L}^\lambda$ is bounded from above. To see this, we may normalize $u$ as $\| u \|_{L^2} =1$. When $\lambda \le 0$, the boundedness follows by Jensen's inequality applied for $\varphi (x) = x \log x$: \[ \lambda \int_X u^2 \log u^2 \omega^n \le \lambda V \varphi (V^{-1}). \] When $\lambda > 0$, we can bound \[ - \int_X (|\nabla u|^2 - \lambda u^2 \log u^2) \omega^n \] from above as in \cite{Rot}. Secondly, we construct a non-negative maximizer $u$. A similar argument as above shows that we have a uniform constant $c$ such that \[ \int_X |\nabla u|^2 \omega^n \le -\mathcal{L}^\lambda (u) + c \] for every $u$ with $\| u \|_{L^2} = 1$. Take a sequence $u_i \in L^2_1$ so that $\mathcal{L}^\lambda (u_i) \to \sup \mathcal{L}^\lambda$ and $\| u_i \|_{L^2} =1$. Then $u_i$ is bounded in $L^2_1$-norm by the above inequality. By Sobolev embedding, we can take a subsequence $u_i$ converging weakly to $u$ in $L^2_1$ and strongly in $L^{2n/(n-1) - \epsilon}$. The weak convergence implies $\liminf \int_X |\nabla u_i |^2 \omega^n \ge \int_X |\nabla u |^2 \omega^n$, so we get \[ \sup \mathcal{L}^\lambda = \lim \mathcal{L}^\lambda (u_i) \le \mathcal{L}^\lambda (u). \] Thus the limit $u$ gives a maximizer of $\mathcal{L}^\lambda$. Since $|\nabla |u|| \le |\nabla u|$ in general, we obtain a non-negative maximizer. Since $\mathscr{L}^\lambda (t) = \mathcal{L}^\lambda (u+t \varphi)$ is $C^1$ for smooth $\varphi$, for the maximizer $u$, we have \begin{align*} 0 &= D_u \mathcal{L}^\lambda (\varphi) \\ &=- 2 \int_X \Big{(} (s (\omega) u - \lambda u \log u^2) \varphi + 2(\nabla u, \nabla \varphi) \Big{)} \omega^n \Big{/} \int_X u^2 \omega^n \\ &\qquad + 2 \bar{s}^\lambda_{(u)} \cdot \int_X u \varphi \omega^n \Big{/} \int_X u^2 \omega^n, \end{align*} where we put \[ \bar{s}^\lambda_{(u)} := \int_X \big{(} s (\omega) u^2 + 2 |\nabla u|^2 - \lambda u^2 \log u^2 \big{)} \omega^n \Big{/} \int_X u^2 \omega^n. 
\] This gives us the following distributional differential equation on $u$: \[ s (\omega) u - \lambda u \log u^2 + 2 \Delta u - \bar{s}^\lambda_{(u)} u = 0. \] Then as in \cite{Rot}, we can see that the maximizer $u$ is indeed in $C^{2,1} (X)$ and satisfies the above differential equation in the ordinary sense. Now we assume $u$ vanishes at a point $p \in X$. We will show $u$ must be zero around $p$ and hence is zero on $X$ by the connectedness, which contradicts the fact that $\| u \|_{L^2} =1$ and thus shows that the assumption was absurd. Once this is proved, we easily see the smoothness of $f = 2\log u$ by the elliptic bootstrap argument, so that we get the desired claim. Take a normal polar coordinate $(r, \theta) = (r, \theta_1, \ldots, \theta_{2n-1})$ at $p$. We put $A (r, \theta) := \omega^n/dr \wedge d\theta$, $S (r) = \int A (r, \theta) d\theta$ and $\sigma (r, \theta) := A (r, \theta) /S(r)$. We have $(\partial/\partial r) \log \sigma (r, \theta) \le C r$ as noted in \cite{Rot}. It suffices to see the function \[ F (r) := \int u (r, \theta) \sigma (r, \theta) d\theta \] is zero on $r \in (0, R)$ for sufficiently small $R$. To see this, we set up an induction on the decay rate: if we have $F (r) \le r^k$ on $(0, R)$, then we obtain $F (r) \le r^{k+1/2}$. We put \begin{align*} G (r) &:= \int \frac{\partial}{\partial r} u(r, \theta) \sigma (r, \theta) d\theta, \\ L (r) &:= \int u \log u^2 \sigma d\theta, \\ K (r) &:= \int (s (\omega) u - \bar{s}^\lambda_{(u)} u) \sigma d\theta. \end{align*} For a smooth function $\varphi (r, \theta) = \varphi (r)$ which is compactly supported on the geodesic ball $B (p, R)$ and depends only on the variable $r$, we have \[ D_u \mathcal{L}^\lambda (\varphi) = \int_0^R \Big{(} 2 G (r) S (r) \frac{d}{dr} \varphi (r) - \lambda L (r) S (r) \varphi (r) + K (r) S (r) \varphi (r) \Big{)} dr = 0 \] by $\int (\nabla u, \nabla \varphi) \omega^n = \int_0^R G (r) S (r) (d/dr) \varphi (r) dr$.
Running over all possible $\varphi$, we get \[ 2\frac{d}{dr} (G (r) S (r)) = K (r) S (r) - \lambda L (r) S (r) \] for $r \in (0, R)$. Since $u (p) = 0$ and $u$ is non-negative and continuous, we may assume $u \log u^2 \le 0$ on a small ball $B (p, R)$. So when $\lambda \le 0$, we obtain \[ 2\frac{d}{dr} (G (r) S (r)) \le K (r) S (r) \le C' F (r) S (r). \] Therefore, we get \begin{align*} F (r) &= \int_0^r F' (s) ds = \int_0^r G (s) ds + \int_0^r ds \int u (s, \theta) \Big{(} \frac{\partial}{\partial s} \log \sigma (s, \theta) \Big{)} \sigma (s, \theta) d\theta \\ &\le C' \int_0^r ds S (s)^{-1} \int_0^s F (t) S (t) dt + C \int_0^r s F (s) ds \\ &\le C'' \left( \int_0^r \frac{ds}{s^{n-1}} \int_0^s F (t) t^{n-1} dt + \int_0^r s F (s) ds \right), \end{align*} which provides us with the machinery for running the induction as desired. We refer to \cite{Rot} for the remaining details. \end{proof} \newpage \subsection{$\mu$-cscK metrics and Perelman's $W$-entropy} \label{mu-cscK metrics and Perelman's $W$-entropy} In this subsection we show that the critical points of $\check{W}^\lambda$ precisely correspond to $\mu^\lambda$-cscK metrics as stated in the first main theorem. \subsubsection{Variational formula on state} \begin{thm} \label{Critical points of W} Let $f$ be a critical point of the functional $\check{W}^\lambda (\omega, \cdot)$. For a perturbation $\omega_t = \omega + \sqrt{-1} \partial \bar{\partial} \phi_t$ with $\dot{\phi}_0 = \varphi$, we have \[ \frac{d}{dt}\Big{|}_{t=0} \check{W}^\lambda (\omega_t, f) = \int_X \mathrm{Re} (\mathcal{D} \varphi, \mathcal{D} f) e^f \omega^n \Big{/} \int_X e^f \omega^n. \] \end{thm} We prepare some convenient formulas.
\begin{lem} We have \begin{align*} (\sqrt{-1} \partial \bar{\partial} f, \sqrt{-1} \partial \bar{\partial} \varphi) &= - \bar{\Box} (\bar{\partial}^\sharp f (\varphi)) + \bar{\partial}^\sharp f (\bar{\Box} \varphi) - g^{k \bar{l}} ((g^{i \bar{\jmath}} f_i)_k \varphi_{\bar{\jmath}})_{\bar{l}}, \\ (\sqrt{-1} \partial f \wedge \bar{\partial} f, \sqrt{-1} \partial \bar{\partial} \varphi) &= \bar{\partial}^\sharp f (\partial^\sharp f (\varphi)) - g^{i \bar{\jmath}} f_i (g^{k \bar{l}} f_{\bar{l}})_{\bar{\jmath}} \varphi_k \end{align*} and \begin{align*} \mathcal{D}^{f *} \mathcal{D} \varphi &= (\bar{\Box} - \bar{\partial}^\sharp f)^2 \varphi + (\mathrm{Ric} (\omega) - \sqrt{-1} \partial \bar{\partial} f, \sqrt{-1} \partial \bar{\partial} \varphi) + (\bar{\partial}^\sharp s^0_f (\omega), \nabla \varphi) \\ &\qquad + \Big{(} g^{i \bar{\jmath}} (g^{k \bar{l}} f_k)_{i \bar{l}} \varphi_{\bar{\jmath}} + g^{i \bar{\jmath}} g^{k \bar{q}} g_{k \bar{l}, \bar{q}} (g^{p \bar{l}} f_p)_i \varphi_{\bar{\jmath}} + g^{i \bar{\jmath}} (g^{k \bar{l}} f_k)_i f_{\bar{l}} \varphi_{\bar{\jmath}} \Big{)}. 
\end{align*} \end{lem} \begin{proof} Using $g^{k \bar{l}} {g^{i \bar{\jmath}}}_{, \bar{l}} = {g^{k \bar{\jmath}}}_{, \bar{l}} g^{i \bar{l}}$, we compute \begin{align*} (\sqrt{-1} \partial \bar{\partial} f, \sqrt{-1} \partial \bar{\partial} \varphi) &= g^{i \bar{\jmath}} g^{k \bar{l}} f_{i \bar{l}} \varphi_{k \bar{\jmath}} \\ &= g^{k \bar{l}} (g^{i \bar{\jmath}} f_i \varphi_{k \bar{\jmath}})_{\bar{l}} - g^{k \bar{l}} f_i (g^{i \bar{\jmath}} \varphi_{k \bar{\jmath}})_{\bar{l}} \\ &= g^{k \bar{l}} (g^{i \bar{\jmath}} f_i \varphi_{\bar{\jmath}})_{k \bar{l}} - g^{k \bar{l}} ((g^{i \bar{\jmath}} f_i)_k \varphi_{\bar{\jmath}})_{\bar{l}} - g^{k \bar{l}} f_i (g^{i \bar{\jmath}} \varphi_{k \bar{\jmath}})_{\bar{l}} \\ &= - \bar{\Box} (\bar{\partial}^\sharp f (\varphi)) - g^{k \bar{l}} ((g^{i \bar{\jmath}} f_i)_k \varphi_{\bar{\jmath}})_{\bar{l}} - g^{k \bar{l}} f_i ({g^{i \bar{\jmath}}}_{, \bar{l}} \varphi_{k \bar{\jmath}} + g^{i \bar{\jmath}} \varphi_{k \bar{\jmath} \bar{l}}) \\ &= - \bar{\Box} (\bar{\partial}^\sharp f (\varphi)) - g^{k \bar{l}} ((g^{i \bar{\jmath}} f_i)_k \varphi_{\bar{\jmath}})_{\bar{l}} - g^{k \bar{l}} f_i ({g^{i \bar{\jmath}}}_{, \bar{l}} \varphi_{k \bar{\jmath}} + (g^{i \bar{\jmath}} \varphi_{k \bar{l}})_{\bar{\jmath}} - {g^{i \bar{\jmath}}}_{, \bar{\jmath}} \varphi_{k \bar{l}}) \\ &= - \bar{\Box} (\bar{\partial}^\sharp f (\varphi)) - g^{k \bar{l}} ((g^{i \bar{\jmath}} f_i)_k \varphi_{\bar{\jmath}})_{\bar{l}} - g^{k \bar{l}} f_i ({g^{i \bar{\jmath}}}_{, \bar{l}} \varphi_{k \bar{\jmath}} + (g^{i \bar{\jmath}} \varphi_{k \bar{l}})_{\bar{\jmath}}) \\ &\qquad+ f_i (g^{k \bar{l}} g^{i \bar{\jmath}} \varphi_{k \bar{l}})_{\bar{\jmath}} - f_i g^{i \bar{\jmath}} (g^{k \bar{l}} \varphi_{k \bar{l}})_{\bar{\jmath}} \\ &= - \bar{\Box} (\bar{\partial}^\sharp f (\varphi)) + \bar{\partial}^\sharp f (\bar{\Box} \varphi) - g^{k \bar{l}} ((g^{i \bar{\jmath}} f_i)_k \varphi_{\bar{\jmath}})_{\bar{l}} - g^{k \bar{l}} f_i {g^{i \bar{\jmath}}}_{, \bar{l}} \varphi_{k 
\bar{\jmath}} \\ &\qquad - g^{k \bar{l}} f_i (g^{i \bar{\jmath}} \varphi_{k \bar{l}})_{\bar{\jmath}} + f_i (g^{k \bar{l}} g^{i \bar{\jmath}} \varphi_{k \bar{l}})_{\bar{\jmath}} \\ &= - \bar{\Box} (\bar{\partial}^\sharp f (\varphi)) + \bar{\partial}^\sharp f (\bar{\Box} \varphi) - g^{k \bar{l}} ((g^{i \bar{\jmath}} f_i)_k \varphi_{\bar{\jmath}})_{\bar{l}} \\ &\qquad - f_i {g^{k \bar{\jmath}}}_{, \bar{l}} g^{i \bar{l}} \varphi_{k \bar{\jmath}} + f_i {g^{k \bar{l}}}_{, \bar{\jmath}} g^{i \bar{\jmath}} \varphi_{k \bar{l}} \\ &= - \bar{\Box} (\bar{\partial}^\sharp f (\varphi)) + \bar{\partial}^\sharp f (\bar{\Box} \varphi) - g^{k \bar{l}} ((g^{i \bar{\jmath}} f_i)_k \varphi_{\bar{\jmath}})_{\bar{l}}. \end{align*} The second equality is simple: \begin{align*} (\sqrt{-1} \partial f \wedge \bar{\partial} f, \sqrt{-1} \partial \bar{\partial} \varphi) &= g^{i \bar{\jmath}} g^{k \bar{l}} f_i f_{\bar{l}} \varphi_{k \bar{\jmath}} \\ &= g^{i \bar{\jmath}} f_i (g^{k \bar{l}} f_{\bar{l}} \varphi_k )_{\bar{\jmath}} - g^{i \bar{\jmath}} f_i (g^{k \bar{l}} f_{\bar{l}})_{\bar{\jmath}} \varphi_k \\ &= \bar{\partial}^\sharp f (\partial^\sharp f (\varphi)) - g^{i \bar{\jmath}} f_i (g^{k \bar{l}} f_{\bar{l}})_{\bar{\jmath}} \varphi_k. \end{align*} As for the last one, we refer the computation in Proposition 3.4 in \cite{Ino2} for the detail. In that proof, we used the assumption that $\partial^\sharp f$ is holomorphic only to eliminate the last term \[ \Big{(} g^{i \bar{\jmath}} (g^{k \bar{l}} f_k)_{i \bar{l}} \varphi_{\bar{\jmath}} + g^{i \bar{\jmath}} g^{k \bar{q}} g_{k \bar{l}, \bar{q}} (g^{p \bar{l}} f_p)_i \varphi_{\bar{\jmath}} + g^{i \bar{\jmath}} (g^{k \bar{l}} f_k)_i f_{\bar{l}} \varphi_{\bar{\jmath}} \Big{)}. 
\] \end{proof} We put \begin{align} A_f (\varphi) &:= g^{k \bar{l}} ((g^{i \bar{\jmath}} f_i)_k \varphi_{\bar{\jmath}})_{\bar{l}} + g^{i \bar{\jmath}} f_i (g^{k \bar{l}} f_{\bar{l}})_{\bar{\jmath}} \varphi_k, \\ B_f (\varphi) &:= g^{i \bar{\jmath}} (g^{k \bar{l}} f_k)_{i \bar{l}} \varphi_{\bar{\jmath}} + g^{i \bar{\jmath}} g^{k \bar{q}} g_{k \bar{l}, \bar{q}} (g^{p \bar{l}} f_p)_i \varphi_{\bar{\jmath}} + g^{i \bar{\jmath}} (g^{k \bar{l}} f_k)_i f_{\bar{l}} \varphi_{\bar{\jmath}}, \end{align} which are globally defined complex valued functions by the above lemma. \begin{prop} We have \begin{equation} \int_X \mathrm{Re} A_f (\varphi) e^f \omega^n = 0 \end{equation} and \begin{equation} \mathrm{Re} (A_f (\varphi) - B_f (\varphi)) = \mathrm{Re} (\mathcal{D} \varphi, \mathcal{D} f). \end{equation} \end{prop} \begin{proof} Put $\omega_t := \omega + t \sqrt{-1} \partial \bar{\partial} \varphi$. To see the first claim, we compute the derivative of $\int_X (\bar{\Box}_t f - |\bar{\partial}_t^\sharp f|^2) e^f \omega_t^n = 0$. 
Using the above lemma, we compute \begin{align*} 0 &= \frac{d}{dt}\Big{|}_{t=0} \int_X (\bar{\Box}_t f - |\bar{\partial}_t^\sharp f|^2) e^f \omega_t^n \\ &= \int_X \Big{(} (\sqrt{-1} \partial \bar{\partial} \varphi ,\sqrt{-1} \partial \bar{\partial} f) + (\sqrt{-1} \partial \bar{\partial} \varphi, \sqrt{-1} \partial f \wedge \bar{\partial} f) - (\bar{\Box} f - |\bar{\partial}^\sharp f|^2) \bar{\Box} \varphi \Big{)} e^f \omega^n \\ &= \int_X \Big{(} (\sqrt{-1} \partial \bar{\partial} \varphi ,\sqrt{-1} \partial \bar{\partial} f) + (\sqrt{-1} \partial \bar{\partial} \varphi, \sqrt{-1} \partial f \wedge \bar{\partial} f) - \bar{\partial}^\sharp f (\bar{\Box} \varphi) \Big{)} e^f \omega^n \\ &= \int_X \Big{(} (\sqrt{-1} \partial \bar{\partial} \varphi ,\sqrt{-1} \partial \bar{\partial} f) + (\sqrt{-1} \partial \bar{\partial} \varphi, \sqrt{-1} \partial f \wedge \bar{\partial} f) - \bar{\Box}^2 \varphi \Big{)} e^f \omega^n \\ &= - \int_X A_f (\varphi) e^f \omega^n - \int_X (\bar{\Box} - \bar{\partial}^\sharp f) (\bar{\Box} + \bar{\partial}^\sharp f) (\varphi) e^f \omega^n + \int_X \bar{\partial}^\sharp f ((\partial^\sharp f -\bar{\partial}^\sharp f) (\varphi)) e^f \omega^n \\ &= - \int_X \mathrm{Re} A_f (\varphi) e^f \omega^n - \sqrt{-1} \Big{(} \int_X \mathrm{Im} A_f (\varphi) e^f \omega^n + \int_X \bar{\Box} (J \nabla f (\varphi)) e^f \omega^n \Big{)}, \end{align*} which proves the first claim. As for the second claim, we compare them at $p \in X$ on a normal coordinate.
We have \begin{align*} \mathrm{Re} (A_f (\varphi) &- B_f (\varphi) ) (p) \\ &=\mathrm{Re} \Big{[} \Big{(} \sum_{i, k, \bar{\jmath}} {g^{i \bar{\jmath}}}_{, k \bar{k}} f_i \varphi_{\bar{\jmath}} + \sum_{i, k} f_{i k \bar{k}} \varphi_{\bar{\imath}} + \sum_{i, k} f_{ik} \varphi_{\bar{\imath} \bar{k}} + \sum_{i, k} f_i f_{\bar{i} \bar{k}} \varphi_k \Big{)} \\ &\qquad - \Big{(} \sum_{i, k, l} {g^{k \bar{l}}}_{, i \bar{l}} f_k \varphi_{\bar{\imath}} + \sum_{i, k} f_{k i \bar{k}} \varphi_{\bar{\imath}} + \sum_{i, k} f_{k i} f_{\bar{k}} \varphi_{\bar{\imath}} \Big{)} \Big{]}. \end{align*} This reduces to $\mathrm{Re} \sum_{i, k} f_{ik} \varphi_{\bar{i} \bar{k}} = \mathrm{Re} (\mathcal{D} \varphi, \mathcal{D} f) (p)$ by \[ \sum_{i,k,l} {g^{k \bar{l}}}_{, i \bar{l}} f_k \varphi_{\bar{\imath}} = - \sum_{i, k, l, p, q} g^{k \bar{q}} g^{p \bar{l}} g_{p \bar{q}, i \bar{l}} f_k \varphi_{\bar{\imath}} = \sum_{i, k, l} R_{l \bar{k} i \bar{l}} f_k \varphi_{\bar{\imath}} = \sum_{i, k, l} R_{i \bar{k} l \bar{l}} f_k \varphi_{\bar{\imath}} = \sum_{i, k, l} {g^{k \bar{\imath}}}_{, l \bar{l}} f_k \varphi_{\bar{\imath}} \] and \[ \mathrm{Re} (\sum_{i, k} f_i f_{\bar{i} \bar{k}} \varphi_k - \sum_{i, k} f_{k i} f_{\bar{k}} \varphi_{\bar{\imath}}) = 0. \] \end{proof} Now we show Theorem \ref{Critical points of W}. \begin{proof}[Proof of Theorem \ref{Critical points of W}] We exhibit the first variation for general $f$ as an independent interest. 
Using the above proposition, we compute \begin{align*} \frac{d}{dt}\Big{|}_{t=0} \check{W}^\lambda (\omega_t, f) &= \frac{-1}{\int_X e^f \omega^n} \int_X \Big{[} - \mathcal{D}^* \mathcal{D} \varphi + ({\bar{\partial}}^\sharp s, \nabla \varphi) + (\sqrt{-1} \partial \bar{\partial} \varphi, \sqrt{-1} \partial \bar{\partial} f) \\ &\qquad \qquad \qquad - \Big{(} (s (\omega) + \bar{\Box} f -\lambda (n+f)) - (\bar{s}^\lambda_f (\omega) -\lambda n - \lambda) \Big{)} \bar{\Box} \varphi \Big{]} ~e^f \omega^n \\ &= \frac{-1}{\int_X e^f \omega^n} \int_X \Big{[} - \mathcal{D}^* \mathcal{D} \varphi + ({\bar{\partial}}^\sharp s, \nabla \varphi) + (\sqrt{-1} \partial \bar{\partial} \varphi, \sqrt{-1} \partial \bar{\partial} f) \\ &\qquad \qquad \qquad + (\bar{\Box} f - |\partial^\sharp f|^2 - \lambda) \bar{\Box} \varphi - (s^\lambda_f - \bar{s}^\lambda_f) \bar{\Box} \varphi \Big{]} ~e^f \omega^n \\ &= \frac{-1}{\int_X e^f \omega^n} \int_X \Big{[} - \bar{\Box}^2 \varphi - (\mathrm{Ric} (\omega) - \sqrt{-1} \partial \bar{\partial} f, \sqrt{-1} \partial \bar{\partial} \varphi) \\ &\qquad \qquad \qquad + (\bar{\Box} f - |\partial^\sharp f|^2) \bar{\Box} \varphi - \lambda (\bar{\partial}^\sharp f, \nabla \varphi) - (s^\lambda_f - \bar{s}^\lambda_f) \bar{\Box} \varphi \Big{]} ~e^f \omega^n \\ &= \frac{-1}{\int_X e^f \omega^n} \int_X \Big{[} - (\bar{\Box} - \bar{\partial}^\sharp f)^2 \varphi - (\mathrm{Ric} (\omega) - \sqrt{-1} \partial \bar{\partial} f, \sqrt{-1} \partial \bar{\partial} \varphi) - (\bar{\partial}^\sharp s^0_f, \nabla \varphi) \\ &\qquad \qquad \qquad - \Big{(} \bar{\Box} (\bar{\partial}^\sharp f (\varphi)) + \bar{\partial}^\sharp f (\bar{\Box} \varphi) - \bar{\partial}^\sharp f (\bar{\partial}^\sharp f (\varphi)) \Big{)} + \bar{\partial}^\sharp f (\bar{\Box} \varphi) \\ &\qquad \qquad \qquad \quad + (\bar{\partial}^\sharp s^\lambda_f, \nabla \varphi) - (s^\lambda_f - \bar{s}^\lambda_f) \bar{\Box} \varphi \Big{]} ~e^f \omega^n \\ &= \frac{-1}{\int_X e^f \omega^n} 
\int_X \Big{[} -\mathcal{D}^{f *} \mathcal{D} \varphi + B_f (\varphi) - (\bar{\Box} - \bar{\partial}^\sharp f) (\bar{\partial}^\sharp f (\varphi)) - (s^\lambda_f - \bar{s}^\lambda_f) (\bar{\partial}^\sharp f, \nabla \varphi) \Big{]} ~e^f \omega^n \\ &= \frac{-1}{\int_X e^f \omega^n} \int_X B_f (\varphi) ~e^f \omega^n + \frac{1}{\int_X e^f \omega^n} \int_X (s^\lambda_f - \bar{s}^\lambda_f) (\bar{\partial}^\sharp f, \nabla \varphi) ~e^f \omega^n \\ &= \frac{-1}{\int_X e^f \omega^n} \mathrm{Re} \int_X B_f (\varphi) ~e^f \omega^n + \frac{1}{2 \int_X e^f \omega^n} \int_X (s^\lambda_f - \bar{s}^\lambda_f) (\nabla f, \nabla \varphi) ~e^f \omega^n \\ &= \frac{1}{\int_X e^f \omega^n} \int_X \mathrm{Re} (A_f (\varphi) - B_f (\varphi)) ~e^f \omega^n + \frac{1}{2 \int_X e^f \omega^n} \int_X (s^\lambda_f - \bar{s}^\lambda_f) (\nabla f, \nabla \varphi) ~e^f \omega^n \\ &= \frac{1}{\int_X e^f \omega^n} \int_X \mathrm{Re} (\mathcal{D} \varphi, \mathcal{D} f) ~e^f \omega^n + \frac{1}{2 \int_X e^f \omega^n} \int_X (s^\lambda_f - \bar{s}^\lambda_f) (\nabla f, \nabla \varphi) ~e^f \omega^n. \end{align*} \end{proof} \begin{cor} Critical points of $\check{W}^\lambda: T \mathcal{H} \to \mathbb{R}$ correspond to $\mu^\lambda$-cscK metrics. Namely, a state $(\omega, f) \in T \mathcal{H} (X, L) = \mathcal{H} (X, L) \times C^\infty (X)/\mathbb{R}$ is a critical point of $\check{W}^\lambda$ if and only if $\xi' = \partial^\sharp f$ gives a holomorphic vector field and the K\"ahler metric $\omega$ is a $\mu^\lambda$-cscK metric with respect to $\xi = \mathrm{Im} \xi'$. \end{cor} \begin{proof} We apply the above theorem for $\phi_t = t f$. It follows that a state $(\omega, f)$ is a critical point of $\check{W}^\lambda$ if and only if $f$ satisfies $s^\lambda_f (\omega) - \bar{s}^\lambda_f (\omega) = 0$ and $\mathcal{D} f = 0$, which is nothing but the condition in the claim. 
\end{proof} \subsubsection{$\mu$-entropy in high temperature} When $\lambda \le 0$, the critical momentum $f$ turns out to be a unique maximizer of the functional $\check{W}^\lambda (\omega, \cdot): C^{0,1} (X) \to \mathbb{R}$, which allows us to reduce all the information of the critical points of $\check{W}^\lambda$ on $T \mathcal{H}$ to that of a functional $\bm{\check{\mu}}^\lambda$ on $\mathcal{H} (X, L)$, analogous to the Calabi functional. \begin{thm} \label{Uniqueness of moment} Suppose $\lambda \le 0$. Then we have the following. \begin{enumerate} \item The functional $\check{W}^\lambda (\omega, \cdot): C^\infty (X) \to \mathbb{R}$ admits a unique critical point/maximizer $f$ modulo constants for every K\"ahler metric $\omega$, which automatically maximizes $\check{W}^\lambda (\omega, \cdot)$. \item The functional $\bm{\check{\mu}}^\lambda: \mathcal{H} (X, L) \to \mathbb{R}$ is smooth. In this case, the following are equivalent for a K\"ahler metric $\omega$: \begin{itemize} \item $\omega$ is a $\mu^\lambda$-cscK metric \item $\omega$ is a critical point of $\bm{\check{\mu}}^\lambda$ \item There is $\xi \in {^\nabla \mathfrak{isom}} (X, \omega)$ such that $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (X, L; \xi) = \bm{\check{\mu}}^\lambda (\omega)$. \end{itemize} In one of the above cases, $\omega$ minimizes $\bm{\check{\mu}}^\lambda$ among all $\xi$-invariant metrics. \end{enumerate} \end{thm} \begin{proof} Suppose there are two critical points $f, g \in C^\infty (X)$ of $\check{W}^\lambda (\omega, \cdot)$. We may normalize $f, g$ so that $\int_X (f-g) e^{(f+g)/2} \omega^n = 0$. We have \[ \Delta f - \frac{1}{2} |\nabla f|^2 - \lambda f = \bar{s}^\lambda_f -s (\omega) = \Delta g - \frac{1}{2} |\nabla g|^2 - \lambda g + (\bar{s}^\lambda_f - \bar{s}^\lambda_g). \] Putting $u= f- g$ and $h = (f+g)/2$, we can arrange this as \[ \Delta u - (\nabla h, \nabla u) = \lambda u + (\bar{s}^\lambda_f - \bar{s}^\lambda_g).
\] Then we get \[ \int_X |\nabla u|^2 e^h \omega^n = \int_X (\Delta u - (\nabla h, \nabla u)) u~ e^h \omega^n = \lambda \int_X u^2 e^h \omega^n, \] which implies $u = f - g$ is zero when $\lambda \le 0$ under the normalization condition. Next we show the second claim. If $(\omega, f)$ is a critical point of $\check{W}^\lambda$, then for any smooth perturbation $\omega_t$, we have a smooth family $f_t$ satisfying $s^\lambda_{f_t} (\omega_t) = \bar{s}^\lambda_{f_t} (\omega_t)$. To see this, we compute the derivative \begin{align*} D_0 \mathcal{S}^\lambda (0, u) &=\Delta u - (\nabla f, \nabla u) - \lambda u \end{align*} of the smooth map \[ \mathcal{S}^\lambda: C^{k+4, \alpha}_f (X) \times C^{k+2, \alpha}_f (X) \to C^{k, \alpha}_f (X): (\phi, u) \mapsto s^\lambda_{f+u} (\omega_\phi) - \bar{s}^\lambda_{f+u} (\omega_\phi). \] Here we put $C^{k, \alpha}_f (X) = \{ u \in C^{k, \alpha} (X) ~|~ \int_X u e^f \omega^n = 0 \}$. The above expression shows that $D_0 \mathcal{S}^\lambda|_{\{ 0 \} \times C^{k+2, \alpha}_f (X)}: C^{k+2, \alpha}_f (X) \to C^{k, \alpha}_f (X)$ has a right inverse when $\lambda \le 0$, so that $\mathrm{Ker} D_0 \mathcal{S}^\lambda$ maps onto $C^{k+4, \alpha}_f (X)$ by the projection to the first factor. Thus by the implicit function theorem, we get the desired smooth family $f_t$. By the uniqueness of the critical momentum, we have $\bm{\check{\mu}}^\lambda (\omega_t) = \check{W}^\lambda (\omega_t, f_t)$ for this $f_t$, which shows the smoothness of $\bm{\check{\mu}}^\lambda$. 
As for the equivalence, if $\omega$ is a critical point of $\bm{\check{\mu}}^\lambda$, then $(\omega, f)$ gives a critical point of $\check{W}^\lambda$ for the unique critical momentum $f$ as \[ \frac{d}{dt}\Big{|}_{t=0} \check{W}^\lambda (\omega_t, f) = \frac{d}{dt}\Big{|}_{t=0} \check{W}^\lambda (\omega_t, f_t) = \frac{d}{dt}\Big{|}_{t=0} \bm{\check{\mu}}^\lambda (\omega_t) = 0, \] so that it is a $\mu^\lambda_\xi$-cscK metric for the real holomorphic vector field $\xi = \mathrm{Im} \partial^\sharp f$ thanks to the above corollary. Conversely, if $\omega$ is a $\mu^\lambda$-cscK metric, then it is a critical point of $\check{W}^\lambda$, so that it is a critical point of $\bm{\check{\mu}}^\lambda$. By the uniqueness of the critical momentum, we have $f = \mu_\xi$ for a $\check{\mu}^\lambda_\xi$-cscK metric $\omega$, so that we have $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (X, L; \xi) = \bm{\check{\mu}}^\lambda (\omega)$. Conversely, if $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (X, L; \xi) = \bm{\check{\mu}}^\lambda (\omega)$ for $\xi \in {^\nabla \mathfrak{isom}} (X, \omega)$, then since $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (X, L; \xi) = \check{W}^\lambda (\omega, \mu_\xi)$, we have $\bm{\check{\mu}}^\lambda (\omega) = \check{W}^\lambda (\omega, \mu_\xi)$. Thus $\mu_\xi$ is a critical momentum, so that $\omega$ is a $\check{\mu}^\lambda_\xi$-cscK metric. As for the last claim, we have \[ \bm{\check{\mu}}^\lambda (\omega) = \check{W}^\lambda (\omega, \mu_{-2\xi}) = \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_{-2\xi}) = \check{W}^\lambda (\omega_\varphi, \mu_{-2\xi}^\varphi) \le \bm{\check{\mu}}^\lambda (\omega_\varphi) \] for $\xi$-invariant metric $\omega_\varphi$, so that $\omega$ minimizes $\bm{\check{\mu}}^\lambda$ among all $\xi$-invariant metrics. \end{proof} We will refine the last claim in the next section as we claimed in Theorem \ref{main theorem on mu-entropy}. 
\newpage \section{$W$-entropy along geodesic rays} \label{W-functional along geodesic rays} \subsection{$W$-entropy is monotonic along geodesics} \label{Monotonicity} \subsubsection{First observation: monotonicity along smooth geodesic} \begin{prop} Let $\{ \phi_t \}$ be a smooth geodesic. Then we have \begin{equation} \frac{d}{dt} \check{W}^\lambda (\omega_{\phi_t}, -\dot{\phi}_t) = - \int_X |\mathcal{D}_{\phi_t} \dot{\phi}_t|^2 e^{- \dot{\phi}_t} \omega_{\phi_t}^n \Big{/} \int_X e^{-\dot{\phi}_t} \omega_{\phi_t}^n \le 0. \end{equation} \end{prop} \begin{proof} Put $\theta_t = -\dot{\phi}_t$. We have $\dot{\theta}_t = -|\bar{\partial}^\sharp_{\phi_t} \dot{\phi}_t|^2 = (\bar{\partial}^\sharp_{\phi_t} \theta_t, \nabla \dot{\phi}_t)$ as $\{ \phi_t \}$ is a geodesic. We easily see \[ \frac{d}{dt} \int_X e^{\theta_t} \omega_{\phi_t}^n = 0, \quad \frac{d}{dt} \int_X \theta_t e^{\theta_t} \omega_{\phi_t}^n = 0. \] We compute \begin{align*} \frac{d}{dt} \int_X & (s (\omega_{\phi_t}) + \bar{\Box}_{\phi_t} \theta_t) e^{\theta_t} \omega_{\phi_t}^n \\ &= \int_X \Big{(} - \mathcal{D}_{\phi_t}^* \mathcal{D}_{\phi_t} \dot{\phi}_t + (\bar{\partial}^\sharp s (\omega_{\phi_t}), \nabla \dot{\phi}_t) + (\sqrt{-1} \partial \bar{\partial} \dot{\phi}_t, \sqrt{-1} \partial \bar{\partial} \theta_t) + \bar{\Box}_{\phi_t} \dot{\theta}_t \Big{)} e^{\theta_t} \omega_{\phi_t}^n \\ &\qquad- \int_X (s (\omega_{\phi_t}) + \bar{\Box}_{\phi_t} \theta_t) (\bar{\Box} \dot{\phi}_t - \dot{\theta}_t) e^{\theta_t} \omega_{\phi_t}^n \\ &= \int_X \Big{(} - \bar{\Box}^2 \dot{\phi}_t - (\mathrm{Ric} (\omega_{\phi_t}), \sqrt{-1} \partial \bar{\partial} \dot{\phi}_t) + (\sqrt{-1} \partial \bar{\partial} \dot{\phi}_t, \sqrt{-1} \partial \bar{\partial} \theta_t) + \bar{\Box}_{\phi_t} (\bar{\partial}^\sharp \theta_t (\dot{\phi}_t)) \Big{)} e^{\theta_t} \omega_{\phi_t}^n \\ &\qquad+ \int_X \bar{\Box}_{\phi_t} (\dot{\theta}_t - \bar{\partial}^\sharp \theta_t (\dot{\phi}_t)) e^{\theta_t} \omega_{\phi_t}^n \\ 
&\qquad- \int_X (s (\omega_{\phi_t}) + \bar{\Box}_{\phi_t} \theta_t) (\bar{\Box} - \bar{\partial}^\sharp_{\phi_t} \theta_t) (\dot{\phi}_t) e^{\theta_t} \omega_{\phi_t}^n \\ &\qquad \quad + \int_X (s (\omega_{\phi_t}) + \bar{\Box}_{\phi_t} \theta_t) (\dot{\theta}_t - \bar{\partial}^\sharp \theta_t (\dot{\phi}_t)) e^{\theta_t} \omega_{\phi_t}^n \\ &= - \int_X \Big{(} \bar{\Box}_{\phi_t} (\bar{\Box}_{\phi_t} - \bar{\partial}^\sharp \theta_t) \dot{\phi}_t + (\mathrm{Ric} (\omega_{\phi_t}) - \sqrt{-1} \partial \bar{\partial} \theta_t, \sqrt{-1} \partial \bar{\partial} \dot{\phi}_t) \Big{)} e^{\theta_t} \omega_{\phi_t}^n \\ &\qquad- \int_X (\bar{\partial}^\sharp (s (\omega_{\phi_t}) + \bar{\Box}_{\phi_t} \theta_t), \nabla \dot{\phi}_t) e^{\theta_t} \omega_{\phi_t}^n \\ &= - \int_X \Big{(} (\bar{\Box}_{\phi_t} - \bar{\partial}^\sharp \theta_t)^2 \dot{\phi}_t + (\mathrm{Ric} (\omega_{\phi_t}) - \sqrt{-1} \partial \bar{\partial} \theta_t, \sqrt{-1} \partial \bar{\partial} \dot{\phi}_t) + (\bar{\partial}^\sharp s^0_{\theta_t} (\omega_{\phi_t}) , \nabla \dot{\phi}_t) \Big{)} e^{\theta_t} \omega_{\phi_t}^n \\ &= - \int_X \Big{(} \mathcal{D}_{\phi_t}^{\theta_t *} \mathcal{D}_{\phi_t} \dot{\phi}_t - B_{\theta_t} (\dot{\phi}_t) \Big{)} e^{\theta_t} \omega_{\phi_t}^n \\ &= \mathrm{Re} \int_X B_{\theta_t} (\dot{\phi}_t) e^{\theta_t} \omega_{\phi_t}^n = - \int_X \mathrm{Re} (A_{\theta_t} (\dot{\phi}_t) - B_{\theta_t} (\dot{\phi}_t)) e^{\theta_t} \omega_{\phi_t}^n \\ &= - \int_X \mathrm{Re} (\mathcal{D}_{\phi_t} \dot{\phi}_t, \mathcal{D}_{\phi_t} \theta_t) e^{\theta_t} \omega_{\phi_t}^n = \int_X |\mathcal{D}_{\phi_t} \theta_t|^2 e^{\theta_t} \omega_{\phi_t}^n. \end{align*} Thus we get \[ \frac{d}{dt} \check{W}^\lambda (\omega_{\phi_t}, - \dot{\phi}_t) = - \int_X |\mathcal{D}_{\phi_t} \theta_t|^2 e^{\theta_t} \omega_{\phi_t}^n \Big{/} \int_X e^{\theta_t} \omega_{\phi_t}^n.
\] \end{proof} \subsubsection{Relaxed action functional and integration by parts} For smooth rays $\bm{\phi} = \{ \phi_s \}_{s \in [0,\infty)} \subset C^\infty (X, \omega)$ and $\bm{\psi} = \{ \psi_s \}_{s \in [0, \infty)} \subset C^\infty (X, \sigma)$ of $(1,1)$-forms, we put \[ \mathcal{A}^{\bm{\psi}}_{\bm{\phi}} (t) := - \int_0^t ds \int_X (dd^c_\sigma \psi_s - \dot{\psi}_s) e^{dd^c_\omega \phi_s -\dot{\phi}_s}. \] When $\omega_{\phi_s} = dd^c_\omega \phi_s$ is positive, $\bm{\psi} = \{ \log (\omega_{\phi_s}^n/\omega^n) \}_{s \in [0, \infty)} \subset C^\infty (X, -\mathrm{Ric} (\omega))$ gives a smooth ray, and we have \[ \mathcal{A}^{\bm{\psi}}_{\bm{\phi}} (t) = \frac{1}{n!} \int_0^t ds \int_X (s (\omega_{\phi_s}) - \bar{\Box}_{\phi_s} \dot{\phi}_s) e^{-\dot{\phi}_s} \omega_{\phi_s}^n. \] We simply write this as $\mathcal{A}_{\bm{\phi}} (t)$. For $A_t := \{ \tau \in \mathbb{C}_- ~|~ 1 \le |\tau| < e^t \}$, we consider the functions $\Phi (x, \tau) := \phi_{\log |\tau|} (x)$ and $\Psi (x, \tau) := \psi_{\log |\tau|} (x)$ on $X \times A_t$. Integration by parts shows the following. \begin{prop} We have \[ \mathcal{A}^{\bm{\psi}}_{\bm{\phi}} (t) = \int_X \psi_t e^{-\dot{\phi}_t} \frac{(dd^c_\omega \phi_t)^n}{n!} + \frac{1}{\pi} \int_{X \times A_t} \Psi e^{-\dot{\Phi}} \frac{(dd^c_\omega \Phi)^{n+1}}{(n+1)!} - \int_0^t ds \int_X \sigma \wedge e^{-\dot{\phi}_s} \frac{(dd^c_\omega \phi_s)^{n-1}}{(n-1)!}. \] \end{prop} \begin{proof} By definition, we have \[ n! \cdot \mathcal{A}^{\bm{\psi}}_{\bm{\phi}} (t) = - \int_0^t ds \int_X n \sigma \wedge e^{-\dot{\phi}} (dd^c_\omega \phi)^{n-1} + \int_0^t ds \int_X n dd^c \psi \wedge e^{- \dot{\phi}} (dd^c_\omega \phi)^{n-1} + \int_0^t ds \int_X \dot{\psi} e^{-\dot{\phi}} (dd^c_\omega \phi)^n.
\] We can compute the integrand of the last term as \begin{align*} \int_X \dot{\psi} e^{- \dot{\phi}} (dd^c_\omega \phi)^n &= \frac{d}{ds} \int_X \psi e^{-\dot{\phi}} (dd^c_\omega \phi)^n + \int_X \psi (\ddot{\phi} e^{-\dot{\phi}} (dd^c_\omega \phi)^n - n d \dot{\phi} \wedge d^c \dot{\phi} \wedge e^{-\dot{\phi}} (dd^c_\omega \phi)^{n-1}) \\ &\qquad- \int_X n dd^c \psi \wedge e^{-\dot{\phi}} (dd^c_\omega \phi)^{n-1} \end{align*} by using \begin{align*} \frac{d}{dt} \Big{(} \psi e^{-\dot{\phi}} (dd^c_\omega \phi)^n \Big{)} &= \dot{\psi} e^{- \dot{\phi}} (dd^c_\omega \phi)^n - \psi (\ddot{\phi} e^{-\dot{\phi}} (dd^c_\omega \phi)^n - n d \dot{\phi} \wedge d^c \dot{\phi} \wedge e^{-\dot{\phi}} (dd^c_\omega \phi)^{n-1}) \\ &\qquad + n \psi (dd^c \dot{\phi} - d \dot{\phi} \wedge d^c \dot{\phi}) e^{-\dot{\phi}} \wedge (dd^c_\omega \phi)^{n-1} \\ &= \dot{\psi} e^{- \dot{\phi}} (dd^c_\omega \phi)^n - \psi (\ddot{\phi} e^{-\dot{\phi}} (dd^c_\omega \phi)^n - n d \dot{\phi} \wedge d^c \dot{\phi} \wedge e^{-\dot{\phi}} (dd^c_\omega \phi)^{n-1}) \\ &\qquad + n \psi dd^c (e^{- \dot{\phi}} (dd^c_\omega \phi)^{n-1}). \end{align*} We can arrange the integration of the second term as \[ \int_0^t ds \int_X \psi (\ddot{\phi} e^{-\dot{\phi}} (dd^c_\omega \phi)^n - n d \dot{\phi} \wedge d^c \dot{\phi} \wedge e^{-\dot{\phi}} (dd^c_\omega \phi)^{n-1}) = \frac{1}{\pi} \int_{X \times A_t} \Psi e^{-\dot{\Phi}} (dd_\omega^c \Phi)^{n+1}, \] which shows the claim. \end{proof} \subsubsection{Subgeodesic and momentum} We put $A_t := \{ \tau \in \mathbb{C}_- ~|~ 1 \le |\tau| < e^t \}$, where $\mathbb{C}_-$ denotes $\mathbb{C}$ endowed with the reversed scaling action $z. t = t^{-1} z$. A \textit{subgeodesic ray} $\Phi$ on $X$ is a $U (1)$-invariant $\omega$-plurisubharmonic function on $X \times A_\infty$. We also use the notation $\bm{\phi} = \{ \phi_t (x) = \Phi (x, e^t) \}_{t \in [0, \infty)}$ to denote the subgeodesic $\Phi$. 
In this article, we restrict our interest to locally bounded $\Phi$ to simplify arguments on the \textit{momentum} $\dot{\Phi}$ introduced later. A (locally bounded) \textit{geodesic ray} is a locally bounded subgeodesic ray satisfying \[ (dd_\omega^c \Phi)^{n+1} = 0 \] in the sense of Bedford--Taylor. For a normal test configuration $(\mathcal{X}, \mathcal{L})$ and any initial metric $\omega$, there exists a unique geodesic ray $\Phi$ emanating from $\omega$ such that the $\omega$-psh $\Phi (x, t^{-1})$ on $X \times \Delta^* \subset \mathcal{X}_\Delta$ extends to a locally bounded psh metric on the line bundle $\mathcal{L}_\Delta$ on $\mathcal{X}_\Delta$ (cf. \cite{PS1, BBJ}). We write it as $\Phi_{(\mathcal{X}, \mathcal{L})}$ or $\bm{\phi}_{(\mathcal{X}, \mathcal{L})}$. We say $\Phi$ is \textit{subordinate to} a normal test configuration $(\mathcal{X}, \mathcal{L}; \tau)$ if $\Phi (x, t) = \Phi_{(\mathcal{X}, \mathcal{L})} (x, \tau t)$ for some $(\mathcal{X}, \mathcal{L})$. If we shift the $\mathbb{C}^\times$-action on $\mathcal{L}$ by a weight $m$, the associated geodesic ray shifts by $m \log |\tau|$. It is shown by \cite{CTW2} (cf. \cite{PS2}) that a geodesic ray $\Phi$ subordinate to some test configuration is in $C^{1,1}_{\mathrm{loc}} (X \times A_\infty, \omega)$. A \textit{$C^{1,1}$-(sub)geodesic ray} is a (sub)geodesic ray in $C^{1,1}_{\mathrm{loc}} (X \times A_\infty, \omega)$. We note that functions in this class are more regular than those in $C^{1, \bar{1}}_{\mathrm{loc}}$, which consists of $L^p_2$-functions with locally bounded Laplacian ($\forall p < \infty$). We can identify the space $C^{1,1}_{\mathrm{loc}}$ with the Sobolev space $L^\infty_{2, \mathrm{loc}}$. The Bedford--Taylor product $dd^c_\omega \phi_1 \wedge \dotsb \wedge dd^c_\omega \phi_k$ for $C^{1,1}$-regular $\omega$-psh $\phi_1, \ldots, \phi_k$ coincides with the usual product of differential forms $dd^c_\omega \phi_i$ with $L^\infty$-coefficients.
We note the closure of $C^\infty \subset C^{k,1}$ in the $C^{k,1}$-norm is $C^{k+1} \subsetneq C^{k,1}$, so a purely $C^{k,1}$-function cannot be approximated by smooth functions in the strong $C^{k,1}$-topology. Still, every $C^{k,1}$-function can be approximated by smooth functions in the following weak topology, which is slightly weaker than the usual weak topology in functional analysis: Identifying $C^{k, 1}$ with a subspace of $C^k \times L^\infty$ by $f \mapsto (f, \nabla^{k+1} f)$, our weak topology on $C^{k,1}$ is defined as the topology induced from the strong topology of $C^k$ and the weak$^*$ topology of $L^\infty = (L^1)^*$. In this topology, $f_i$ converges to $f$ in $C^{k,1}$ if and only if it converges in $C^k$ and $\int_X \nabla^{k+1} f_i g d\mu \to \int_X \nabla^{k+1} f g d\mu$ for every function $g$ integrable with respect to the Lebesgue measure $\mu$. (The usual weak topology further assumes the weak convergence of $\nabla^{k+1} f_i \to \nabla^{k+1} f$. ) For $f \in C^{k,1}$, the convolution $\rho_\epsilon * f$ with a mollifyer $\rho_\epsilon$ gives a desired approximation, thanks to the uniform boundedness \[ \| \nabla^{k+1} (\rho_\epsilon * f) \|_{L^\infty} = \| \rho_\epsilon * \nabla^{k+1} f \|_{L^\infty} \le \| \rho_\epsilon \|_{L^1} \cdot \| \nabla^{k+1} f \|_{L^\infty} = \| \nabla^{k+1} f \|_{L^\infty}. \] We refer to approximation in this weak topology as \textit{approximation in $C^{k,1}$}. If $f_i \to f$ and $g_i \to g$ are approximation in $C^{k,1}$, then $f_i g_i \to fg$ is also approximation in $C^{k,1}$. We call attention to the difference between ``everywhere'' and ``almost everywhere'' for two reasons: \begin{itemize} \item When dealing with locally bounded psh, the Monge--Amp\`ere measures $(dd_\omega \phi_t)^n$ are not necessarily absolutely continuous with respect to the Lebesgue measure. 
So ``equal almost everywhere with respect to the Lebesgue measure'' does not imply ``equal almost everywhere with respect to $(dd^c \phi_t)^n$''. \item For a smooth metric $\omega$ and a test configuration $(\mathcal{X}, \mathcal{L}; \tau)$, we will construct a (possibly uncontinuous) monotonically decreasing functional $\check{W}^+$ on $[0, \infty)$ such that (1) $\bm{\check{\mu}} (\omega) \ge \check{W}^+ (0)$ and (2) $\lim_{t \to \infty} \check{W}^+ (t) = \bm{\check{\mu}}_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau)$, so that $\bm{\check{\mu}} (\omega) \ge \bm{\check{\mu}}_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau)$. If $\check{W}^+$ is just a functional satisfying (1) and (2), and $\check{W}^+ = \tilde{W}^+$ almost everywhere for a monotonically decreasing functional $\tilde{W}^+$, we fail to conclude $\bm{\check{\mu}} (\omega) \ge \bm{\check{\mu}}_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau)$ as we may have $\tilde{W}^+ (0) \ge \bm{\check{\mu}} (\omega)$. \end{itemize} When we clarify this point, we use the notation $\mathfrak{B} (X)$ rather than $L^\infty (X)$ to denote the space of (locally) bounded Borel functions. (Usually, $L^\infty (X)$ denotes the quotient of $\mathfrak{B} (X)$ which identifies two functions equal almost everywhere with respect to the Lebesgue measure. ) We use the notation $\mathfrak{B} (X, \sigma)$ similarly as $C^\infty (X, \sigma)$. We call a family $\{ \psi_t \}$ of locally bounded Borel functions \textit{equicontinuous} if the family $\{ \psi_t (x) \}_{x \in X}$ of functions on $t$ is equicontinuous. Namely, for every $t \in [0, \infty)$ and $\varepsilon > 0$, there is $\delta > 0$ such that $\sup_{x \in X} |\psi_t (x) - \psi_{t'} (x)| < \epsilon$ for every $t' \in [0, \infty)$ with $|t-t'| < \delta$. For a subgeodesic $\Phi$, for each $x \in X$, $\Phi (x, \tau)$ is a $U (1)$-invariant psh on $\tau$. This implies $\phi_t (x)$ is convex on $t$ for each $x \in X$. 
By the convexity, $\delta^{-1} (\phi_{t+\delta} (x) - \phi_t (x))$ is monotonically decreasing with respect to $\delta$, so that we get the right derivative: \begin{equation} \dot{\phi}_t (x) := \lim_{\delta \to +0} \frac{\phi_{t+\delta} (x) - \phi_t (x)}{\delta}. \end{equation} Moreover, $\dot{\phi}_t$ is monotonically increasing with respect to $t$. We put $\dot{\Phi} (x, \tau) := \dot{\phi}_{\log |\tau|} (x)$. This function is Borel as the decreasing limit of Borel functions is Borel. For $t > 0$, we have \[ \frac{\phi_t (x) - \phi_{t-\delta} (x)}{\delta} \le \dot{\phi}_t (x) \le \frac{\phi_{t+\delta} (x) - \phi_t (x)}{\delta} \] for any small $\delta > 0$ and every $x \in X$. Thus, suppose $\Phi$ is locally bounded, $\dot{\phi}_t$ is bounded on $X$ for each $t > 0$ and bounded from above for $t=0$. Moreover, by the monotonicity, $\dot{\Phi}|_{X \times A^\circ}$ is locally bounded when $\Phi$ is so. Furthermore, by \[ -\delta C \le \delta \dot{\phi}_t \le \phi_{t+ \delta} - \phi_t \le \delta \dot{\phi}_{t+\delta} \le \delta \dot{\phi}_{t+\delta_0} \le \delta C, \] the family $\{ \phi_t \}$ is equicontinuous in the above sense. In particular, for $t_i \to t > 0$, we have the weak convergence $(dd^c_\omega \phi_{t_i})^n \to (dd^c \phi_t)^n$ of measures as the Bedford--Taylor product is continuous with respect to the pointwise uniform convergence. In particular, for a $C^{1,1}$-geodesic $\Phi$, the $L^\infty$-differential forms $(dd^c_\omega \phi_{t_i})^n$ weakly converge to $(dd^c_\omega \phi_t)^n$ as currents. The following is well known. \begin{lem} \label{affine functional} For a $C^{1,1}$-geodesic ray $\bm{\phi}$, the integrations \[ \int_X e^{-\dot{\phi}_t} (dd^c_\omega \phi_t)^n, \quad \int_X \dot{\phi}_t e^{-\dot{\phi}_t} (dd^c_\omega \phi_t)^n \] are independent of $t$. 
\end{lem} \begin{proof} One can show this by firstly computing the derivative for smooth (not necessarily subgeodesic) ray, then approximating the geodesic ray by smooth rays in $C^{1,1}_{\mathrm{loc}}$. As the derivative consists of integration with respect to $(dd_\omega^c \Phi)^{n+1}$, it vanishes. As we will exhibit and repeat such arguments later, we omit the detail for this well-known case. \end{proof} \subsubsection{Relaxed action functional along subgeodesic ray} Here we study the relaxed action functional $\mathcal{A}_{\bm{\phi}}^{\bm{\psi}}$ for weakly regular $\bm{\phi}$ and $\bm{\psi}$. Though we only deal with $C^{1,1}$-geodesic ray $\bm{\phi}$ except for this subsection, we introduce the functional for locally bounded subgeodesics $\bm{\phi}$ in order to clarify a sufficient condition for continuity. \begin{defin} For a locally bounded subgeodesic ray $\Phi$ and a locally bounded Borel function $\Psi \in \mathfrak{B} (X \times A_\infty, \sigma)$, for each $t \in (0, \infty)$, we put \[ \mathcal{A}_\Phi^\Psi (t) := \int_X \psi_t e^{-\dot{\phi}_t} \frac{(dd^c_\omega \phi_t)^n}{n!} + \frac{1}{\pi} \int_{X \times A_t} \Psi e^{-\dot{\Phi}} \frac{(dd_\omega^c \Phi)^{n+1}}{(n+1)!} - \int_0^t ds \int_X \sigma \wedge e^{-\dot{\phi}_s} \frac{(dd^c_\omega \phi_s)^{n-1}}{(n-1)!}. \] If moreover $\dot{\phi}_0$ is bounded from below, we put $\mathcal{A}_\Phi^\Psi (0) := \int_X \psi_0 e^{-\dot{\phi}_0} \omega^n/n!$. \end{defin} For a smooth $\Phi$ and $\Psi$, we have $\mathcal{A}_\Phi^\Psi = \mathcal{A}_{\bm{\phi}}^{\bm{\psi}}$ for $\mathcal{A}_{\bm{\phi}}^{\bm{\psi}}$ in the previous subsection. The relaxed action $\mathcal{A}_\Phi^\Psi$ gives a locally bounded Borel function on $(0, \infty) \text{ or } [0, \infty)$. We give a sufficient condition for continuity. As the measure $(dd^c \Phi)^{n+1}$ may charge the boundary $\partial A_t$, we focus on geodesic ray. 
\begin{lem} For a geodesic ray $\Phi$ with continuous $\dot{\Phi}$ and for a continuous $\Psi$, the relaxed action $\mathcal{A}_\Phi^\Psi$ is continuous on $[0, \infty)$. \end{lem} \begin{proof} As we already noted, we have $(dd_\omega^c \phi_{t_i})^k \to (dd_\omega^c \phi_t)^k$ weakly as currents of order $0$. By our assumption, $\psi_{t_i}$ (resp. $\dot{\phi}_{t_i}$) are continuous and converge uniformly to $\psi_t$ (resp. $\dot{\phi}_t$) as $t_i \to t$. The desired continuity follows from the following fact: for a uniformly convergent sequence $f_i \to f$ of continuous functions and a weakly convergent sequence $\mu_i \to \mu$ of Radon measures on $X$, we have $\int_X f_i \mu_i \to \int_X f \mu$. Indeed, we have \begin{align*} \left| \int_X f_i \mu_i - \int_X f \mu \right| &\le \left| \int_X f \mu_i - \int_X f \mu \right| + \int_X |f- f_i| \mu_i \\ &\le \left| \int_X f \mu_i - \int_X f \mu \right| + \| \mu_i \| \| f- f_i \|_{\sup}. \end{align*} Since $\mu_i$ is weakly convergent, the uniform boundedness principle on $\| \mu_i \|$ shows $\| \mu_i \| \le C$. This shows the convergence. \end{proof} \begin{prop} For a $C^{1,1}$-ray $\Phi$ (not necessarily a subgeodesic) and a smooth ray $\Psi$ (up to the boundary), $\mathcal{A}_\Phi^\Psi$ is $C^1$ (up to the boundary). In this case, we have \[ \frac{d}{dt} \mathcal{A}^\Psi_\Phi (t) = -\int_X (dd^c_\sigma \psi_t - \dot{\psi}_t) e^{dd^c_\omega \phi_t - \dot{\phi}_t}. \] \end{prop} \begin{proof} The claim holds for smooth $\Phi$ and $\Psi$. Take a $C^{1,1}_{\mathrm{loc}}$-approximation $\{ \Phi_i \}$ of $\Phi$ by smooth rays. The claim follows from the following pointwise locally uniform convergence: \begin{gather*} \mathcal{A}^\Psi_{\Phi_i} \to \mathcal{A}^\Psi_\Phi, \\ \int_X (dd^c_\sigma \psi - \dot{\psi}) e^{dd^c_\omega \phi_i - \dot{\phi}_i} \to \int_X (dd^c_\sigma \psi - \dot{\psi}) e^{dd^c_\omega \phi - \dot{\phi}}. 
\end{gather*} \end{proof} We denote $\mathcal{A}^\Psi_\Phi$ by $\mathcal{A}_{\bm{\phi}}^{\bm{\psi}}$ in the rest of this article. We note for a $C^{1,1}$-regular $\phi$, ``almost everywhere with respect to the Lebesgue measure'' implies ``almost every where with respect to the measure $(dd^c \phi)^n$''. \subsubsection{Proof of the convexity} For a $C^{1,1}$-geodesic ray $\bm{\phi} = \{ \phi_t \}_{t \in [0, \infty)}$ ($C^{1, \bar{1}}$-regularity suffices in this section), $v_t := \omega_{\phi_t}^n/\omega^n$ is a non-negative $L^\infty$-function, so that $v_t \log v_t$ gives an $L^\infty$-function. Then we put \[ \mathcal{A}_{\bm{\phi}} (t) := \frac{1}{n!} \int_X v_t \log v_t e^{- \dot{\phi}_t} \omega^n + \frac{1}{n!} \int_0^t ds \int_X n \mathrm{Ric} (\omega) \wedge e^{-\dot{\phi}_s} \omega_{\phi_s}^{n-1}. \] Using $\mu_t = e^{-\dot{\phi}_t} \omega_{\phi_t}^n$, we get the following expression of the first part: \[ \int_X v_t \log v_t e^{- \dot{\phi}_t} \omega^n = \int_X \frac{d\mu_t}{d\mu_0} \log \frac{d\mu_t}{d\mu_0} d\mu_0 - \int_X (\dot{\phi}_t - \dot{\phi}_0) e^{-\dot{\phi}_t} \omega_{\phi_t}^n. \] The term $\int_X \frac{d\mu_t}{d\mu_0} \log \frac{d\mu_t}{d\mu_0} d\mu_0$ is known to be lower semi-continuous with respect to the weak convergence of measures. The rest terms are $C^1$ when $\dot{\Phi}$ is continuous (up to the boundary). Therefore, $\mathcal{A}_{\bm{\phi}}$ is lower semi-continuous (up to the boundary). Since the entropy part is non-negative, we have the following. \begin{lem} \label{W plus} Let $\bm{\phi} = \{ \phi_s \}_{s \in [0, \infty)}$ be a $C^{1, 1}$-geodesic ray emanating from a smooth initial metric $\omega$. Then we have \[ -\frac{d}{dt}_+\Big{|}_{t =0} \mathcal{A}_{\bm{\phi}} (t) \le \check{W} (\omega, -\dot{\phi}_0). 
\] \end{lem} \begin{proof} As the entropy $\int_X \frac{d\mu_t}{d\mu_0} \log \frac{d\mu_t}{d\mu_0} d\mu_0$ is non-negative and is zero when $t= 0$, we have \[ \frac{d}{dt}_+\Big{|}_{t=0} \int_X v_t \log v_t e^{- \dot{\phi}_t} \omega^n \ge \frac{d}{dt}_+\Big{|}_{t=0} \int_X \dot{\phi}_0 e^{-\dot{\phi}_t} \omega_{\phi_t}^n. \] Take a $C^{1,1}_{\mathrm{loc}}$-approximation $\{ \Phi_i \}$ of the geodesic $\Phi$ by smooth rays. Each $f_i (t) = \int_X \dot{\phi}_{i, 0} e^{-\dot{\phi}_{i, t}} \omega_{\phi_{i, t}}^n$ is $C^\infty$ on $[0, \infty)$ and its derivative is given by \[ f_i' (t) = -\int_X \dot{\phi}_{i, 0} e^{-\dot{\phi}_{i, t}} (\ddot{\phi}_{i, t} \omega_{\phi_{i, t}}^n - n d\dot{\phi}_{i, t} \wedge d^c \dot{\phi}_{i, t} \wedge \omega_{\phi_{i, t}}^{n-1}) + \int_X n d \dot{\phi}_{i, 0} \wedge d^c \dot{\phi}_{i, t} e^{-\dot{\phi}_{i, t}} \omega_{\phi_{i, t}}^{n-1}. \] We have the pointwise locally uniform convergence \begin{gather*} f_i (t) \to \int_X \dot{\phi}_0 e^{-\dot{\phi}_t} \omega_{\phi_t}^n, \\ f_i' (t) \to \int_X n d \dot{\phi}_0 \wedge d^c \dot{\phi}_t e^{-\dot{\phi}_t} \omega_{\phi_t}^{n-1}. \end{gather*} Thus the functional $\int_X \dot{\phi}_0 e^{-\dot{\phi}_t} \omega_{\phi_t}^n$ is $C^1$ on $[0, \infty)$ and \[ \frac{d}{dt}_+ \Big{|}_{t=0} \int_X \dot{\phi}_0 e^{-\dot{\phi}_t} \omega_{\phi_t}^n = \int_X |\partial^\sharp \dot{\phi}_0|^2 e^{-\dot{\phi}_0} \omega^n. \] We similarly obtain \[ \frac{d}{dt}_+ \Big{|}_{t=0} \int_0^t ds \int_X n \mathrm{Ric} (\omega) \wedge e^{-\dot{\phi}_s} \omega_{\phi_s}^{n-1} = \int_X s (\omega) e^{-\dot{\phi}_0} \omega^n, \] which shows the claim. \end{proof} The rest of this section is devoted to the proof of the following. \begin{thm} The action functional $\mathcal{A}_{\bm{\phi}}$ is continuous and pointwise convex on $[0, \infty)$ \end{thm} To show the pointwise convexity of $\mathcal{A}_{\bm{\phi}}$, we make use of the following convergence similarly as in \cite{BB}. 
\begin{itemize} \item If a locally uniformly bounded sequence $\psi_{i, t}$ converges to $\psi_t$ almost everywhere, we have $\mathcal{A}_{\bm{\phi}}^{\bm{\psi}_i} (t) \to \mathcal{A}_{\bm{\phi}}^{\bm{\psi}} (t)$ by the dominated convergence theorem. \item If $\psi_{i, t}$ is a decreasing sequence of bounded functions pointwisely converging to $\log v_t$, we have $\mathcal{A}_{\bm{\phi}}^{\bm{\psi}_i} (t) \to \mathcal{A}_{\bm{\phi}} (t)$ again by the dominated convergence theorem as $v_t \psi_{1, t} \ge v_t \psi_{i, t} \ge v_t \log v_t \ge -e^{-1}$. \end{itemize} Note also the following generalities on convex function: \begin{itemize} \item An upper semi-continuous function $f$ on $[0, \infty)$ is convex (and hence continuous in the interior) iff $F (z) := f (\log |z|)$ satisfies $dd^c F \ge 0$ on $A^\circ$ in the weak sense of current. (weakly subharmonic) \item A pointwise limit of convex functions on $[0, \infty)$ is automatically convex (possibly not continuous on the boundary). \item A pointwise convex function on $[0, \infty)$ is continuous in the interior and upper-semi continuous on the boundary. \end{itemize} By these, the convexity and the continuity of $\mathcal{A}_{\bm{\phi}}$ up to the boundary reduces to the construction of a collection $\{ \Psi_{B, i} \}_{B, i \in \mathbb{N}}$ of $U(1)$-invariant continuous functions on $X \times A$ (up to the boundary) with the following properties: \begin{itemize} \item $\mathscr{A}_{\bm{\phi}}^{\bm{\psi}_{B, i}} (\tau) := \mathcal{A}_{\bm{\phi}}^{\bm{\psi}_{B, i}} (\log |\tau|)$ is weakly subharmonic for each $B, i \in \mathbb{N}$. \item For each $B$, $\{ \Psi_{B, i} \}_i$ are locally uniformly bounded and converges to a locally bounded function $\Psi_B$ almost everywhere. \item $\Psi_B$ is a decreasing sequence converging to $\Psi (z) := \log (\omega_{\phi_{\log |z|}}^n/\omega^n)$ pointwisely. \end{itemize} Now, we check the sequence constructed in \cite{BB} is the desired one. 
Indeed, they constructed a collection $\{ \Psi_{B, i} \}_{B, i \in \mathbb{N}}$ of \textit{Lipschitz functions} satisfying the last two conditions, plus the following property: \[ dd^c_\sigma \Psi_{B, i} \wedge (dd^c_\omega \Phi)^n := \sigma \wedge (dd^c_\omega \Phi)^n + dd^c (\Psi_{B, i} (dd^c_\omega \Phi)^n) \ge 0 \] for $\sigma = - \mathrm{Ric} (\omega)$. (We note in the notation of \cite{BB}, their $\Psi_{B, i}$ stands for our $\psi + \Psi_{B, i}$, taking a local potential $\psi$ of $\sigma = dd^c \psi$. ) Thus the following is the last piece in the proof of the convexity. \begin{thm} \label{key claim for convexity} Let $\Phi$ be the $C^{1,1}$-geodesic ray and $\Psi$ be a Lipschitz function on $X \times A_\infty^\circ$ with $dd^c_\sigma \Psi \wedge (dd^c_\omega \Phi)^n := \sigma \wedge (dd^c_\omega \Phi)^n + dd^c (\Psi (dd^c_\omega \Phi)^n) \ge 0$. We have the following equality: \[ dd^c \mathscr{A}_{\bm{\phi}}^{\bm{\psi}} = \frac{1}{n!} \varpi_* (e^{-\dot{\Phi}} dd^c_\sigma \Psi \wedge (dd^c_\omega \Phi)^n). \] Here the product current $e^{-\dot{\Phi}} dd^c_\sigma \Psi \wedge (dd^c_\omega \Phi)^n$ is well-defined as $dd^c_\sigma \Psi \wedge (dd^c_\omega \Phi)^n$ is a current of order $0$ by the positivity assumption. In particular, $\mathscr{A}_{\bm{\phi}}^{\bm{\psi}}$ is weakly subharmonic (since its $dd^c$ is the push-forward of a positive current). \end{thm} In the proof, we make use of the $C^{0,1}$-regularity of each $\Psi_{B, i}$, not just its continuity. As this readily follows from the construction, here we recall it: \[ \Psi_{B, j} (x, \tau) := \max \{ \log \frac{\beta_{j, \log |\tau|} (x)}{\omega^n (x)} ,-B \}, \] where $\beta_{j, t}$ is the Bergman measure defined as follows. Let $(s_{j, t}^k)_k \subset H^0 (K_X +j L)$ be an orthogonal basis with respect to the hermitian metric: \[ (s, s')_t = (\sqrt{-1})^{n^2} \int_X s \wedge \bar{s}' e^{-j \phi_t}. 
Then the top form $\beta_{j, t}$ is defined as \[ \beta_{j, t} = \frac{n!}{j^n} (\sqrt{-1})^{n^2} \sum_k s_{j, t}^k \wedge \bar{s}_{j, t}^k e^{-j \phi_t}, \]
\] We have \[ \gamma_\phi \wedge \gamma_\psi = -(d_X \dot{\phi} \wedge d_X^c \dot{\psi} + d_X \dot{\psi} \wedge d_X^c \dot{\phi}) \wedge \frac{r^{-1} dr \wedge d \theta}{2} \] and $\gamma_\phi^2 \wedge \gamma_\psi = 0$. We also have $\nu \wedge \gamma_\psi = \ddot{\psi}. \nu \wedge \frac{r^{-1} dr \wedge d \theta}{2}$ for $2n$-form $\nu$ on $X$. We put $\sigma_\psi := d_X d^c_{X, \sigma} \psi$. Using this, we compute \begin{align*} \Big{[} d_{U (1)} d_\sigma^c \Psi &\wedge e^{d_{U (1)} d_\omega^c \Phi}; x \eta \Big{]}_{n+1} = [((\sigma_\psi + \gamma_\psi) - \pi x \dot{\psi}) \wedge e^{(\omega_\phi + \gamma_\phi) - \pi x \dot{\phi}}]_{n+1} \\ &= \frac{1}{n!} e^{- \pi x \dot{\phi}} (\sigma_\psi + \gamma_\psi) (\omega_\phi + \gamma_\phi)^n - \frac{1}{(n+1)!} \pi x \dot{\psi} e^{- \pi x \dot{\phi}} (\omega_\phi + \gamma_\phi)^{n+1} \\ &= \frac{1}{n!} e^{- \pi x \dot{\phi}} \Big{(} n \sigma_\psi \wedge \omega_\phi^{n-1} \wedge \gamma_\phi + \binom{n}{2} \sigma_\psi \wedge \omega_\phi^{n-2} \wedge \gamma_\phi^2 \\ &\qquad \qquad \qquad + \gamma_\psi \wedge \omega_\phi^n + n \gamma_\psi \wedge \omega_\phi^{n-1} \wedge \gamma_\phi - \pi x \dot{\psi} \omega_\phi^n \wedge \gamma_\phi - \pi x \dot{\psi} \frac{n}{2} \omega^{n-1}_\phi \wedge \gamma_\phi^2 \Big{)} \\ &= \frac{1}{n!} e^{- \pi x \dot{\phi}} \frac{r^{-1} dr \wedge d \theta}{2} \\ &\qquad \quad \wedge \Big{[} n \ddot{\phi} \sigma_\psi \wedge \omega_\phi^{n-1} + \ddot{\psi} \omega_\phi^n - \mathrm{tr}_\phi (d_X \dot{\phi} \wedge d_X^c \dot{\psi} + d_X \dot{\psi} \wedge d_X^c \dot{\phi}) \omega_\phi^n \\ &\qquad \qquad \qquad - \pi x \dot{\psi} \Big{(} \ddot{\phi} - \mathrm{tr}_\phi (d_X \dot{\phi} \wedge d_X^c \dot{\phi}) \Big{)} \omega_\phi^n \\ &\qquad \qquad \qquad \quad - n (n-1) d_X \dot{\phi} \wedge d_X^c \dot{\phi} \wedge \sigma_\phi \wedge \omega_\phi^{n-2} \Big{]} \\ &= \frac{1}{n!} \frac{r^{-1} dr \wedge d \theta}{2} \\ &\qquad \quad \wedge \Big{[} n \ddot{\phi} \sigma_\psi \wedge e^{-\pi x \dot{\phi}} 
\omega_\phi^{n-1} + \Big{(} \ddot{\psi} - \nabla_X \dot{\phi} (\dot{\psi}) - \pi x \dot{\psi} (\ddot{\phi} - |\partial_X^\sharp \dot{\phi}|^2) \Big{)} e^{- \pi x \dot{\phi}} \omega_\phi^n \\ &\qquad \qquad \qquad \quad - \frac{1}{\pi x} e^{- \pi x \dot{\phi}} n (n-1) d_X d_X^c \dot{\phi} \wedge \sigma_\psi \wedge \omega_\phi^{n-2} \Big{]} \\ &\qquad + \frac{1}{n!} \frac{r^{-1} dr \wedge d \theta}{2} \wedge \frac{1}{\pi x} d_X (e^{- \pi x \dot{\phi}} n (n-1) d_X^c \dot{\phi} \wedge \sigma_\psi \wedge \omega_\phi^{n-2}) \\ &= \frac{r^{-1} dr \wedge d \theta}{2} \\ &\qquad \quad \wedge \Big{(} \sigma_\psi \ddot{\phi} + \ddot{\psi} - \nabla_X \dot{\phi} (\dot{\psi}) - \pi x \dot{\psi} (\ddot{\phi} - |\partial^\sharp \dot{\phi}|^2) - \frac{1}{\pi x} \sigma_\psi \wedge d_X d_X^c \dot{\phi} \Big{)} \wedge e^{\omega_\phi - \pi x \dot{\phi}} \\ &\qquad \qquad + \frac{1}{n!} \frac{r^{-1} dr \wedge d \theta}{2} \wedge \frac{1}{\pi x} d_X (e^{- \pi x \dot{\phi}} n (n-1) d_X^c \dot{\phi} \wedge \sigma_\psi \wedge \omega_\phi^{n-2}). 
\end{align*} Substituting $x = \pi^{-1}$, the integration along fibre is computed as follows: \begin{align*} \int_X \Big{(} \sigma_\psi \ddot{\phi} &+ \ddot{\psi} - \nabla \dot{\phi} (\dot{\psi}) - \dot{\psi} (\ddot{\phi} - |\partial^\sharp \dot{\phi}|^2) - \sigma_\psi \wedge d_X d_X^c \dot{\phi} \Big{)} \wedge e^{\omega_\phi - \dot{\phi}} \\ &= -\int_X \Big{(} (d_X d_X^c \dot{\psi} - \ddot{\psi}) + (\sigma_\psi - \dot{\psi}) \wedge (d_X d_X^c \dot{\phi} -\ddot{\phi}) \Big{)} \wedge e^{\omega_\phi - \dot{\phi}} \\ &= - \frac{d}{dt} \int_X (\sigma_\psi - \dot{\psi}) \wedge e^{\omega_\phi - \dot{\phi}} \\ &= \frac{d^2}{dt^2} \mathcal{A}^{\bm{\psi}}_{\bm{\phi}} (t), \end{align*} where we used \begin{gather*} \int_X \nabla \dot{\phi} (\dot{\psi}) e^{\omega_\phi -\dot{\phi}} = -2 \int_X \bar{\Box} \dot{\psi} e^{\omega_\phi -\dot{\phi}} = 2 \int_X dd^c \dot{\psi} \wedge e^{\omega_\phi - \dot{\phi}}, \\ \ddot{\phi} - |\partial^\sharp \dot{\phi}|^2 = (\ddot{\phi} + \bar{\Box} \dot{\phi}) - (\bar{\Box} \dot{\phi} + |\partial^\sharp \dot{\phi}|^2), \\ \int_X \dot{\psi} (\ddot{\phi} + \bar{\Box} \dot{\phi}) e^{\omega_\phi - \dot{\phi}} = \int_X \dot{\psi} (\ddot{\phi} - dd^c \dot{\phi}) \wedge e^{\omega_\phi - \dot{\phi}}, \\ \int_X \dot{\psi} (\bar{\Box} \dot{\phi} + |\partial^\sharp \dot{\phi}|^2) e^{\omega_\phi -\dot{\phi}} = - \int_X \bar{\Box} \dot{\psi} e^{\omega_\phi -\dot{\phi}} = \int_X dd^c \dot{\psi} \wedge e^{\omega_\phi -\dot{\phi}}. \end{gather*} This proves the claim. \end{proof} \begin{proof}[Proof of Theorem \ref{key claim for convexity}] We firstly note for a smooth ray $\bm{\psi}$ and for a $C^{1,1}_{\mathrm{loc}}$-approximation $\{ \bm{\phi}_i \}_i$ of the geodesic ray $\bm{\phi}$ by smooth rays, we have the locally uniform convergence $\mathcal{A}_{\bm{\phi}_i}^{\bm{\psi}} \to \mathcal{A}_{\bm{\phi}}^{\bm{\psi}}$. So we get the weak convergence $dd^c \mathcal{A}_{\bm{\phi}_i}^{\bm{\psi}} \to dd^c \mathcal{A}_{\bm{\phi}}^{\bm{\psi}}$ of currents. 
On the right hand side, we get the weak convergence of currents \[ ( d_{U (1)} d_\sigma^c \Psi \wedge e^{d_{U (1)} d_\omega^c \Phi_i}; \pi^{-1} \eta ) \to e^{-\dot{\Phi}} dd_\sigma^c \Psi \wedge (dd_\omega^c \Phi)^n \] by the continuity of the Monge--Ampere operator with respect to the uniform convergence, which especially implies $(dd_\omega^c \Phi_i)^{n+1} \to (dd_\omega^c \Phi)^{n+1} = 0$. Thus the theorem holds for smooth $\bm{\psi}$. Next, for a Lipschitz ray $\bm{\psi}$, take a $C^{0,1}_{\mathrm{loc}}$-approximation $\{ \bm{\psi}_i \}_i$ of $\bm{\psi}$ by smooth rays. We easily see the locally uniform convergence $\mathcal{A}_{\bm{\phi}}^{\bm{\psi}_i} \to \mathcal{A}_{\bm{\phi}}^{\bm{\psi}}$, hence also the weak convergence $dd^c \mathcal{A}_{\bm{\phi}}^{\bm{\psi}_i} \to dd^c \mathcal{A}_{\bm{\phi}}^{\bm{\psi}}$ of currents. On the other hand, we compute the right hand side as \[ e^{-\dot{\Phi}} d d^c_\sigma \Psi_i \wedge (dd_\omega^c \Phi)^n = e^{-\dot{\Phi}} \sigma \wedge (dd_\omega^c \Phi)^n + d (e^{-\dot{\Phi}} d^c \Psi_i \wedge (dd_\omega^c \Phi)^n) + e^{-\dot{\Phi}} d \dot{\Phi} \wedge d^c \Psi_i \wedge (dd^c_\omega \Phi)^n. \] Since $\{ \Psi_i \}_i$ is a $C^{0,1}_{\mathrm{loc}}$-approximation of $\Psi$ and $\dot{\Phi}$ is Lipschitz, we have \begin{gather*} e^{-\dot{\Phi}} d^c \Psi_i \wedge (dd_\omega^c \Phi)^n \to e^{-\dot{\Phi}} d^c \Psi \wedge (dd_\omega^c \Phi)^n, \\ e^{-\dot{\Phi}} d \dot{\Phi} \wedge d^c \Psi_i \wedge (dd^c_\omega \Phi)^n \to e^{-\dot{\Phi}} d \dot{\Phi} \wedge d^c \Psi \wedge (dd^c_\omega \Phi)^n \end{gather*} as currents. Thus we get \[ e^{-\dot{\Phi}} d d_\sigma \Psi_i \wedge (dd_\omega^c \Phi)^n \to e^{-\dot{\Phi}} \sigma \wedge (dd_\omega^c \Phi)^n + d (e^{-\dot{\Phi}} d^c \Psi \wedge (dd_\omega^c \Phi)^n) + e^{-\dot{\Phi}} d \dot{\Phi} \wedge d^c \Psi \wedge (dd^c_\omega \Phi)^n \] as currents. 
Finally, we note \[ G S + d (G d^c F \wedge T) = dG \wedge d^c F \wedge T + G (S+dd^c (F T)) \] for a closed $(k,k)$-current $T$ with $L^\infty$-coefficients, a $(k+1,k+1)$-current $S$ with $L^\infty$-coefficients and Lipschitz functions $F, G$ satisfying $S + dd^c (FT) \ge 0$ (hence the product $G (S+dd^c (FT))$ is well-defined). We can check this as follows. If we take a $C^{0,1}$-approximation $G_i \to G$ by smooth functions, $dG_i$ converges to $dG$ in the weak$^*$ topology of $L^\infty = (L^1)^*$, so we have the following weak convergence of each term by the regularity/order assumptions: \begin{gather*} G_i S \to GS, \quad G_i d^c F \wedge T \to G d^c F \wedge T, \\ dG_i \wedge d^c F \wedge T \to dG \wedge d^c F \wedge T, \quad G_i (S+dd^c (F T)) \to G (S+dd^c (F T)). \end{gather*} So by regularizing $G$, we may assume $G$ is smooth. Then the formula reduces to $d^c F \wedge T = d^c (F T)$. Similarly regularizing $F$, we may assume $F$ is smooth, in which case the formula is evident. Using this, we compute \begin{align*} e^{-\dot{\Phi}} \sigma \wedge (dd_\omega^c \Phi)^n &+ d (e^{-\dot{\Phi}} d^c \Psi \wedge (dd_\omega^c \Phi)^n) + e^{-\dot{\Phi}} d \dot{\Phi} \wedge d^c \Psi \wedge (dd_\omega^c \Phi)^n \\ &= e^{-\dot{\Phi}} (\sigma \wedge (dd_\omega^c \Phi)^n + dd^c (\Psi (dd^c_\omega \Phi)^n)) \\ &= e^{-\dot{\Phi}} dd_\sigma^c \Psi \wedge (dd^c_\omega \Phi)^n \end{align*} and complete the proof. \end{proof} \begin{rem} For a uniformly convergent sequence $\Psi_i \to \Psi$, $\Psi_i (dd^c \Phi)^n$ converges weakly to $\Psi (dd^c \Phi)^n$ as $(dd^c \Phi)^n$ is order $0$. This implies that $dd^c_\sigma \Psi_i \wedge (dd^c \Phi)^n = dd^c_\sigma (\Psi_i (dd^c \Phi)^n)$ weakly converges to $dd^c_\sigma \Psi \wedge (dd^c \Phi)^n := dd^c_\sigma (\Psi (dd^c \Phi)^n)$ as currents. 
However, this does not readily imply the product $e^{-\dot{\Phi}} dd^c_\sigma \Psi_i \wedge (dd^c \Phi)^n$ converges to the product $e^{-\dot{\Phi}} dd^c_\sigma \Psi \wedge (dd^c \Phi)^n$ as $\dot{\Phi}$ is only Lipschitz continuous. In the above proof, we discussed with ''the integration by parts'' and made use of the Lipschitz regularity of $\dot{\Phi}, \Psi$ to show this convergence. We note the above proof can be relaxed to $C^{1, \bar{1}}$-geodesics by carefully tracking with the boundedness of $(dd^c \Phi)^n$ and $L^p_2$-regularity for $p \gg 0$. \end{rem} \newpage \subsection{Slope formula of the action functional} \label{Slope formula} \subsubsection{Useful lemmas in $C^{1,1}$-regularity} \begin{lem}[Equivariant $\partial \bar{\partial}$-lemma] Let $X$ be a compact K\"ahler manifold endowed with a Hamiltonian holomorphic aciton of a compact Lie group $K$, where Hamiltonian means that there is a $K$-invariant K\"ahler form $\omega$ admitting a moment map. \begin{enumerate} \item For a $d_K$-exact equivariant $(1,1)$-form $\sigma + \nu$, the smooth function $f$ with $\sigma = dd^c f$, which exists by the usual $\partial \bar{\partial}$-lemma, satisfies $\sigma + \nu = d_K d^c f$. \item For a $K$-invariant divisor $D$, take a smooth $K$-equivariant closed $(1,1)$-form $\Delta +\delta$ in the equivariant cohomology class $[D^K]$. Then there is an integrable function $G$ on $X$ which is smooth away from $D$ such that $(\Delta +\delta) + d_{U (1)} d^c G = D^K$ as equivariant currents. \end{enumerate} \end{lem} \begin{rem} For a $K$-invariant divisor $D$, $D + \mu$ for a constant $\mu \in (\mathfrak{k}^\vee)^K$ gives a $d_K$-closed equivariant current. Indeed, for $\xi \in \mathfrak{k}$, we have $i_\xi D = 0$ as currents: for a test $2n-1$-form $\varphi$, we compute \[ (i_\xi D, \varphi) = \pm (D, i_\xi \varphi) = \pm \int_D (i_\xi \varphi)|_D = \pm \int_D i_{\xi|_D} (\varphi|_D) = 0, \] where we used the $K$-invariance of $D$. 
We can construct equivariant cohomology via equivariant currents (cf. \cite{Ino3, GS}), so that the $d_K$-closed equivariant current $D^K := D + 0$ defines the equivariant cohomology class $[D^K]$. \end{rem} \begin{proof} (1) For a $K$-equivariant $1$-form, which is just a $K$-invariant $1$-form, we have $(d_K \gamma; \xi) = d \gamma + \gamma (\xi)$ for $\xi \in \mathfrak{k}$. Suppose $d \gamma = 0$, then by $K$-invariance, we have $L_\xi \gamma = d (\gamma (\xi)) = 0$, so that $\gamma (\xi)$ is constant. Since the action is Hamiltonian, $\xi$ has a non-empty zero set. It follows that a $d_K$-exact equivariant $2$-form $\sigma + \nu$ is zero iff $\sigma = 0$. Since $\sigma' + \nu' := (\sigma + \nu) - d_K d^c f$ is a $d_K$-exact equivariant $2$-form with $\sigma' = 0$, it must be zero as equivariant form. We note the proof works also for $d_K$-closed equivariant $(1,1)$-current. (2) The claim is well-known in the non-equivariant case: we have an integrable function $G$ smooth away from $D$ such that $\Delta + dd^c G = D$. Then $\sigma' + \nu' = (\Delta + \delta) - D^K - d_K d^c G$ is $d_K$-exact with $\sigma' = 0$, we must have $\nu' = 0$. \end{proof} \begin{lem}[Equivariant Stokes theorem] Let $\mathcal{X}$ be a manifold with boundary and $K$ be a compact Lie group acting smoothly on $\mathcal{X}$. Let $\alpha, \gamma$ be a $K$-equivariant differential form with Lipschitz coefficients and $\beta$ be a $K$-equivariantly closed differential form with $C^\infty$-coefficients. Then we have \[ \int_{\mathcal{X}} d_K \alpha \wedge (\beta + d_K \gamma)^l = \int_{\partial \mathcal{X}} \alpha \wedge (\beta + d_K (\gamma|_{\partial \mathcal{X}}))^l. \] \end{lem} \begin{proof} This is well-known when $\alpha$ and $\gamma$ is smooth. For Lipschitz $\alpha$ and $\gamma$, we can take approximation by smooth equivariant forms $\{ \alpha_i \}_i, \{ \gamma_i \}_i$ in $C^{0,1}$. 
When restricted to the boundary, $\alpha_i|_{\partial \mathcal{X}}$, $\gamma_i|_{\partial \mathcal{X}}$ are still approximation in $C^{0,1}$ of $\alpha|_{\partial \mathcal{X}}$, $\gamma|_{\partial \mathcal{X}}$, respectively. Thus we have \begin{gather*} \int_X d_K \alpha_i \wedge (\beta + d_K \gamma_i)^l \to \int_X d_K \alpha \wedge (\beta + d_K \gamma)^l, \\ \int_{\partial \mathcal{X}} \alpha_i \wedge (\beta + d_K (\gamma_i|_{\partial \mathcal{X}}))^l \to \int_{\partial \mathcal{X}} \alpha \wedge (\beta + d_K (\gamma|_{\partial \mathcal{X}}))^l. \end{gather*} Now the claim follows from the smooth case. \end{proof} \begin{rem} We will apply this to $\gamma = d^c \phi$ with $C^{1,1}$-regular $\phi$. For the weaker regularity $C^{1, \bar{1}}$, the current $d ((d^c \phi)|_{\partial \mathcal{X}})$ may not be bounded. Actually, it is not even clear if the restriction $(d^c \phi)|_{\partial \mathcal{X}}$ is in $L^p_1$ as the trace operator $L^p_1 (\mathcal{X}) \to W^{1-1/p, p} (\partial \mathcal{X})$ losses regularity. \end{rem} \subsubsection{Equivariant tensor calculus} Let $X$ be a compact K\"ahler manifold. Let $\mathcal{X}$ be a test configuration of $X$ and $\mathcal{M}$ be a $U(1)$-equivariant class in $H^{1,1}_{U (1)} (\mathcal{X}, \mathbb{R})$. Take a resolution $\beta: \tilde{\mathcal{X}} \to \mathcal{X}$ so that $\tilde{\mathcal{X}}$ dominates the trivial by $\rho: \tilde{\mathcal{X}} \to X \times \mathbb{C}P^1$. Take a smooth $U (1)$-equivariant $(1,1)$-form $\Sigma + \nu \in \beta^* \bar{\mathcal{M}}$ on $\tilde{\mathcal{X}}$ and a smooth $(1,1)$-form $\sigma$ on $X$ such that $[\sigma] = i^* \mathcal{M} \in H^2 (X, \mathbb{R})$. Let $j_-: X \times \mathbb{C}_- \hookrightarrow \bar{\mathcal{X}}$ denote the natural inclusion. We can construct a smooth function $\Psi_0$ on $X \times \mathbb{C}_{-}$ so that $\sigma + d_{U (1)} d^c \Psi_0 = j_-^* (\Sigma + \nu)$ as forms on $X \times \mathbb{C}_{-}$ as follows. Note by the following exact sequence (cf. 
\cite{Ino3}) \[ H^{\mathrm{lf}}_{2n} (\mathcal{X}_0) \to H_{U (1)}^2 (\tilde{\mathcal{X}}) \to H^2_{U (1)} (X \times \mathbb{C}_{-}), \] the equivariant form $\beta^* \bar{\mathcal{M}} - \rho^* [\sigma]$ can be written as $[D^{U (1)}]$ for a $U (1)$-invariant Cartier divisor $D$ on $\tilde{\mathcal{X}}$ supported on the central fibre. By the above lemma, we have a smooth $U (1)$-equivariant $(1,1)$-form $\Delta + \delta$ on $\tilde{\mathcal{X}}$ representing $[D^{U (1)}]$ and an integrable function $G$ on $\tilde{\mathcal{X}}$ which is smooth away from the central fibre such that $(\Delta + \delta) + d_{U (1)} d^c G = D$ as currents. On the other hand, we have a smooth function $F$ on $\tilde{\mathcal{X}}$ such that $(\beta^* \Sigma + \beta^* \nu) - \rho^* \sigma - (\Delta + \delta) = d_{U (1)} d^c F$. Thus we get $(\beta^* \Sigma + \beta^* \nu) - \rho^* \sigma - (\Delta + \delta) - d_{U (1)} d^c G = d_{U (1)} d^c (F -G)$ as currents on $\tilde{\mathcal{X}}$. Since $(\Delta + \delta) + d_{U (1)} d^c G$ is zero away from the central fibre, we get $j_-^* (\Sigma + \nu) = \sigma + d_{U (1)} d^c (F -G)$ on $\tilde{\mathcal{X}} \setminus \tilde{\mathcal{X}}_0 = X \times \mathbb{C}_{-}$. Put $\Psi_0 := F - G$. \begin{defin} We say \begin{gather} \bm{\psi} \in C^{1,1} (X, \sigma; \mathcal{M}) \end{gather} if for $\Psi (x, \tau) := \psi_{-\log |\tau|} (x)$, $\Psi - \Psi_0$ extends to a $C^{1,1}$-function on $\tilde{\mathcal{X}}_\Delta$ for some $\tilde{\mathcal{X}}$. The notion is independent of the choice of equivariant forms $\Sigma + \nu \in \beta^* \bar{\mathcal{M}}$, $\Delta + \delta \in [D^{U(1)}]$. Note we assume the regularity across the central fibre $\mathcal{X}_0$ in this notation. \end{defin} For a smooth test configuration $(\mathcal{X}, \mathcal{L}; \tau)$, we have $C^{1,1}$-regularity across the central fibre $\mathcal{X}_0$. 
\begin{thm} \label{limit formula} Let $(\mathcal{X}, \mathcal{L})$ be a test configuration and $\mathcal{M}$ be a $U(1)$-equivariant class in $H^{1,1}_{U (1)} (\mathcal{X}, \mathbb{R})$. Take smooth $(1,1)$-forms $\omega \in i^* \mathcal{L}$, $\sigma \in i^* \mathcal{M}$. For rays of functions $\bm{\phi} \in C^{1, 1} (X, \omega; \mathcal{L})$ and $\bm{\psi} \in C^{1,1} (X, \sigma; \mathcal{M})$, we have \begin{gather*} (e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau) = \lim_{t \to \infty} \int_X e^{\omega_{\phi_{\tau t}} - \pi \tau \dot{\phi}_{\tau t}}, \\ (\mathcal{M}|_{\mathcal{X}_0}. e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau) = \lim_{t \to \infty} \int_X (\sigma_{\psi_{\tau t}} - \pi \tau \dot{\psi}_{\tau t}) e^{\omega_{\phi_{\tau t}}- \pi \tau \dot{\phi}_{\tau t}}. \end{gather*} We note $d\phi_{\tau t}/dt = \tau \dot{\phi}_{\tau t}$. \end{thm} \begin{rem} We do not need any positivity on $\mathcal{L}$ and $\omega_{\phi_t}$ in the above theorem as we compute the integration directly, making use of $C^{1,1}$-regularity and equivariant Stokes theorem. By the localization formula, we have \begin{gather*} (e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau) = (e^L) - \tau (e^{\bar{\mathcal{L}}}; \tau), \\ (\mathcal{M}|_{\mathcal{X}_0}. e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau) = (M. e^L) - \tau (\bar{\mathcal{M}}. e^{\bar{\mathcal{L}}}; \tau). \end{gather*} For another test configuration $\mathcal{X}'$ dominating $\mathcal{X}$, we can also consider the intersection $(\bar{\mathcal{M}}'. e^{\bar{\mathcal{L}}}; \tau)$ for $\mathcal{M}' \in H^{1,1}_{U (1)} (\mathcal{X}', \mathbb{R})$ by either pushing $\bar{\mathcal{M}}'$ to $\bar{\mathcal{X}}$ as divisor or pulling back $\bar{\mathcal{L}}$ to $\bar{\mathcal{X}}'$. (The output is unique by the projection formula. ) The above theorem holds also for this intersection and $\bm{\psi}' \in C^{1,1} (X, \sigma, \mathcal{M}')$. 
When $\mathcal{X}'$ is smooth, we can take $\mathcal{M}' = K_{\mathcal{X}'/\mathbb{C}} \text{ or } K_{\mathcal{X}'/\mathbb{C}}^{\log}$. Since we have $(K_{\bar{\mathcal{X}}/\mathbb{P}^1}. e^{\bar{\mathcal{L}}}; \tau) = (K_{\bar{\mathcal{X}}'/\mathbb{P}^1}. e^{\bar{\mathcal{L}}}; \tau)$, we get \begin{align*} (K_{\bar{\mathcal{X}}/\mathbb{P}^1}. e^{\bar{\mathcal{L}}}; \tau) = \lim_{t \to \infty} \int_X (\sigma_{\psi'_t} - \pi \tau \dot{\psi}'_t) e^{\omega_{\phi_t} - \pi \tau \dot{\phi}_t} \end{align*} \end{rem} We prepare the following key formula. \begin{prop} In the setup of the above theorem, we have \[ (\bar{\mathcal{M}}. \bar{\mathcal{L}}^{n+k}; \tau) = \lim_{t \to \infty} - \frac{1}{\tau} \int_X (\sigma_{\psi_t} - \pi \tau \dot{\psi}_t) (\omega_{\phi_t} - \pi \tau \dot{\phi}_t)^{n+k} \] for $k \ge 0$. \end{prop} \begin{proof} Replacing $\mathcal{X}$ with the resolution $\tilde{\mathcal{X}}$, we may assume $\mathcal{X}$ is smooth dominating the trivial test configuration (as the equivariant intersection can be computed on $\tilde{\mathcal{X}}$ thanks to the projection formula). Take smooth equivariant forms $\Omega + \mu \in \bar{\mathcal{L}}$, $\Sigma + \nu \in \bar{\mathcal{M}}$. Then by the assumption there is a $C^{1,1}$-function $\tilde{\Phi}, \tilde{\Psi}$ on $\bar{\mathcal{X}}$ such that $\omega + d_{U (1)} d^c \Phi = (\Omega + \mu) + d_{U (1)} d^c \tilde{\Phi}$, $\sigma + d_{U (1)} d^c \Psi = (\Sigma + \nu) + d_{U (1)} d^c \tilde{\Psi}$ on $X \times \mathbb{C}_{-}$. \begin{align*} (\bar{\mathcal{M}}. \bar{\mathcal{L}}^{n+k}) &= \int_{\bar{\mathcal{X}}} (\Sigma + \nu) (\Omega + \mu)^{n+k} \\ &= \int_{\bar{\mathcal{X}}} (\Sigma + \nu + d_{U (1)} d^c \tilde{\Psi}) (\Omega + \mu + d_{U (1)} d^c \tilde{\Phi})^{n+k} \end{align*} by the equivariant Stokes theorem. 
Since the equivariant forms have $L^\infty$-coefficients, we have
\eta^\vee)^{n+k} \\ &= (n+k) \int_X d^c \psi \wedge d (\pi \dot{\phi}) \wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k-1} + \int_X \pi \dot{\psi} (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k} \\ &= (n+k) \binom{n+k-1}{k} \int_X d^c \psi \wedge d (\pi \dot{\phi}) \wedge \omega_\phi^{n-1} (- \pi \dot{\phi}. \eta^\vee)^k + \binom{n+k}{k} \int_X \pi \dot{\psi} \omega_\phi^n (- \pi \dot{\phi}. \eta^\vee)^k. \end{align*} We can compute the integrand of the first term in two ways as \begin{align*} [d^c \psi \wedge &d (\pi \dot{\phi}) \wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k-1} ]_{2n} \\ &= -d ([(\pi \dot{\phi}) d^c \psi \wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k-1}]_{2n-1}) + [(\pi \dot{\phi}) dd^c \psi \wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k-1}]_{2n} \\ & \qquad - (\pi \dot{\phi}) d^c \psi \wedge \binom{n+k-1}{k} \omega_\phi^{n-1} \wedge k (- \pi \dot{\phi}. \eta^\vee)^{k-1}. d (- \pi \dot{\phi}). \eta^\vee \end{align*} and \begin{align*} [d^c \psi \wedge d (\pi \dot{\phi}) &\wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k-1} ]_{2n} \\ &= \binom{n+k-1}{k} d^c \psi \wedge d (\pi \dot{\phi}) \wedge \omega_\phi^{n-1}. (-\pi \dot{\phi}. \eta^\vee)^k. \end{align*} Putting these together, we obtain \begin{align*} (n+k) \int_X d^c \psi \wedge d (\pi \dot{\phi}) \wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k-1} &= \frac{n+k}{k+1} \int_X (\pi \dot{\phi}) dd^c \psi \wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k-1} \\ &= - (\eta^\vee)^{-1} \int_X dd^c \psi \wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k} \end{align*} and so we get \begin{align*} \int_{X \times \partial \Delta_t} &(d^c_X \psi + \dot{\psi} \frac{d\theta}{2}) \wedge (\omega_\phi + d_X \dot{\phi} \wedge \frac{d\theta}{2} - \pi \dot{\phi}. \eta^\vee)^{n+k} \\ &= - (\eta^\vee)^{-1} \int_X dd^c \psi \wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k} + (\eta^\vee)^{-1} \int_X (\pi \dot{\psi}. \eta^\vee) (\omega_\phi - \pi \dot{\phi}. 
\eta^\vee)^{n+k}. \end{align*} (2) Similarly, we compute the second term as \begin{align*} \int_{X \times \partial \Delta_t} &(d^c_X \phi + \dot{\phi} \frac{d\theta}{2}) \wedge \sigma \wedge \omega^l \wedge (d_X d_X^c \phi + d_X \dot{\phi} \wedge \frac{d\theta}{2} - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k} \\ &= (n-1-l+k) \int_X d^c \phi \wedge \sigma \wedge \omega^l \wedge d (\pi \dot{\phi}) \wedge (dd^c \phi - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k-1} \\ &\qquad + \binom{n-1-l+k}{k} \int_X (\pi \dot{\phi}) \sigma \wedge \omega^l \wedge (dd^c \phi)^{n-1-l} (- \pi \dot{\phi}. \eta^\vee)^k, \end{align*} unless $l= n-1$ and $k=0$, in which case we have \[ \int_{X \times \partial \Delta_t} (d^c_X \phi + \dot{\phi} \frac{d\theta}{2}) \wedge \sigma \wedge \omega^l \wedge (d_X d_X^c \phi + d_X \dot{\phi} \wedge \frac{d\theta}{2} - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k} = \int_X (\pi \dot{\phi}) \sigma \wedge \omega^{n-1}. \] When $l \le n-2$, we compute the integrand of the first part in two ways as \begin{align*} [d^c \phi \wedge &\sigma \wedge \omega^l \wedge d (\pi \dot{\phi}) \wedge (dd^c \phi - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k-1}]_{2n} \\ &= - d [(\pi \dot{\phi}) d^c \phi \wedge \sigma \wedge \omega^l \wedge (dd^c \phi - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k-1}]_{2n} \\ &\qquad + [(\pi \dot{\phi}) dd^c \phi \wedge \sigma \wedge \omega^l \wedge (dd^c \phi - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k-1}]_{2n} \\ &\qquad \quad - (\pi \dot{\phi}) d^c \phi \wedge \sigma \wedge \omega^l \wedge \binom{n-1-l+k-1}{k} (dd^c \phi)^{n-2-l} \wedge k (- \pi \dot{\phi}. \eta^\vee)^{k-1}. d (-\pi \dot{\phi}). \eta^\vee \end{align*} and \begin{align*} [d^c \phi \wedge &\sigma \wedge \omega^l \wedge d (\pi \dot{\phi}) \wedge (dd^c \phi - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k-1}]_{2n} \\ &= \binom{n-1-l+k-1}{k} d^c \phi \wedge \sigma \wedge \omega^l \wedge d (\pi \dot{\phi}) \wedge (dd^c \phi)^{n-2-l} (- \pi \dot{\phi}. \eta^\vee)^k. \end{align*} When $l= n-1$, it is zero. 
Combining these together, we obtain \begin{align*} (n-1-l &+k) \int_X d^c \phi \wedge \sigma \wedge \omega^l \wedge d (\pi \dot{\phi}) \wedge (dd^c \phi - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k-1} \\ &= \binom{n-1-l+k}{k+1} \int_X (\pi \dot{\phi}) \sigma \wedge \omega^l \wedge (dd^c \phi)^{n-1-l} (- \pi \dot{\phi}. \eta^\vee)^k \end{align*} for $l \ge n-2$ and $= 0$ when $l=n-1$. Thus we get \begin{align*} \int_{X \times \partial \Delta_t} &(d^c_X \phi + \dot{\phi} \frac{d\theta}{2}) \wedge \sigma \wedge \omega^l \wedge (d_X d_X^c \phi + d_X \dot{\phi} \wedge \frac{d\theta}{2} - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k} \\ &= \binom{n-l+k}{k+1} \int_X (\pi \dot{\phi}) \sigma \wedge \omega^l \wedge (dd^c \phi)^{n-1-l} (- \pi \dot{\phi}. \eta^\vee)^k, \end{align*} which holds also for $l=n-1$. Finally, using \[ \sum_{l=0}^{n-1} \binom{n+k}{l} \binom{n-l+k}{k+1} \omega^l \wedge (dd^c \phi)^{n-1-l} = \binom{n+k}{k+1} \omega_\phi^{n-1}, \] we obtain \begin{align*} \sum_{l=0}^{n-1} \binom{n+k}{l} &\int_{X \times \partial \Delta_t} (d^c_X \phi + \dot{\phi} \frac{d\theta}{2}) \wedge \sigma \wedge \omega^l \wedge (d_X d_X^c \phi + d_X \dot{\phi} \wedge \frac{d\theta}{2} - \pi \dot{\phi}. \eta^\vee)^{n-1-l+k} \\ &=\binom{n+k}{k+1} \int_X (\pi \dot{\phi}) \sigma \wedge \omega_\phi^{n-1} (- \pi \dot{\phi}. \eta^\vee)^k \\ &= -(\eta^\vee)^{-1} \int_X \sigma \wedge (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k}. \end{align*} \vspace{2mm} Combining the all, we get \[ (\bar{\mathcal{M}}. \bar{\mathcal{L}}^{n+k}) = \lim_{t \to \infty} - (\eta^\vee)^{-1} \int_X (\sigma_\psi - \pi \dot{\psi}. \eta^\vee) (\omega_\phi - \pi \dot{\phi}. \eta^\vee)^{n+k}. \] \end{proof} \begin{proof}[Proof of Theorem \ref{limit formula}] By the above proposition, we get \begin{gather*} (e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau) = \frac{1}{n!} \int_X \omega_{\phi_t}^n + \sum_{k=1}^\infty \frac{1}{(n+k)!} \lim_{t \to \infty} \int_X (\omega_{\phi_t} - \pi \tau \dot{\phi}_t)^{n+k} \\ (\mathcal{M}|_{\mathcal{X}_0}. 
e^{\mathcal{L}|_{\mathcal{X}_0}}; \tau) = \frac{1}{(n-1)!} \int_X \sigma_{\psi_t} \wedge \omega_{\phi_t}^{n-1} + \sum_{k=0}^\infty \frac{1}{(n+k)!} \lim_{t \to \infty} \int_X (\sigma_{\psi_t} - \pi \tau \dot{\psi}_t) (\omega_{\phi_t} - \pi \tau \dot{\phi}_t)^{n+k}. \end{gather*} Since $\dot{\phi}_t = i_\eta d^c \Phi|_{\mathcal{X}_{-\log t}} = (\mu_\eta + i_\eta d^c \tilde{\Phi})|_{\mathcal{X}_{-\log t}}$, it is uniformly bounded. Thus we have \[ \left| \int_X (\omega_{\phi_t} - \pi \tau \dot{\phi}_t)^{n+k} \right| \le \binom{n+k}{k} \int_X |\pi \tau \dot{\phi}_t|^k \omega_{\phi_t}^n \le \binom{n+k}{k} (L^{\cdot n}) C^k. \] Then by the dominated convergence theorem, we get \begin{align*} \sum_{k=1}^\infty \frac{1}{(n+k)!} \lim_{t \to \infty} \int_X (\omega_{\phi_t} - \pi \tau \dot{\phi}_t)^{n+k} &= \lim_{t \to \infty} \sum_{k=1}^\infty \frac{1}{(n+k)!} \int_X (\omega_{\phi_t} - \pi \tau \dot{\phi}_t)^{n+k} \\ &= \lim_{t \to \infty} \int_X \sum_{k=1}^\infty \frac{1}{(n+k)!} (\omega_{\phi_t} - \pi \tau \dot{\phi}_t)^{n+k} \\ &= - \frac{1}{n!} \int_X \omega_{\phi_t}^n + \lim_{t \to \infty} \int_X e^{\omega_{\phi_t} - \pi \tau \dot{\phi}_t}. \end{align*} This proves the first equality in the claim. We can similarly discuss on $\psi$. \end{proof} \begin{cor} \label{slope formula} Let $(\mathcal{X}, \mathcal{L})$ be a smooth test configuration and $\bm{\phi}$ be the $C^{1,1}$-geodesic ray subordinate to $(\mathcal{X}, \mathcal{L})$ emanating from a smooth metric. \begin{enumerate} \item We have \begin{gather*} \frac{1}{n!} \int_X e^{-\tau \dot{\phi}_{\tau t}} \omega_{\phi_{\tau t}}^n = (e^{\mathcal{L}|_{\mathcal{X}_0}}; \pi^{-1} \tau), \\ \frac{1}{n!} \int_X \tau \dot{\phi}_{\tau t} e^{-\tau \dot{\phi}_{\tau t}} \omega_{\phi_{\tau t}}^n = n (e^{\mathcal{L}|_{\mathcal{X}_0}}; \pi^{-1} \tau) - (\mathcal{L}|_{\mathcal{X}_0}. e^{\mathcal{L}|_{\mathcal{X}_0}}; \pi^{-1} \tau). 
\end{gather*} \item For $\bm{\psi} \in C^\infty (X, -\mathrm{Ric} (\omega), 2\pi K^{\log}_{\bar{\mathcal{X}}/\mathbb{P}^1})$ and $\bm{\psi}' \in C^\infty (X, -\mathrm{Ric} (\omega), 2\pi K^{\log}_{\bar{\mathcal{X}}/\mathbb{P}^1})$, we have \begin{gather*} \lim_{t \to \infty} \frac{d}{dt} \mathcal{A}_{\bm{\phi}_\tau}^{\bm{\psi}_\tau} (t) = -2\pi (\kappa_{\mathcal{X}_0}. e^{\mathcal{L}|_{\mathcal{X}_0}}; \pi^{-1} \tau), \\ \lim_{t \to \infty} \frac{d}{dt} \mathcal{A}_{\bm{\phi}_\tau}^{\bm{\psi}'_\tau} (t) = -2\pi ((K_X. e^L) - \pi^{-1} \tau (K^{\log}_{\bar{\mathcal{X}}/\mathbb{P}^1}. e^{\bar{\mathcal{L}}}; \pi^{-1} \tau)). \end{gather*} \end{enumerate} \end{cor} \begin{proof} As we already noted, we may assume $\bar{\mathcal{L}}$ is ample. Take a K\"ahler form $\Omega$ in $\bar{\mathcal{L}}$, which can be written as $\Omega = \omega + dd^c \Phi_\Omega$ on $\bar{\mathcal{X}} \setminus \mathcal{X}_0$. It is known by \cite[Corollary 1.3]{CTW1} that there exists a unique $\Omega$-psh function $\tilde{\Phi}$ on $\mathcal{X}_\Delta$ which is $C^{1,1}$ on $\mathcal{X}_\Delta$ and solves \[ (\Omega + dd^c \tilde{\Phi})^{n+1} = 0, \quad \tilde{\Phi}|_{\partial \mathcal{X}_\Delta} = \tilde{\phi}_0. \] Thus the geodesic ray $\bm{\phi} = \{ \phi_t = \Phi_\Omega (\cdot, e^{-t}) + \tilde{\Phi} (\cdot, e^{-t}) \}_{t \in [0, \infty)}$ is of class $C^{1,1} (X, \omega; \mathcal{L})$. Thus we can apply the above theorem. \end{proof} \subsubsection{Slope formula of the action functional} \begin{thm} Let $(\mathcal{X}, \mathcal{L})$ be an snc smooth test configuration which dominates the trivial configuration. For the geodesic ray $\bm{\phi}$ subordinate to $(\mathcal{X}, \mathcal{L})$ emanating from a smooth metric, we have \[ \lim_{t \to \infty} \frac{d}{dt}_+ \mathcal{A}_{\bm{\phi}_\tau} (t) = -2\pi ((K_X. e^L) - \pi^{-1} \tau (K^{\log}_{\bar{\mathcal{X}}/\mathbb{P}^1}. e^{\bar{\mathcal{L}}}; \pi^{-1} \tau)). 
\] \end{thm} \begin{proof} We firstly note $\bm{\phi}_{(\mathcal{X}, \mathcal{L}), \tau} = \bm{\phi}_{(\mathcal{X}_d, \mathcal{L}_d), d^{-1} \tau}$ for the geodesic $\bm{\phi}_{(\mathcal{X}, \mathcal{L}), \tau}$ subordinate to $(\mathcal{X}, \mathcal{L}; \tau)$. On the right hand side, we have \[ \tau (K^{\log}_{\bar{\mathcal{X}}/\mathbb{P}^1}. e^{\bar{\mathcal{L}}}; \tau) = d^{-1} \tau (K^{\log}_{\bar{\mathcal{X}}_d/\mathbb{P}^1}. e^{\bar{\mathcal{L}}_d}; d^{-1} \tau) \] for the normalized base change $(\mathcal{X}_d, \mathcal{L}_d)$, as we already explained in the introduction. For large $d \gg 0$ the central fibre of $\mathcal{X}_d$ is reduced, so that we have $\tau (K^{\log}_{\bar{\mathcal{X}}/\mathbb{P}^1}. e^{\bar{\mathcal{L}}}; \tau) = d^{-1} \tau (K_{\bar{\mathcal{X}}_d/\mathbb{P}^1}. e^{\bar{\mathcal{L}}}_d; d^{-1} \tau)$ for large $d$. Therefore, it suffices to show \[ \mathcal{A}_{\bm{\phi}_\tau} - \mathcal{A}_{\bm{\phi}_\tau}^{\bm{\psi}_\tau} \le O (1) \] and \[ o (t) \le \mathcal{A}_{\bm{\phi}_\tau} - \mathcal{A}_{\bm{\phi}_\tau}^{\bm{\psi}'_\tau} \] for smooth $\bm{\psi}, \bm{\psi'}$ in the previous corollary. As in the proof of \cite[Theorem 5.1]{S-D}, we have a uniform upper bound on $\log (\omega^n_{\phi_t}/e^{-\psi_t} \omega^n)$. So we get \[ \mathcal{A}_{\bm{\phi}_\tau} - \mathcal{A}_{\bm{\phi}_\tau}^{\bm{\psi}_\tau} = \frac{1}{n!} \int_X \log (\omega^n_{\phi_{\tau t}}/e^{-\psi_{\tau t}} \omega^n) e^{- \tau \dot{\phi}_{\tau t}} \omega_{\phi_{\tau t}}^n \le C (e^{\mathcal{L}|_{\mathcal{X}_0}}; \pi^{-1} \tau) \] as desired. To see \[ o (t) \le \mathcal{A}_{\bm{\phi}_\tau} - \mathcal{A}_{\bm{\phi}_\tau}^{\bm{\psi}'}, \] we put $d\mu_t := e^{-\tau \dot{\phi}_{\tau t}} \omega^n_{\phi_{\tau t}}/(n! 
(e^{\mathcal{L}|_{\mathcal{X}_0}}))$, $d\nu_t := e^{-\psi'_{\tau t}} \omega^n/\int_X e^{-\psi'_{\tau t}} \omega^n$ and compute \begin{align*} \mathcal{A}_{\bm{\phi}_\tau} - \mathcal{A}_{\bm{\phi}_\tau}^{\bm{\psi}'_\tau} &= \frac{1}{n!} \int_X \log (\omega^n_{\phi_{\tau t}}/e^{-\psi'_{\tau t}} \omega^n) e^{- \tau \dot{\phi}_{\tau t}} \omega_{\phi_{\tau t}}^n \\ &= (e^{\mathcal{L}|_{\mathcal{X}_0}}) \log (e^{\mathcal{L}|_{\mathcal{X}_0}}) + \frac{1}{n!} \int_X \tau \dot{\phi}_{\tau t} e^{-\tau \dot{\phi}_{\tau t}} \omega_{\phi_{\tau t}}^n \\ &\qquad + (e^{\mathcal{L}|_{\mathcal{X}_0}}) \int_X \log \frac{d\mu_t}{d\nu_t} d\mu_t - (e^{\mathcal{L}|_{\mathcal{X}_0}}) \log \frac{1}{n!} \int_X e^{-\psi'_{\tau t}} \omega^n \\ & \ge C - (e^{\mathcal{L}|_{\mathcal{X}_0}}) \log \frac{1}{n!} \int_X e^{-\psi'_{\tau t}} \omega^n, \end{align*} where we used $\int_X \log \frac{d\mu_t}{d\nu_t} d\mu_t \ge 0$ in the last inequality. The last term is of class $O (\log t) \subset o (t)$ by \cite[Lemma 3.11]{BHJ2}. This proves the claim. \end{proof} \subsubsection{Proof of main theorems} Now we can prove the rest of our main theorems. \begin{cor} For $\lambda \in \mathbb{R}$, we have \[ \sup_{(\mathcal{X}, \mathcal{L}), \tau \ge 0} \bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \tau) \le \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi), \] where $(\mathcal{X}, \mathcal{L}; \tau)$ runs over all test configurations. 
\end{cor} \begin{proof} Now by Lemma \ref{W plus}, Lemma \ref{affine functional}, the convexity of $\mathcal{A}_{\bm{\phi}}$, Corollary \ref{slope formula} and the above theorem, we get \[ \bm{\check{\mu}} (\omega) \ge \check{W} (\omega, - \tau \dot{\phi}_0) \ge \lim_{t \to \infty} -\frac{d}{dt}_+ \frac{\mathcal{A}_{\bm{\phi}_\tau} (t)}{\int_X e^{-\tau \dot{\phi}_{\tau t}} \omega_{\phi_{\tau t}}^n/n!} = \bm{\check{\mu}}_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \pi^{-1} \tau) \] for an snc smooth test configuration $(\mathcal{X}, \mathcal{L})$ and $\tau \ge 0$. On the other hand, by Lemma \ref{affine functional} and Corollary \ref{slope formula}, we have \[ \check{S} (\omega, -\tau \dot{\phi}_0) = \check{S} (\omega_{\phi_{\tau t}}, -\tau \dot{\phi}_{\tau t}) = \bm{\check{\sigma}} (\mathcal{X}, \mathcal{L}; \pi^{-1} \tau). \] Thus the claim holds for snc smooth test configurations. For a general test configuration $(\mathcal{X}, \mathcal{L})$, shifting $\mathcal{L}$ by a weight so that $\bar{\mathcal{L}}$ is ample and then taking a resolution $\tilde{\mathcal{X}} \to \mathcal{X}$ and approximating the pull-back $\tilde{\mathcal{L}}$ by ample series $\bar{\mathcal{L}}_\epsilon$, we can approximate $\bm{\check{\mu}}_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau)$ by $\bm{\check{\mu}}_{\mathrm{NA}} (\tilde{\mathcal{X}}, \mathcal{L}_\epsilon; \tau)$. This shows $\bm{\check{\mu}} (\omega) \ge \bm{\check{\mu}}_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau)$ for general $(\mathcal{X}, \mathcal{L}; \tau)$. \end{proof} \begin{cor} \label{minimizer} Suppose $\lambda \le 0$. If $\omega$ is a $\mu^\lambda$-cscK metric, then it minimizes $\bm{\check{\mu}}^\lambda$. \end{cor} \begin{proof} We firstly note $\bm{\check{\mu}}^\lambda (X, L; \xi) \le \bm{\check{\mu}}^\lambda (\omega_\varphi)$ for every vector field $\xi$ and metric $\omega_\varphi$ (not necessarily $\xi$-invariant). Indeed, when $\xi$ is rational, i.e. 
generating a $\mathbb{C}^\times$-action, this is a consequence of the above corollary. Since $\bm{\check{\mu}}^\lambda (X, L; \bullet)$ is continuous on any torus $\mathfrak{t}$ and rational $\xi$ is dense in $\mathfrak{t}$, we obtain the inequality for general $\xi$. If $\omega$ is a $\mu^\lambda$-cscK metric for $\lambda \le 0$, then there is $\xi$ such that $\bm{\check{\mu}}^\lambda (\omega) = \bm{\check{\mu}}^\lambda (X, L; \xi)$. Thus we have $\bm{\check{\mu}}^\lambda (\omega) \le \bm{\check{\mu}}^\lambda (\omega_\varphi)$ for every metric $\omega_\varphi$. \end{proof} If $\omega$ is a $\check{\mu}^\lambda_\xi$-cscK metric for $\lambda \le 0$, then $\xi$ maximizes $\bm{\check{\mu}}^\lambda (X, L; \bullet)$ among all vectors. Thus we get the following slight refinement of \cite[Corollary 3.10]{Ino2}. \begin{cor} Conjecture \ref{uniqueness} reduces to the uniqueness of the maximizer of $\bm{\check{\mu}}^\lambda (X, L; \bullet)$ on a maximal torus $\mathfrak{t}$ or on the center of a maximal compact (cf. \cite[Corollary 3.19]{Ino2}). \end{cor} \newpage \section{Observations on He and Calabi functional} \label{Observations} Here we briefly observe relation of our framework with He and Calabi functional. \subsection{He functional} \label{Observations, He} \subsubsection{$H$-functional and $\mu$-entropy} It is observed in \cite{Per, TZ2} that on a Fano manifold $X$ the critical points of the $\mu$-entropy for the polarization $L = -K_X$ and $\lambda = 2\pi$ are precisely K\"ahler--Ricci solitons, implicitly assuming the smoothness of the $\mu$-entropy in this ``mild temperature'' case $\lambda = 2 \pi$. Though the author could not recover the actual proof of the smoothness in this case, we can rephrase this observation in the following two ways. \begin{itemize} \item The critical points of $\check{W}^{2\pi}$ are precisely K\"ahler--Ricci solitons. \item The minimizers of $\bm{\check{\mu}}^{2\pi}$ are precisely K\"ahler--Ricci solitons. 
\end{itemize} The first one follows from the main theorem of this article and the fact that $\mu^{2\pi}$-cscK metrics in the polarization $L=-K_X$ on a Fano manifold are precisely K\"ahler--Ricci solitons (cf. \cite{Ino1, Ino2}). If $\omega$ is a K\"ahler--Ricci soliton, i.e. $\mathrm{Ric} (\omega) - L_{J\xi} \omega = 2\pi \omega$, we have the equality $\bm{\check{\mu}}^{2\pi} (\omega) = \check{W}^{2\pi} (\omega, \theta_\xi) = \bm{\check{\mu}}^{2\pi} (-2\xi)$ as observed in \cite{TZ2}. Since we have $\bm{\check{\mu}}^{2\pi} (\omega_\varphi) \ge \bm{\check{\mu}}^{2\pi} (-2\xi)$ for general metric $\omega_\varphi$, the K\"ahler--Ricci soliton $\omega$ minimizes the $\mu$-entropy. To see the converse, we discuss as follows: if $\omega$ minimizes $\mu^{2\pi}$, it also minimizes the $H$-entropy by the equality $\inf_\omega \bm{\check{\mu}}^{2\pi} (\omega) = \inf_\omega H (\omega)$ from \cite{DS}, so that it is a K\"ahler--Ricci soliton. (Note the sign of our $\mu$-entropy is reversed from Perelman's original convention. ) The \textit{$H$-entropy} of a K\"ahler metric $\omega$ in $\mathcal{H} (X, -K_X)$ is given by \[ \frac{1}{2\pi} H (\omega) := \int_X h e^h \omega^n \Big{/} \int_X e^h \omega^n - \log \int_X e^h \frac{\omega^n}{n!}, \] where $h$ is a Ricci potential: $\sqrt{-1} \partial \bar{\partial} h = \mathrm{Ric} (\omega) - 2\pi \omega$. As noticed in \cite{He}, we have \[ \check{W}^{2\pi} (\omega, h) = H (\omega), \] so that the following inequality holds \begin{equation} \bm{\check{\mu}}^{2\pi} (\omega) \ge H (\omega), \end{equation} which is analogous to the inequality $M \ge D$ on Mabuchi and Ding functional, and the equality holds iff $\omega$ is a K\"ahler--Ricci soliton (cf. \cite{He}, \cite[Theorem 2.4.3]{Fut}). 
\subsubsection{Legendre duality} The following formula on the $H$-entropy is known as Legendre duality: \begin{equation} \frac{1}{2\pi} H (\omega) = \sup_{f \in C^0 (X)} \Big{(} \int_X f e^h \omega^n \Big{/} \int_X e^h \omega^n - \log \int_X e^f \frac{\omega^n}{n!} \Big{)}, \end{equation} which follows from Jensen's inequality: \[ \int_X (f- h) \frac{e^h \omega^n}{\int_X e^h \omega^n} \le \log \int_X e^{f-h} \frac{e^h \omega^n}{\int_X e^h \omega^n}. \] Now we consider the following functional \begin{equation} \frac{1}{2 \pi} L (\omega, f) := \int_X f e^h \omega^n \Big{/} \int_X e^h \omega^n - \log \int_X e^f \frac{\omega^n}{n!} \end{equation} defined on the tangent bundle $T \mathcal{H} (X, -K_X)$. We easily see that critical points of $L$ are K\"ahler--Ricci solitons. It is shown in \cite{DS} that this functional is monotonic along geodesics. \subsection{Calabi functional} \label{Observations, Calabi} \subsubsection{Extremal limit $\lambda^{-1} \to 0$} As in \cite{Ino2}, we consider the following rescaled $W$-entropy for $\kappa \neq 0$: \begin{align} W_\kappa (\omega, f) &:= \kappa^{-1} \Big{(} \check{W}^{\kappa^{-1}} (\omega, \kappa f) - \check{W}^{\kappa^{-1}} (\omega, 0) \Big{)} \\ \notag &= \kappa^{-1} \Big{(} \frac{\int_X (s (\omega) + \kappa \bar{\Box} f) e^{\kappa f} \omega^n}{\int_X e^{\kappa f} \omega^n} - \bar{s} \Big{)} \\ \notag &\qquad- \kappa^{-2} \Big{(} \frac{\int_X \kappa f e^{\kappa f} \omega^n}{\int_X e^{\kappa f} \omega^n} - \log \int_X e^{\kappa f} \omega^n \Big{)}. \end{align} The computation in \cite[Section 5.2]{Ino2} shows the limit as $\kappa \to 0$ is given by \begin{equation} W_{\mathrm{ext}} (\omega, f) := \lim_{\kappa \to 0} W_\kappa (\omega, f) = - \frac{1}{2} \frac{\int_X (\hat{s} (\omega) - \hat{f})^2 \omega^n}{\int_X \omega^n} + \frac{1}{2} \frac{\int_X \hat{s}^2 (\omega) \omega^n}{\int_X \omega^n}, \end{equation} where we put $\hat{u} := u - \int_X u \omega^n / \int_X \omega^n$. 
We easily see that $f$ is a critical point of $W_{\mathrm{ext}} (\omega, \cdot)$ iff $f = s (\omega) + \mathrm{const}.$, which obviously maximizes $W_{\mathrm{ext}} (\omega, \cdot)$. Since \[ C (\omega) := \sup_{f \in C^\infty (X)} W_{\mathrm{ext}} (\omega, f) = \frac{1}{2} \frac{\int_X \hat{s}^2 (\omega) \omega^n}{\int_X \omega^n} \] is nothing but the Calabi functional, the critical points of $W_{\mathrm{ext}}$ are precisely extremal metrics. We can similarly check the derivative of the functionals $W_\kappa$ also converge to that of $W_{\mathrm{ext}}$ as in \cite{Ino2} again. It is also observed in the paper when $\kappa$ approaches to zero from the negative side $\kappa < 0$ ($\lambda \to -\infty$), the holomorphic vector fields $\partial^\sharp f_\kappa$ associated to some critical points $(\omega_\kappa, f_\kappa)$ of $W_\kappa$ always converge to an extremal vector field modulo holomorphic gauges: it suffices to take gauges $g_\kappa \in \mathrm{Aut} (X, L)$ so that $g_\kappa^* \omega_\kappa$ are invariant with respect to the center of a fixed maximal compact. In contrast, when $\kappa$ approaches to zero from the positive side $\kappa > 0$ ($\lambda \to + \infty$), there may be critical points whose associated holomorphic vector fields $\partial^\sharp f_\kappa$ fade away to infinity, so that $(\omega_\kappa, f_\kappa)$ never converges. \subsubsection{Donaldson's lower bound on Calabi functional --- a simple proof} Here, standing on our Lagrangian perspective, we give a simple proof of Donaldson's inequality \cite{Don}: \[ 2 \cdot 4\pi^2 \frac{\mathrm{DF} (\mathcal{X}, \mathcal{L})^2}{\| (\mathcal{X}, \mathcal{L}) \|^2} \le C (\omega) \] for any test configuration $(\mathcal{X}, \mathcal{L})$ with $(M_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}) \le) \mathrm{DF} (\mathcal{X}, \mathcal{L}) < 0$. R. Dervan kindly informed the author that the proof is very close to the Kempf--Ness approach in \cite{Der1} pursuing the idea in \cite{Chen}. 
We put \begin{gather*} \| (\mathcal{X}, \mathcal{L}) \|^2 := - \Big{(} \frac{2 (\bar{\mathcal{L}}_{\mathbb{C}^{\times}}^{\cdot n+2}; \eta)}{(n+2)(n+1)} + \Big{(} \frac{(\bar{\mathcal{L}}^{\cdot n+1})}{n+1} \Big{)}^2 \Big{)}, \\ C_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau) := \frac{-1}{2 (L^{\cdot n})} \left( \tau 4\pi M_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}) + \tau^2 \| (\mathcal{X}, \mathcal{L}) \|^2 \right). \end{gather*} By the localization, we have \[ \frac{2(\bar{\mathcal{L}}_{\mathbb{C}^{\times}}^{\cdot n+2}; \eta)}{(n+2)(n+1)} + \Big{(} \frac{(\bar{\mathcal{L}}^{\cdot n+1})}{n+1} \Big{)}^2 = - \frac{2 (\mathcal{L}_{\mathbb{C}^\times}|_{\mathcal{X}_0}^{\cdot n+2}; \eta)}{(n+2)(n+1)} + \Big{(} \frac{(\mathcal{L}_{\mathbb{C}^\times}|_{\mathcal{X}_0}^{\cdot n+1}; \eta)}{n+1} \Big{)}^2 = - n! \int_\mathbb{R} (t- b)^2 \mathrm{DH} < 0 \] for $b := \int_\mathbb{R} t \mathrm{DH}$, so that $\| (\mathcal{X}, \mathcal{L}) \|^2$ is positive unless $(\mathcal{X}, \mathcal{L})$ is trivial. Since we have \[ \frac{d}{d\tau}\Big{|}_{\tau= 0} C_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau) = - \frac{2\pi}{(L^{\cdot n})} M_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}), \] $(X, L)$ is K-semistable if $C_{\mathrm{NA}}$ is maximized at the trivial configuration among all test configurations. In the same way as we will do for the non-archimedean $\mu$-entropy in the subsequent article, we can also extend this to the extremal case: $(X, L)$ is relatively K-semistable if $C_{\mathrm{NA}}$ is maximized at a product configuration among all test configurations. When $(X, L)$ is K-semistable, we can show the converse since if $\mathrm{DF} (\mathcal{X}, \mathcal{L}) \ge 0$, the maximum of $C_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau)$ on $\tau \ge 0$ is achieved at $\tau = 0$, so the trivial configuration maximizes $C_{\mathrm{NA}}$. 
When $\mathrm{DF} (\mathcal{X}, \mathcal{L}) < 0$, the maximum is achieved at $\tau = (n+2) (n+1) \mathrm{DF} (\mathcal{X}, \mathcal{L})/(\bar{\mathcal{L}}_{\mathbb{C}^\times}^{\cdot n+2}; \eta)$ and is given by \[ \max_{\tau \ge 0} C_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau) = 2 \cdot 4\pi^2 \frac{M_{\mathrm{NA}} (\mathcal{X}, \mathcal{L})^2}{\| (\mathcal{X}, \mathcal{L}) \|^2} > 0. \] Therefore Donaldson's theorem \cite{Don} on the lower bound of the Calabi functional can be rephrased as \[ \sup_{(\mathcal{X}, \mathcal{L}; \tau)} C_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau) \le \inf_{\omega \in \mathcal{H} (X, L)} C (\omega). \] It is conjectured that these values are indeed the same. (cf. \cite{His, Xia, Li2}) Based on the developments on the convexity (\cite{BB}) of the Mabuchi functional along geodesic rays and the slope formula (\cite{S-D}. See also \cite{Xia, Li2}. ) for geodesic rays with algebraic singularities, we can present an alternative simple proof of the above inequality as follows. Firstly, we note that \[ \int_0^t W_{\mathrm{ext}} (\omega_s, \dot{\phi}_s) ds = -\mathcal{M} (\omega_t) - \frac{1}{2 (L^{\cdot n})} \int_0^t ds \int_X \hat{\dot{\phi}}_s^2 \omega_s^n \] for smooth paths $\omega_s$. Along $C^{1, 1}$-geodesics, the integrand $\int_X \hat{\dot{\phi}}_s^2 \omega_s^n$ is constant and equal to $\| (\mathcal{X}, \mathcal{L}) \|^2$. Since the right derivative $\frac{d}{dt}^+ \Big{|}_{t=s} \mathcal{M} (\omega_t)$ exists for a geodesic $\omega_t$ by the convexity of the Mabuchi functional, we can define $W^+_{\mathrm{ext}} (\omega_s, \dot{\phi}_s)$ for a $C^{1,1}$-geodesic by \[ W^+_{\mathrm{ext}} (\omega_s, \dot{\phi}_s) := - \frac{d}{dt}_+ \Big{|}_{t=s} \mathcal{M} (\omega_t) - \frac{1}{2\int_X \omega^n} \int_X \hat{\dot{\phi}}_s^2 \omega_s^n, \] which is monotonically decreasing again by the convexity. 
Meanwhile, if $\omega_0$ is smooth, $W_{\mathrm{ext}} (\omega_0, \dot{\phi}_0)$ is well-defined in the usual sense as $\dot{\phi}_0$ is $L^2$-integrable. Recall the Chen--Tian expression of the Mabuchi functional \[ \mathcal{M} (\omega_t) = \int_X \log \frac{\omega_t^n}{\omega_0^n} \omega_t^n - n \int_0^t ds \int_X \dot{\phi}_s \mathrm{Ric} (\omega_0) \wedge \omega_s^{n-1}. \] Since the entropy part $\int_X \log \frac{\omega_t^n}{\omega_0^n} \omega_t^n$ is non-negative, we have \[ \frac{d}{dt}_+ \Big{|}_{t=0} \mathcal{M} (\omega_t) \ge - \int_X \dot{\phi}_0 s (\omega_0) \omega_0^n. \] Thus we get $W_{\mathrm{ext}} (\omega_0, \dot{\phi}_0) \ge W^+_{\mathrm{ext}} (\omega_0, \dot{\phi}_0)$. On the other hand, the slope formula on the Mabuchi functional shows \[ C_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau) = \lim_{t \to \infty} W^+_{\mathrm{ext}} (\omega_{\tau t}, \dot{\phi}_{\tau t}) \] for the $C^{1,1}$-geodesic ray subordinate to the test configuration $(\mathcal{X}, \mathcal{L})$ with any smooth initial metric $\omega_0$. Now we obtain the inequality by \[ C_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau) = \lim_{t \to \infty} W^+_{\mathrm{ext}} (\omega_{\tau t}, \dot{\phi}_{\tau t}) \le W^+_{\mathrm{ext}} (\omega_0, \tau \dot{\phi}_0) \le W_{\mathrm{ext}} (\omega_0, \tau \dot{\phi}_0) \le C (\omega_0) \] for every $(\mathcal{X}, \mathcal{L}; \tau)$ and $\omega_0$. \subsection{Experiment: Are they Lagrangian?} We saw that $\check{W}^\lambda$, $W_{\mathrm{ext}}$ and $L$ in different frameworks are monotonic along geodesics. It is natural to ask if there is some common general background for these functionals. The author has not yet reached a good answer to this naive question. Here we describe such an attempt, without going into detail. Though the observation does not conclude anything so far, the author thinks it is worth sharing. The functionals are defined on the tangent bundle of the space of K\"ahler metrics. 
A Lagrangian in physics is also a functional on the tangent bundle. This motivates us to observe the Euler--Lagrange equation of these functionals. It turns out that there may be a deep common feature as for $W_{\mathrm{ext}}$ and $L$. We briefly review Lagrangian formalism in physics. It explains the motion of a particle by the principle of least action. The formalism is formulated in a coordinate-free expression, so that it is suitable to deal with holonomic constraints, such as a pendulum. A Lagrangian is a functional $\mathcal{L}: T M \to \mathbb{R}$ on the tangent bundle of a space $M$. The action functional $\mathscr{A}$ is a functional on the path space $C^\infty ([a,b], M)$ which is defined as \[ \mathscr{A} (\phi) := \int_a^b \mathcal{L} (\phi (t), \dot{\phi} (t)) dt. \] The principle of least action states that the motion of a particle minimizes this functional, among all the curves $\phi$ with the fixed initial and end positions $\phi (a) = x, \phi (b) = y$. Critical points of the functional are characterized by the Euler--Lagrange equation: \begin{equation} \frac{\partial \mathcal{L}}{\partial q} (\phi (t), \dot{\phi} (t)) - \frac{d}{dt} \Big{(} \frac{\partial \mathcal{L}}{\partial \dot{q}} (\phi (t), \dot{\phi} (t)) \Big{)} = 0, \end{equation} where $(q, \dot{q})$ is a local (canonical) coordinate of $T M$. This equation is known to be coordinate free. In the Cartesian coordinate, the Lagrangian is given by $\mathcal{L} = T -V$, where $T = \frac{1}{2} m |v|^2$ is the kinetic energy and $V$ is a potential energy of the system. Lagrangian formalism also appears in mathematics. Geodesics in Riemannian geometry can be characterized by the Euler--Lagrange equation on the kinetic Lagrangian $\mathcal{L} (x, v) = \frac{1}{2} |v|_x^2$. Perelman's $\mathscr{L}$-geometry \cite{Per} may be regarded as a Lagrangian formalism on the spacetime of Ricci flow. 
Lempert \cite{Lem} observed that parallel fibrewise convex Lagrangians on $T \mathcal{H} (X, L)$ is minimized by geodesics. We note our `Lagrangians' $\check{W}^\lambda, W_{\mathrm{ext}}, L$ are defined on the same space, but are not parallel (as for $\check{W}^\lambda$, not even fibrewise convex). Now, let us observe what happens for our `Lagrangians'. \subsubsection{$L$ is a Lagrangian} Recall we put \[ \frac{1}{2 \pi} L (\omega, f) = \int_X f e^h \omega^n \Big{/} \int_X e^h \omega^n - \log \int_X e^f \frac{\omega^n}{n!}. \] We regard $L (\omega, - f)$ as a Lagrangian. Then the Euler--Lagrange equation turns into \begin{equation} \label{EL} \frac{\partial L}{\partial \omega} (\omega_t, - \dot{\phi}_t) + \frac{d}{dt} \frac{\partial L}{\partial f} (\omega_t, -\dot{\phi}_t) = 0. \end{equation} A simple calculation shows the following. \begin{prop} The Euler--Lagrange equation (\ref{EL}) is equivalent to the geodesic equation. \end{prop} The \textit{energy} of this Lagrangian system is given by \begin{equation} U (\omega, f) := \langle \frac{\partial L}{\partial f} (\omega, f), f \rangle - L (\omega, f) = -2\pi \Big{(} \frac{\int_X f e^f \omega^n}{\int_X e^f \omega^n} - \log \int_X e^f \frac{\omega^n}{n!} \Big{)}. \end{equation} It explains the reason that this functional is conserved along geodesics. \subsubsection{$W_{\mathrm{ext}}$ is a Lagrangian} Recall we put \[ W_{\mathrm{ext}} (\omega, f) = - \frac{1}{2} \frac{\int_X (\hat{s} (\omega) - \hat{f})^2 \omega^n}{\int_X \omega^n} + \frac{1}{2} \frac{\int_X \hat{s}^2 (\omega) \omega^n}{\int_X \omega^n}. \] We regard $W_{\mathrm{ext}} (\omega, f)$ as Lagrangian. Similarly as before, we can observe the following. \begin{prop} The Euler--Lagrange equation \[ \frac{\partial W_{\mathrm{ext}}}{\partial \omega} (\omega_t, \dot{\phi}_t) - \frac{d}{dt} \frac{\partial W_{\mathrm{ext}}}{\partial f} (\omega_t, \dot{\phi}_t) = 0 \] is equivalent to the geodesic equation. 
\end{prop} The \textit{energy} of this Lagrangian system is given by \begin{equation} \mathcal{H}_{\mathrm{ext}} (\omega, f) := \langle \frac{\partial W_{\mathrm{ext}}}{\partial f} (\omega, f), f \rangle - W_{\mathrm{ext}} (\omega, f) = -\frac{1}{2} \frac{\int_X \hat{f}^2 \omega^n}{\int_X \omega^n}. \end{equation} It again explains the reason that this functional is conserved along geodesics. \subsubsection{$\check{W}^\lambda$ is non-conservative} Recall we put \begin{align*} \check{W} (\omega, f) &= - \frac{\int_X (s (\omega) + \bar{\Box} f) e^f \omega^n}{\int_X e^f \omega^n}, \\ \check{S} (\omega, f) &= \frac{\int_X (n+f) e^f \omega^n}{\int_X e^f \omega^n} - \log \int_X e^f \frac{\omega^n}{n!}. \end{align*} We regard $\check{W}^\lambda (\omega, - f) = \check{W} (\omega, - f) + \lambda \check{S} (\omega, -f)$ as Lagrangian. \begin{prop} We have \[ \Big{(} \frac{\partial \check{S}}{\partial \omega} (\omega_t, -\dot{\phi}_t) + \frac{d}{dt} \frac{\partial \check{S}}{\partial f} (\omega_t, -\dot{\phi}_t) \Big{)} (\varphi) = \frac{\int_X (1 + \hat{f} ) \widehat{(\dot{f} + |\partial f|^2)} \hat{\varphi} ~ e^f \omega^n}{\int_X e^f \omega^n} \] and \begin{align*} \Big{(} \frac{\partial \check{W}}{\partial \omega} (\omega_t, -\dot{\phi}_t) &+ \frac{d}{dt} \frac{\partial \check{W}}{\partial f} (\omega_t, -\dot{\phi}_t) \Big{)} (\varphi) \\ &= - \frac{\int_X \Big{(} |\mathcal{D} f|^2 + (\Delta - \nabla f) \widehat{(\dot{f} +|\partial f|^2)} + \hat{s}_f \widehat{(\dot{f} + |\partial f|^2)} \Big{)} \hat{\varphi} ~e^f \omega^n}{\int_X e^f \omega^n}, \end{align*} where we put $f := -\dot{\phi}$. \end{prop} It follows that $\check{W}^\lambda$ does not characterize geodesics by the principle of least action, obstructed by the integration $\int_X |\mathcal{D} \dot{\phi}|^2 \hat{\varphi} ~e^{-\dot{\phi}} \omega^n$. Unfortunately! 
\section{A trailer} \subsection{A trailer for the second article} \subsubsection{Non-archimedean $\mu$-entropy} While $\bm{\check{\mu}}^\lambda$ gives a right concept in view of GIT on Hilbert scheme \cite{Ino3}, it does not fit into the non-archimedean pluripotential theory. Similarly to the Donaldson--Futaki invariant, $\bm{\check{\mu}}^\lambda$ is not well-behaved with respect to the normalized base change along $z^d: \mathbb{C} \to \mathbb{C}$ due to the ill behavior of the canonical divisor: $\bm{\check{\mu}}^\lambda (\mathcal{X}_d, \mathcal{L}_d; \tau) \neq \bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; d\tau)$. This prevents us from interpreting $\bm{\check{\mu}}^\lambda$ as the invariant of non-archimedean metrics since the attempt $\bm{\check{\mu}}^\lambda (\phi_{(\mathcal{X}, \mathcal{L}; \tau)}) := \bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \tau)$ is not well-defined for the non-archimedean metric $\phi_{(\mathcal{X}, \mathcal{L}; \tau)} = \phi_{(\mathcal{X}_d, \mathcal{L}_d; d^{-1} \tau)}$. We can refine this by using the equivariant log canonical divisor: \[ K_{\bar{\mathcal{X}}/\mathbb{C}P^1}^{\log, \mathbb{C}^\times} := (K_{\bar{\mathcal{X}}}^{\mathbb{C}^\times} + [\mathcal{X}_0^{\mathrm{red}, \mathbb{C}^\times}]) - (\varpi^* K_{\mathbb{C}P^1}^{\mathbb{C}^\times} + [\mathcal{X}_0^{\mathbb{C}^\times}]) \in H_{2n}^{\mathrm{lf}, \mathbb{C}^\times} (\bar{\mathcal{X}}, \mathbb{Z}). \] The following variant fits into non-archimedean formalism: \begin{align} \bm{\check{\mu}}_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau) &:= 2 \pi \frac{(K_X. e^L) - \tau. (K_{\bar{\mathcal{X}}/\mathbb{C}P^1}^{\log, \mathbb{C}^\times}. e^{\bar{\mathcal{L}}_{\mathbb{C}^\times}}; \tau)}{(e^L) - \tau. (e^{\bar{\mathcal{L}}_{\mathbb{C}^\times}}; \tau)}, \\ \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\mathcal{X}, \mathcal{L}; \tau) &:= \bm{\check{\mu}}_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau) + \lambda \bm{\check{\sigma}} (\mathcal{X}, \mathcal{L}; \tau). 
\end{align} In general, $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\mathcal{X}, \mathcal{L}; \tau) \ge \bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \tau)$. The equality holds when the central fibre is reduced. Similarly to Mabuchi invariant, we have $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\mathcal{X}_d, \mathcal{L}_d; \tau) = \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\mathcal{X}, \mathcal{L}; d\tau)$ (cf. \cite{BHJ1}), as we explain below. We review the base change of the $\mathbb{C}^\times$-equivariant log canonical class $K^{\log, \mathbb{C}^\times}$. We write the effective divisor $\mathcal{X}_0$ as $\sum_i d_i E_i$ and put $Z := \mathcal{X}^{\mathrm{sing}} \cup \bigcup_i E_i^{\mathrm{sing}} \cup (\bigcup_{i \neq j} E_i \cap E_j) \subset \mathcal{X}$. Around $E_i \setminus Z$, the test configuration $\mathcal{X} \to \mathbb{C}$ is locally expressed as $\Delta^{n+1} \to \mathbb{C}: (z_i) \mapsto z_i^{d_i}$, so that the normalized base change is given by \[ \begin{tikzcd} \sqcup^{(d, d_i)} \Delta^{n+1} \ar{rr}{(z_0, \ldots, z_i^{\frac{d}{(d, d_i)}}, \ldots, z_n)} \ar{d}[swap]{z_i^{\frac{d_i}{(d, d_i)}}} &~ & \Delta^{n+1} \ar{d}{z_i^{d_i}} \\ \mathbb{C} \ar{rr}{w^d} &~ & \mathbb{C} \end{tikzcd} \] We put $\mathcal{X}^\circ := \mathcal{X} \setminus Z$ and $\mathcal{X}_d^\circ := \mathcal{X} \setminus f^{-1} Z$. The $\mathbb{C}^\times$-equivariant Chow class $K^{\log, \mathbb{C}^\times}_{\mathcal{X}}$ (resp. $K^{\log, \mathbb{C}^\times}_{\mathcal{X}_d}$) is the push-forward of the $\mathbb{C}^\times$-Chern class of the log cotangent bundle $T^{\log, *} \mathcal{X}^\circ$ (resp. $T^{\log, *} \mathcal{X}_d^\circ$), which is locally spanned by $dz_1, \ldots, z_i^{-1} dz_i, \ldots, dz_n$ around the boundary $E_i \setminus Z$. 
By the above local expression, we deduce that the derivative of the normalized base change $f: \mathcal{X}_d \to \mathcal{X}$ induce the natural isomorphisms of log tangent bundles $df: T^{\log} \mathcal{X}_d^\circ \cong f^* T^{\log} \mathcal{X}^\circ$. Since the derivative is functorial, this is equivariant with respect to the $d$-times scaled $\mathbb{C}^\times$-action on $f^* T^{\log} \mathcal{X}^\circ$. (Note $f$ is equivariant with respect to $w^d: \mathbb{C}^\times \to \mathbb{C}^\times$. ) Thus we get $f_* K_{\bar{\mathcal{X}}_d}^{\log, \mathbb{C}^\times} = d K_{\bar{\mathcal{X}}}^{\log, \mathbb{C}^\times}$ as $\mathbb{C}^\times$-equivariant Chow classes, with $d$-times scaled $\mathbb{C}^\times$-action on $\bar{\mathcal{X}}$. So we obtain \[ (K_{\bar{\mathcal{X}}_d/\mathbb{C}P^1}^{\log, \mathbb{C}^\times}. e^{\bar{\mathcal{L}}_d}; \tau) = (d K_{\bar{\mathcal{X}}/\mathbb{C}P^1}^{\log, \mathbb{C}^\times}. e^{\bar{\mathcal{L}}}; d\tau) \] by the equivariant projection formula. This shows $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\mathcal{X}_d, \mathcal{L}_d; \tau) = \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\mathcal{X}, \mathcal{L}; d\tau)$. \subsubsection{Main results in the second article (to appear)} In general, we cannot expect the existence of a maximizer of the $\mu$-entropy of test configurations. For instance, if $(X, L)$ admits a $\mu^\lambda_\xi$-cscK metric, then the vector $\xi$ attains a maximum, but it can be represented by a test configuration only if it is rational. It is natural to expect there is an extension of the $\mu$-entropy to some `completion' of the space of test configurations which includes irrational vectors. There are three ways to achieve this. The second article is devoted to arrange such concepts and to introduce extension of non-archiemedean $\mu$-entropy. 
The author decided to extract this part in order to make the content accessible for the readers usually working on algebraic geometry, by describing it in purely algebraic words. As some results in the second article explain appealing features of the non-archimedean $\mu$-entropy, we collect some results here as a trailer. We do not explain terminologies in the claims here as it is the main theme of the second article, but the author believes the readers can guess. The following claim just rephrases the last main theorem of this first article. \begin{thm} \label{HTS ineq} For $\lambda \in \mathbb{R}$, we have \[ \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi) \le \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi). \] \end{thm} The following corollaries can actually be stated in the words of this first article. We write these here just because the author believes that these non-archimedean pluripotential words provide us with better inspiration. \begin{cor} Suppose $\lambda \le 0$. If $\omega$ is a $\check{\mu}^\lambda_\xi$-cscK metric for some $\xi$, then \[ \bm{\check{\mu}}^\lambda (\omega) = \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) = \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi) = \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi), \] for the associated non-archimedean metric $\phi_\xi$. \end{cor} \begin{proof} If $\omega$ is a $\check{\mu}^\lambda_\xi$-cscK metric for $\lambda \le 0$, then by Theorem \ref{HTS ineq}, we get $\bm{\check{\mu}}^\lambda (\omega) = \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi)$. 
Thus by the above theorem, we obtain \[ \bm{\check{\mu}}^\lambda (\omega) = \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) \le \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi) \le \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi) \le \bm{\check{\mu}}^\lambda (\omega). \] \end{proof} \begin{cor} \label{mu-cscK characterization} Suppose $\lambda \le 0$. The following are equivalent for a K\"ahler metric $\omega \in \mathcal{H} (X, L)$. \begin{enumerate} \item[(a)] $\omega$ is a $\mu^\lambda$-cscK metric. \item[(b)] $\bm{\check{\mu}}^\lambda (\omega) = \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi)$. \item[(c)] $\bm{\check{\mu}}^\lambda (\omega) = \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi)$. \end{enumerate} \end{cor} \begin{proof} It suffices to show (c) $\Rightarrow$ (b) and (a) $\Rightarrow$ (c). The implication (c) $\Rightarrow$ (b) follows from the inequality in Theorem \ref{HTS ineq}. As for (a) $\Rightarrow$ (c), if $\omega$ is a $\mu^\lambda$-cscK metric, then the above corollary implies \[ \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi) \ge \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) = \bm{\check{\mu}}^\lambda (\omega). \] The converse inequality is nothing but Theorem \ref{HTS ineq}. \end{proof} The main theorem of the second article can be stated in three ways: geometric, algebraic and non-archimedean analytic. Geometric expression makes the proof rather simple. \begin{thm} \label{NAmu maximizer} Let $(X, L)$ be a polarized scheme. For arbitrary $\lambda \in \mathbb{R}$, we have the following. 
\begin{description} \item[Geometric expression] Suppose for every \textit{polyhedral configuration} $(\mathcal{X}/B_\sigma, \mathcal{L}; \zeta)$, there is a vector $\xi$ on $X$ such that \[ \bm{\check{\mu}}^\lambda (X, L; \xi) \ge \bm{\check{\mu}}^\lambda (\mathcal{X}, \mathcal{L}; \zeta), \] then $(X, L)$ is $\mu^\lambda$K-semistable. \item[Algebraic expression] Suppose for every finitely generated $\mathbb{R}$-filtration $\mathcal{F}$, there is a vector $\xi$ on $X$ such that \[ \bm{\check{\mu}}^\lambda (\mathcal{F}_\xi) \ge \bm{\check{\mu}}^\lambda (\mathcal{F}), \] then $(X, L)$ is $\mu^\lambda$K-semistable. \item[Analytic expression] Suppose for every non-archimedean metric $\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)$, there is a vector $\xi$ on $X$ such that \[ \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) \ge \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi), \] then $(X, L)$ is $\mu^\lambda$K-semistable. \end{description} \end{thm} If one hopes to check $\mu^\lambda$K-semistability using this theorem, we only need to check the inequality for test configurations (with rational $\tau$). A good aspect of this theorem is that we no longer need to detect the vector $\xi$ for which $(X, L)$ must be $\mu^\lambda_\xi$K-semistable to check its $\mu^\lambda$K-semistability. We only need to find a vector $\xi$ for each test configuration $(\mathcal{X}, \mathcal{L}; \tau)$ so that the above inequality holds. As a corollary, we obtain the following. \begin{cor} \label{muK-stability characterization} For $\lambda \in \mathbb{R}$, (c) implies (b), and (b) implies (a). \begin{enumerate} \item[(a)] $(X, L)$ is $\check{\mu}^\lambda_\xi$K-semistable. 
\item[(b)] $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) = \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi)$ \item[(c)] $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) = \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi)$ \end{enumerate} \end{cor} \begin{proof} Theorem \ref{HTS ineq} directly implies (c) $\Rightarrow$ (b): \[ \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) = \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi) \ge \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi) \ge \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi). \] The implication (b) $\Rightarrow$ (a) is nothing but Theorem \ref{NAmu maximizer}. \end{proof} Now we can conclude the following. \begin{cor} For $\lambda \le 0$, if there is a $\mu^\lambda_\xi$-cscK metric on $(X, L)$, $(X, L)$ is $\mu^\lambda_\xi$K-semistable. (More strongly, we have $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) = \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi)$.) \end{cor} Though this result is not new, the proof relies on a completely different perspective from the previous one concerned with the boundedness and the slope of $\mu$/weighted-Mabuchi functional. In general, entropy maximization implies the semistability of optimal degeneration as follows. This result is analogous to \cite{Der2} and \cite{HL2}. \begin{thm} If a polyhedral configuration $(\mathcal{X}/B_\sigma, \mathcal{L}; \xi)$ maximizes $\bm{\check{\mu}}^\lambda$, then the central fibre $(\underline{X}, \underline{L}) = (\mathcal{X}_0, \mathcal{L}|_{\mathcal{X}_0})$ is $\check{\mu}^\lambda_\xi$K-semistable with respect to all $T$-equivariant test configurations with $T = \overline{\exp \mathbb{R} \xi}_\mathbb{C} \subset \mathrm{Aut} (\underline{X}, \underline{L})$. 
\end{thm} For toric test configurations, we can compute the non-archimedean $\mu$-entropy by integrations on the moment polytope. \begin{prop} Let $(X, L)$ be a polarized toric (normal) variety and $P$ be the associated moment polytope. For a toric test configuration $(\mathcal{X}, \mathcal{L})$ with ample $\bar{\mathcal{L}}$, take the piecewise affine convex function $q$ on $P$ so that $Q = \{ (\mu, t) \in P \times \mathbb{R} ~|~ 0 \le t \le - q (\mu) \}$ denotes the moment polytope of $(\bar{\mathcal{X}}, \bar{\mathcal{L}})$, then we have \begin{align} \bm{\check{\mu}}_{\mathrm{NA}} (\mathcal{X}, \mathcal{L}; \tau) &= -2\pi \frac{\int_{\partial P} e^{\tau q} d\sigma}{\int_P e^{\tau q} d\mu}, \\ \bm{\check{\sigma}} (\mathcal{X}, \mathcal{L}; \tau) &= \frac{\int_P (n+\tau q) e^{\tau q} d\mu}{\int_P e^{\tau q} d\mu} - \log \int_P e^{\tau q} d \mu. \end{align} \end{prop} \subsubsection{Conjecture} We expect the non-archimedean $\mu$-entropy characterizes $\mu$K-semistability and optimal degeneration in our context, similarly as Perelman's $\mu$-entropy characterizes $\mu$-cscK metrics. The first conjecture is reminiscent of the result on $\widehat{\mathrm{vol}}$ in Sasaki geometry \cite{Li1}. \begin{conj} The following are equivalent for $\lambda \le 0$. \begin{enumerate} \item $(X, L)$ is $\check{\mu}^\lambda_\xi$K-semistable \item $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) = \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi)$ \item $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi_\xi) = \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi)$ \end{enumerate} \end{conj} The following conjecture is about a far-reaching generalization of the existence of maximizer of $\bm{\check{\mu}}^\lambda (X, L; \bullet)$ defined on a torus (\cite[Proposition 3.14]{Ino2}). It is analogous to the work of \cite{CSW}, \cite{DS}, \cite{Xia} ($+$ \cite{Li2}) and \cite{HL2}. 
Hisamoto \cite{His} develops a way to show the equality without using the existence of maximizer. \begin{conj} For $\lambda \in \mathbb{R}$ and a polarized variety $(X, L)$, we have \[ \sup_{\phi \in \mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)} \bm{\check{\mu}}_{\mathrm{NA}}^\lambda (\phi) = \inf_{\omega_\varphi \in \mathcal{H} (X, L)} \bm{\check{\mu}}^\lambda (\omega_\varphi). \] Moreover, the non-archimedean $\mu$-entropy $\bm{\check{\mu}}_{\mathrm{NA}}^\lambda$ admits a maximizer in $\mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)$ which is unique modulo the action of $\mathrm{Aut} (X, L)$, at least when $\lambda \le 0$. \end{conj} As we will prove in the second article, if a maximizer $\phi$ exists in $\mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)$, then its central fibre is $\mu^\lambda$K-semistable. The maximizer $\phi$ is so-called optimal degeneration in our $\mu$-framework. As the reflection of Perelman's $\mu$-entropy, the maximizer $\phi$ should be regarded as a `non-archimedean $\mu$-cscK metric'. Therefore we are inspired to raise an appealing slogan: ``\textit{optimal degeneration is non-archimedean canonical metric} in our $\mu$-framework of K-stability''. It is interesting to ask if maximizers can be characterized by a ``non-archimedean PDE'' similarly as $\mu$-cscK metrics. The author expects the maximizer exists in the geometric class $\mathcal{H}^{\mathbb{R}}_{\mathrm{NA}} (X, L)$, not only in a larger space such as $\mathcal{E}^p_{\mathrm{NA}} (X, L)$, in belief of (non-archimedean) elliptic regularity (cf. \cite{CC}). This expectation is significant for further development of K-moduli theory on polarized varieties. The claims in this trailer are not proved nor used in this first article. They will be proved in the second article in preparation. This section may be transplanted to the second article when released on arXiv.
1,108,101,565,873
arxiv
\section{Introduction} \label{sec:intro} A wide range of algorithms in computer science build on the ability to check language equivalence or inclusion of finite automata. In model-checking for instance, one can build an automaton for a formula and an automaton for a model, and then check that the latter is included in the former. More advanced constructions need to build a sequence of automata by applying a transducer, and to stop whenever two subsequent automata recognise the same language~\cite{BouajjaniHV04}. Another field of application is that of various extensions of Kleene algebra, whose equational theories are reducible to language equivalence of various kinds of automata: regular expressions and finite automata for plain Kleene algebra~\cite{kozen94:ka:completeness}, ``closed'' automata for Kleene algebra with converse~\cite{BES95,EB95}, or guarded string automata for Kleene algebra with tests (KAT). The theory of KAT has been developed by Kozen et al.~\cite{kozen:97:kat,cohenks96:kat:complexity,kozen08:kat:coalgebra}; it has received much attention for its applications in various verification tasks ranging from compiler optimisation~\cite{kozenp00:kat:compiler:opts} to program schematology~\cite{angusk01:kat:schemato}, and very recently for network programming analysis~\cite{NetKAT14,FKMST14a}. Like for Kleene algebra, the equational theory of KAT is PSPACE-complete, making it a challenging task to provide algorithms that are computationally practical on as many inputs as possible. One difficulty with KAT is that the underlying automata work on an input alphabet which is exponentially large in the number of variables of the starting expressions. As such, it renders standard algorithms for language equivalence intractable, even for reasonably small inputs. This difficulty is shared with other fields where various people proposed to work with \emph{symbolic automata} to cope with large, or even infinite, alphabets~\cite{Bryant92,Veanes13}. 
By symbolic automata, we mean finite automata whose transition function is represented using a compact data-structure, typically binary decision diagrams (BDDs)~\cite{Bryant86,Bryant92}, allowing one to explore the automata in a symbolic way. D'Antoni and Veanes recently proposed a new minimisation algorithm for symbolic automata~\cite{DAntoniV14}, which is much more efficient than the adaptations of the traditional algorithms~\cite{Moore56,Hopcroft:1971,Paige-Tarjan-PartitionRefinement}. However, to our knowledge, the simpler problem of language equivalence for symbolic automata has not been covered yet. We say `simpler' because language equivalence can be reduced trivially to minimisation---it suffices to minimise the automaton and to check whether the considered states are equated, but minimisation has complexity $n \ln n$ while Hopcroft and Karp's algorithm for language equivalence~\cite{HopcroftKarp} is almost linear~\cite{Tarjan75}. Our main contributions are the following: \begin{itemize} \item We propose a simple coinductive algorithm for checking language equivalence of symbolic automata (Section~\ref{sec:symbolic}). This algorithm is generic enough to support various improvements that have been proposed in the literature for plain automata~\cite{CAV06,tacas10,DoyenR10,bp:popl13:hkc}. \item We show how to combine binary decision diagrams (BDDs) and \emph{disjoint set forests}, the very elegant data-structure used by Hopcroft and Karp to define their almost linear algorithm~\cite{HopcroftKarp,Tarjan75} for deterministic automata. This results in a new version of their algorithm, for symbolic automata (Section~\ref{ssec:dsf}). 
\item We study several constructions for building efficiently a symbolic automaton out of a KAT expression (Section~\ref{sec:kat}): we consider a symbolic version of the extension of Brzozowski's derivatives~\cite{Brzozowski64} and Antimirov' partial derivatives~\cite{Antimirov96}, as well as a generalisation of Ilie and Yu's inductive construction~\cite{Ilie-yu-FollowAutomata}. The latter construction also requires us to generalise the standard procedure consisting in eliminating epsilon transitions. \end{itemize} \subsection*{Notation} We denote sets by capital letters $X,Y,S,T\ldots$ and functions by lower case letters $f,g,\dots$ Given sets $X$ and $Y$, $X\times Y$ is their Cartesian product, $X\uplus Y$ is the disjoint union and $X^Y$ is the set of functions $f\colon Y\to X$. The collection of subsets of $X$ is denoted by $\pow(X)$. For a set of letters $A$, $A^\star$ denotes the set of all finite words over $A$; $\epsilon$ the empty word; and $w_1w_2$ the concatenation of words $w_1,w_2 \in A^\star$. We use $2$ for the set $\set{0,1}$. \section{Preliminary material} \label{sec:prelim} We first recall some standard definitions about finite automata and binary decision diagrams. For finite automata, the only slight difference with the setting described in~\cite{bp:popl13:hkc} is that we work with Moore machines~\cite{Moore56} rather than automata: the accepting status of a state is not necessarily a Boolean, but a value in a fixed yet arbitrary set. Since this generalisation is harmless, we stick to the standard automata terminology. \subsection{Finite automata} \label{ssec:finite:automata} A deterministic finite automaton (DFA) over the input alphabet $A$ and with outputs in $B$ is a triple $\tuple{S,t,o} $, where $S$ is a finite set of states, $o\colon S \to B$ is the output function, and $t\colon S \to S^A$ is the transition function which returns, for each state $x$ and for each input letter $a\in A$, the next state $t_a(x)$. 
For $a\in A$, we write $x\tr{a}x'$ for $t_a(x)=x'$. For $w\in A^\star$, we write $x\tr{w}x'$ for the least relation such that (1) $x\tr{\epsilon}x$ and (2) $x\tr{aw'}x'$ if $x\tr{a}x''$ and $x''\tr{w'}x'$. The \emph{language} accepted by a state $x\in S$ of a DFA is the function $\bb{x}\colon A^\star\to B$ defined as follows: \begin{align*} \bb{x}(\epsilon) &= o(x)\enspace, & \bb{x}(a w) &= \bb{t_a(x)}(w)\enspace. \end{align*} (When the output set is $2$, these functions are indeed characteristic functions of formal languages). Two states $x,y\in S$ are said to be \emph{language equivalent} (written $x\sim y$) iff they accept the same language. \subsection{Coinduction} \label{ssec:coinduction} We then define bisimulations. We make explicit the underlying notion of progression which we need in the sequel. \begin{definition}[Progression, Bisimulation]\label{def:bisimulation} Given two relations $R,R'\subseteq S \times S$ on states, $R$ \emph{progresses to} $R'$, denoted $R\prog R'$, if whenever $x\mathrel R y$ then \begin{enumerate} \item $o(x) = o(y) $ and \item for all $a\in A$, $t_a(x) \mathrel{R'} t_a(y)$. \end{enumerate} A \emph{bisimulation} is a relation $R$ such that $R\prog R$. \end{definition} Bisimulation is a sound and complete proof technique for checking language equivalence of DFA: \begin{proposition}[Coinduction] \label{prop:bisimulation-langequivalence} Two states are language equivalent iff there exists a bisimulation that relates them. \end{proposition} Accordingly, we obtain the simple algorithm described in Figure~\ref{alg:plain}, for checking language equivalence of two states of a given automaton. (Note that to check language equivalence of two states from two distinct automata, it suffices to consider the disjoint union of the two automata.) 
\begin{figure} \centering \begin{ocaml} type ('s,'b) dfa = {t: 's -> $A$ -> 's; o: 's -> 'b} let equiv (M: ('s,'b) dfa) (x y: 's) = let r = Set.empty () in let todo = Queue.singleton (x,y) in while $\lnot$Queue.is_empty todo do (* invariant: r \prog r \cup todo *) ?\label{line:plain:invariant}? let (x,y) = Queue.pop todo in if Set.mem r (x,y) then continue ?\label{line:plain:upto}? if M.o x <> M.o y then return false ?\label{line:plain:ce}? iter$_A$ (fun a -> Queue.push todo (M.t x a, M.t y a)) ?\label{line:plain:span}? Set.add r (x,y) done; return true \end{ocaml} \caption{Simple algorithm for checking language equivalence.} \label{alg:plain} \end{figure} This algorithm works as follows: the variable \code{r} contains a relation which is a bisimulation candidate and the variable \code{todo} contains a queue of pairs that remain to be processed. To process a pair $(x,y)$, one first checks whether it already belongs to the bisimulation candidate: in that case, the pair can be skipped since it was already processed. Otherwise, one checks that the outputs of the two states are the same ($o(x)=o(y)$), and one pushes all derivatives of the pair to the \code{todo} queue: all pairs $(t_a(x),t_a(y))$ for $a\in A$. The pair $(x,y)$ is finally added to the bisimulation candidate, and we proceed with the remainder of the queue. The main invariant of the loop (line~\ref{line:plain:invariant}: $\mathtt{r}\prog \mathtt{r}\cup \mathtt{todo}$) ensures that when \code{todo} becomes empty, then \code{r} contains a bisimulation, and the starting states were indeed bisimilar. Another invariant of the loop is that for any pair $(x',y')$ in \code{todo}, there exists a word $w$ such that $x\tr w x'$ and $y \tr w y'$. Therefore, if we reach a pair of states whose outputs are distinct---line~\ref{line:plain:ce}, then the word $w$ associated to that pair witnesses the fact that the two initial states are not equivalent. 
\begin{remark} Note that such an algorithm can be modified to check for language inclusion in a straightforward manner: assuming an arbitrary preorder $\leq$ on the output set $B$, and letting language inclusion mean $x \leq y$ if for all $w\in A^\star$, $\bb x(w)\leq\bb y(w)$, it suffices to replace line~\ref{line:plain:ce} in Figure~\ref{alg:plain} by \smallskip {\rm \code{if $\lnot$(M.o x $\leq$ M.o y) then return false}.} \end{remark} \subsection{Up-to techniques} \label{ssec:upto} The previous algorithm can be enhanced by exploiting \emph{up-to techniques}~\cite{San98MSCS,pous:dsbook11}: an up-to technique is a function $f$ on binary relations such that any relation $R$ satisfying $R\prog f(R)$ is contained in bisimilarity. Intuitively, such relations, which are not necessarily bisimulations, are constrained enough to be contained in bisimilarity. Bonchi and Pous have recently shown~\cite{bp:popl13:hkc} that the standard algorithm by Hopcroft and Karp~\cite{HopcroftKarp} actually exploits such an up-to technique: on line~\ref{line:plain:upto}, rather than checking whether the processed pair is already in the candidate relation \code{r}, Hopcroft and Karp check whether it belongs to the equivalence closure of \code{r}. Indeed the function $e$ mapping a relation to its equivalence closure is a valid up-to technique, and this optimisation allows the algorithm to stop earlier. Hopcroft and Karp moreover use an efficient data-structure to perform this check in almost constant time~\cite{Tarjan75}: \emph{disjoint sets forests}. We recall this data-structure in Section~\ref{ssec:dsf}. Other examples of valid up-to techniques include context-closure, as used in antichain-based algorithms~\cite{CAV06,tacas10,DoyenR10}, or congruence closure~\cite{bp:popl13:hkc}, which combines both context-closure and equivalence closure.
These techniques however require working with automata whose states carry a semi-lattice structure, as is typically the case for a DFA obtained from a non-deterministic automaton, through the powerset construction. \subsection{Binary decision diagrams} \label{ssec:bdd} Assume an ordered set $(A,<)$ and an arbitrary set $B$. Binary decision diagrams are directed acyclic graphs that can be used to represent functions of type $2^A\to B$. When $B=2$ is the two-element set, BDDs thus intuitively represent Boolean formulas with variables in $A$. Formally, a \emph{(multi-terminal, ordered) binary decision diagram} (BDD) is a pair $(N,c)$ where $N$ is a finite set of nodes and $c$ is a function of type $N \to B \uplus A\times N\times N$ such that if $c(n)=(a,l,r)$ and either $c(l)=(a',\_,\_)$ or $c(r)=(a',\_,\_)$, then $a<a'$. The condition on $c$ ensures that the underlying graph is acyclic, which makes it possible to associate a function $\sem n\colon 2^A \to B$ to each node $n$ of a BDD: \begin{align*} \sem n(\alpha) &= \begin{cases} b & \text{if }c(n)=b\in B\\ \sem l(\alpha) &\text{if }c(n)=(a,l,r) \text{ and } \alpha(a)=0\\ \sem r(\alpha) &\text{if }c(n)=(a,l,r) \text{ and } \alpha(a)=1 \end{cases} \end{align*} Let us now recall the standard graphical representation of BDDs: \begin{itemize} \item A node $n$ such that $c(n)=b\in B$ is represented by a square box labelled by $b$. \item A node $n$ such that $c(n)=(a,l,r)\in A\times N\times N$ is a decision node, which we picture by a circle labelled by $a$, with a dashed arrow towards the \emph{left child} $(l)$ and a plain arrow towards the \emph{right child} $(r)$. \end{itemize} For instance, the following drawing represents a BDD with three nodes; its top-most node denotes the function given on the right-hand side.
\begin{align*} \begin{minipage}{.3\linewidth} \includegraphics[width=2cm]{simple-bdd.pdf} \end{minipage} && \alpha\mapsto \begin{cases} b_1 &\text{if }\alpha(a_1)=1 \text{ and } \alpha(a_2)=0\\ b_2 &\text{otherwise} \end{cases} \end{align*} A BDD is \emph{reduced} if $c$ is injective, and $c(n)=(a,l,r)$ entails $l\neq r$. (The above example BDD is reduced.) Any BDD can be transformed into a reduced one. When $A$ is finite, reduced (ordered) BDD nodes are in one-to-one correspondence with functions from $2^A$ to $B$~\cite{Bryant86,Bryant92}. The main interest in this data-structure is that it is often extremely compact. In the sequel, we only work with reduced ordered BDDs, which we simply call BDDs. We denote by $\bdd A B$ the set of nodes of a large enough BDD with values in $B$, and we let $\mes f$ denote the unique BDD node representing a given function $f\colon 2^A\to B$. This notation is useful to give abstract specifications to BDD operations: in the sequel, all usages of this notation actually underpin efficient BDD operations. \paragraph{Implementation.} To better explain parts of the proposed algorithms, we give a simple implementation of BDDs in Figure~\ref{alg:bdd}. 
\begin{figure} \begin{ocaml} type 'b node = 'b descr hash_consed and 'b descr = V of 'b | N of $A$ ** 'b node ** 'b node val hashcons: 'b descr -> 'b node val c: 'b node -> 'b descr val memo_rec: (('a -> 'b -> 'c) -> 'a -> 'b -> 'c) -> 'a -> 'b -> 'c let constant v = hashcons (V v) let node a l r = if l==r then l else hashcons (N(a,l,r)) let apply (f: 'a -> 'b -> 'c): 'a node -> 'b node -> 'c node = memo_rec (fun app x y -> match c(x), c(y) with | V v, V w -> constant (f v w) | N(a,l,r), V _ -> node a (app l y) (app r y) | V _, N(a,l,r) -> node a (app x l) (app x r) | N(a,l,r), N(a',l',r') -> if a=a' then node a (app l l') (app r r') if a<a' then node a (app l y ) (app r y ) if a>a' then node a' (app x l') (app x r')) \end{ocaml} \caption{An implementation of BDDs.} \label{alg:bdd} \end{figure} The type for BDD nodes is given first: we use Filli\^atre's hash-consing library~\cite{FilliatreC06} to enforce unique representation of each node, whence the two type declarations and the two conversion functions \code{hashcons} and \code{c} between those types. The third utility function \code{memo_rec} is just a convenient operator for defining recursive memoised functions. The function \code{constant} creates a constant node, making sure it was not already created. The function \code{node} creates a new decision node, unless that node is useless and can be replaced by one of its two children. The generic function \code{apply} is central to BDDs~\cite{Bryant86,Bryant92}: many operations are just instances of this function. Its specification is the following: \begin{align*} \mathtt{apply}~f~x~y &= \mes{\alpha\mapsto f(\sem x(\alpha))(\sem y(\alpha))} \end{align*} This function is obtained by ``zipping'' the two BDDs together until a constant is reached. Memoisation is used to exploit sharing and to avoid performing the same computations again and again. \medskip Suppose now that we want to define logical disjunction on Boolean BDD nodes. 
Its specification is the following: \begin{align*} x \lor y &= \mes{\alpha\mapsto \sem x(\alpha)\lor \sem y(\alpha)}. \end{align*} We can thus simply use the \code{apply} function, applied to the Boolean disjunction function: \begin{ocaml} let dsj: bool node -> bool node -> bool node = apply (||) \end{ocaml} Note that this definition could actually be slightly optimised by inlining \code{apply}'s code, and noticing that the result is already known whenever one of the two arguments is a constant: \begin{ocaml} let dsj: bool node -> bool node -> bool node = memo_rec (fun dsj x y -> match c(x), c(y) with | V true, _ | _, V false -> x | _, V true | V false, _ -> y | N(a,l,r), N(a',l',r') -> if a=a' then node a (dsj l l') (dsj r r') if a<a' then node a (dsj l y ) (dsj r y ) if a>a' then node a' (dsj x l') (dsj x r')) \end{ocaml} We ignore such optimisations in the sequel, for the sake of clarity. \section{Symbolic automata} \label{sec:symbolic} A standard technique~\cite{Bryant92,HenriksenJJKPRS95,Veanes13,DAntoniV14} for working with automata over a large input alphabet consists in using BDDs to represent the transition function: a \emph{symbolic DFA} with output set $B$ and input alphabet $A'=2^A$ for some set $A$ is a triple $\tuple{S,t,o}$ where $S$ is the set of states, $t\colon S\to \bdd A S$ maps states into nodes of a BDD over $A$ with values in $S$, and $o\colon S \to B$ is the output function. Such a symbolic DFA is depicted in Figure~\ref{fig:symbolic:dfa}. It has five states, input alphabet $2^{\set{a,b,c}}$, and natural numbers as output set. We represent the BDD graphically; for each state, we write the values of $t$ and $o$ together with the name of the state, in the corresponding square box. The explicit transition table is given below the drawing.
\begin{figure} \centering \includegraphics[width=7cm]{symbolic-dfa} \begin{align*} \begin{array}{c|l@{\,}l@{\,}l@{\,}l@{\,}l@{\,}l@{\,}l@{\,}l|l@{\,}l@{\,}l@{\,}l@{\,}l@{\,}l@{\,}l@{\,}l} &\multicolumn{8}{c}{s_1, s_2, s_3}&\multicolumn{8}{c}{s_4, s_5}\\\hline a&0&0&0&0&1&1&1&1&0&0&0&0&1&1&1&1\\ b&0&0&1&1&0&0&1&1&0&0&1&1&0&0&1&1\\ c&0&1&0&1&0&1&0&1&0&1&0&1&0&1&0&1\\\hline t& s_1&s_2&s_3&s_3&s_2&s_2&s_3&s_3& s_4&s_4&s_5&s_5&s_4&s_4&s_5&s_5\\ \end{array} \end{align*} \caption{A symbolic DFA with five states.} \label{fig:symbolic:dfa} \end{figure} The simple algorithm described in Figure~\ref{alg:plain} is not optimal when working with such symbolic DFAs: at each non-trivial iteration of the main loop, one goes through all letters of $A'=2^A$ to push all the derivatives of the current pair of states to the queue \code{todo} (line~\ref{line:plain:span}), resulting in a lot of redundancies. Suppose for instance that we run the algorithm on the DFA of Figure~\ref{fig:symbolic:dfa}, starting from states $s_1$ and $s_4$. After the first iteration, \code{r} contains the pair $(s_1,s_4)$, and the queue \code{todo} contains eight pairs: \begin{align*} (s_1,s_4), (s_2,s_4), (s_3,s_5), (s_3,s_5), (s_2,s_4), (s_2,s_4), (s_3,s_5), (s_3,s_5) \end{align*} Assume that elements of this queue are popped from left to right. The first two elements are removed during the next two iterations, since $(s_1,s_4)$ already is in \code{r}. Then $(s_2,s_4)$ is processed: it is added to \code{r}, and the above eight pairs are appended again to the queue, which now has thirteen elements. The following pair is processed similarly, resulting in a queue with twenty ($13-1+8$) pairs. Since all pairs of this queue are already in \code{r}, it is finally emptied through twenty iterations, and the algorithm returns true.
Note that it would be even worse if the input alphabet was actually declared to be $2^{\set{a,b,c,d}}$: even though the bit $d$ of all letters is irrelevant for the considered DFA, each non-trivial iteration of the algorithm would push even more copies of each pair to the \code{todo} queue. What we propose here is to exploit the symbolic representation, so that a given pair is pushed only once. Intuitively, we want to recognise that starting from the pair of nodes $(n,m)$, the letters $010$, $011$, $110$ and $111$ are equivalent\footnote{Letters being elements of $2^{\set{a,b,c}}$ here, we represent them with bit-vectors of length three}, since they yield to the same pair, $(s_3,s_5)$. Similarly, the letters $001$, $100$, and $101$ are equivalent: they yield to the pair $(s_2,s_4)$. This idea is easy to implement using BDDs: like for the \code{apply} function (Figure~\ref{alg:bdd}), it suffices to zip the two BBDs together, and to push pairs when we reach two leaves. We use for that the procedure \code{pairs} from Figure~\ref{alg:pairs}, which successively applies a given function to all pairs reachable from two nodes. Its code is almost identical to \code{apply}, except that nothing is constructed (and memoisation is just used to remember those pairs that have already been visited). \begin{figure} \centering \begin{ocaml} let pairs (f: 'a ** 'b -> unit): 'a node -> 'b node -> unit = memo_rec (fun pairs x y -> match c(x), c(y) with | V v, V w -> f (v,w) | V _, N(_,l,r) -> pairs x l; pairs x r | N(_,l,r), V _ -> pairs l y; pairs r y | N(a,l,r), N(a',l',r') -> if a=a' then pairs l l'; pairs r r' if a<a' then pairs l y ; pairs r y if a>a' then pairs x l'; pairs x r') \end{ocaml} \label{alg:pairs} \caption{Iterating over the set of pairs reachable from two nodes.} \end{figure} We finally modify the simple algorithm from Section~\ref{ssec:finite:automata} by using this procedure on line~\ref{line:symb:span}: we obtain the code given in Figure~\ref{alg:symb:equiv}. 
\begin{figure} \centering \begin{ocaml} type ('s,'b) sdfa = {t: 's -> 's bdd; o: 's -> 'b} let symb_equiv (M: ('s,'b) sdfa) (x y: 's) = let r = Set.empty () in let todo = Queue.singleton (x,y) in let push_pairs = pairs (Queue.push todo) in ?\label{line:more:memo}? while $\lnot$Queue.is_empty todo do let (x,y) = Queue.pop todo in if Set.mem r (x,y) then continue if M.o x <> M.o y then return false ?\label{line:symb:ce}? push_pairs (M.t x) (M.t y) ?\label{line:symb:span}? Set.add r (x,y) done; return true \end{ocaml} \caption{Symbolic algorithm for checking language equivalence.} \label{alg:symb:equiv} \end{figure} We apply \code{pairs} to its first argument once and for all (line~\ref{line:more:memo}), so that we maximise memoisation: a pair of nodes that has been visited in the past will never be visited again, since all pairs of states reachable from that pair of nodes is already guaranteed to be processed. (As an invariant, we have that all pairs reachable from a pair of nodes memoised in \code{push_pairs} appear in \code{r \cup todo}.) Let us illustrate this algorithm by running it on the DFA from Figure~\ref{fig:symbolic:dfa}, starting from states $s_1$ and $s_4$ as previously. During the first iteration, the pair $(s_1,s_4)$ is added to \code{r}, and \code{push_pairs} is called on the pair of nodes $(n,m)$. This call virtually results in building the following BDD, \begin{center} \includegraphics[width=3.8cm]{pairs} \end{center} so that the following three pairs are pushed to \code{todo}. \begin{align*} (s_1,s_4), (s_2,s_4), (s_3,s_5) \end{align*} The first pair is removed by a trivial iteration: $(s_1,s_4)$ already belongs to \code{r}. The two other pairs are processed by adding them to \texttt{r}, but without pushing any new pair to \code{todo}: thanks to memoisation, the two expected calls to \code{push_pairs n m} are skipped. All in all, each reachable pair is pushed only once to the \code{todo} queue. 
More importantly, the derivatives of a given pair are explored symbolically. In particular, the algorithm would execute exactly in the same way, even if the alphabet was actually declared to be much larger (for instance because the considered states were part of a bigger automaton with more letters). \subsection{Displaying symbolic counter-examples.} \label{ssec:ce} Another advantage of this new algorithm is that it can easily be instrumented to produce concise counter-examples in case of failure. Consider for instance the following automaton \begin{center} \includegraphics[width=1.1\linewidth]{ce} \end{center} Intuitively, the states $s_1$ and $s_2$ are not equivalent because $s_2$ can take three transitions to reach $s_5$, with output $1$, while $s_1$ cannot reach $s_5$ in three transitions. More formally, the word $100~100~100$ over $2^{\set{a,b,c}}$ is a counter-example: we have \begin{align*} \bb{s_1}(100~100~100)&=\bb{s_2}(100~100)=\bb{s_3}(100)=o(s_4)=0\\ \bb{s_2}(100~100~100)&=\bb{s_3}(100~100)=\bb{s_4}(100)=o(s_5)=1 \end{align*} But there are plenty of other counter-examples of length three: it suffices that $a$ be assigned true in the three letters, the value of the bits $b$ and $c$ does not change the above computation. As a consequence, this counter-example is best described as the word $a\, a\, a$, whose letters are Boolean formulas in conjunctive normal form indicating the least requirements to get a counter example. 
The algorithm from Figure~\ref{alg:symb:equiv} makes it possible to give this information back to the user: \begin{itemize} \item modify the queue \code{todo} to store triples $(w,x,y)$ where $(x,y)$ is a pair of states to process, and $w$ is the associated potential counter-example; \item modify the function \code{pairs} (Figure~\ref{alg:pairs}), so that it uses an additional argument to record the encountered node labels, with negative polarity when going through the recursive call for the left children, and positive polarity for the right children; \item modify line~\ref{line:symb:ce} of the main algorithm to return the symbolic word associated with the current pair when the output test fails. \end{itemize} \subsection{Non-deterministic automata} \label{ssec:det} Standard coinductive algorithms for DFA can be applied to non-deterministic automata (NFA) by using the \emph{powerset construction}, on the fly. This construction transforms a non-deterministic automaton into a deterministic one; we extend it to symbolic automata in the straightforward way. A \emph{symbolic NFA} is a tuple $\tuple{S,t,o}$ where $S$ is the set of states, $o\colon S\to B$ is the output function, and $t\colon S\to \bdd A{\pow S}$ maps a state and a letter of the alphabet $A'=2^A$ to a set of possible successor states, using a symbolic representation. Assuming such an NFA, one defines a symbolic DFA $\tuple{\pow S,t^\sharp,o^\sharp}$ as follows: \begin{align*} t^\sharp(\set{x_1,\dots,x_n})&\eqdef t(x_1)\bddcup\dots\bddcup t(x_n) \\ o^\sharp(\set{x_1,\dots,x_n})&\eqdef o(x_1)\bddlor\dots\bddlor o(x_n) \end{align*} (Where $\bddcup$ denotes the pointwise union of two BDDs over sets: $n\bddcup m = \mes{\phi\mapsto\sem n(\phi)\cup \sem m(\phi)}$.)
\subsection{Hopcroft and Karp: disjoint sets forests} \label{ssec:dsf} The previous algorithm can be freely enhanced by using up-to techniques, as described in Section~\ref{ssec:upto}: it suffices to modify line~\ref{line:plain:upto} to skip pairs more or less aggressively, according to the chosen up-to technique. The up-to-equivalence technique used in Hopcroft and Karp's algorithm can however be integrated in a deeper way, by exploiting the fact that we work with BDDs. This leads to a second algorithm, which we describe in this section. Let us first recall \emph{disjoint sets forests}, the data structure used by Hopcroft and Karp to represent equivalence classes. This standard data-structure makes it possible to check whether two elements belong to the same class and to merge two equivalence classes, both in almost constant amortised time~\cite{Tarjan75}. The idea consists in storing a partial map from elements to elements whose underlying graph is acyclic. An element for which the map is not defined is the \emph{representative} of its equivalence class, and the representative of an element pointing in the map to some $y$ is the representative of $y$. Two elements are equivalent if and only if they lead to the same representative, and to merge two equivalence classes, it suffices to add a link from the representative of one class to the representative of the other class. Two optimisations are required to obtain the announced theoretical complexity: \begin{itemize} \item when following the path leading from an element to its representative, one should compress it in some way, by modifying the map so that the elements in this path become closer to their representative. There are various ways of compressing paths; in the sequel, we use the method called \emph{halving}~\cite{Tarjan75}; \item when merging two classes, one should make the smallest one point to the biggest one, to avoid generating too many long paths.
Again, there are several possible heuristics, but we elide this point in the sequel. \end{itemize} As explained above, the simplest thing to do would be to replace the bisimulation candidate $r$ from Figure~\ref{alg:symb:equiv} by a disjoint sets forest over the states of the considered automaton. The new idea consists in relating the BDD nodes of the symbolic automaton rather than just its states (i.e., just the BDD leaves). By doing so, one avoids visiting pairs of nodes that have already been visited up to equivalence. Concerning the implementation, we first introduce a variant of the function \code{pairs} in Figure~\ref{alg:dsf:pairs}, which uses disjoint sets forest rather than plain memoisation. \begin{figure} \centering \begin{ocaml} let pairs' (f: 'b ** 'b -> unit): 'b node -> 'b node -> unit = (* the disjoint sets forest *) let m = Hmap.empty() in let link x y = Hmap.add m x y in (* representative of a node *) let rec repr x = match Hmap.get m x with | None -> x | Some y -> match Hmap.get m y with | None -> y | Some z -> link x z; repr z in let rec pairs x y = let x = repr x in let y = repr y in if x <> y then match c(x), c(y) with | V v, V w -> link x y; f (v,w) ?\label{line:freelink1}? | V _, N(_,l,r) -> link y x; pairs x l; pairs x r | N(_,l,r), V _ -> link x y; pairs l y; pairs r y | N(a,l,r), N(a',l',r') -> if a=a' then link x y; pairs l l'; pairs r r' ?\label{line:freelink2}? if a<a' then link x y; pairs l y ; pairs r y if a>a' then link y x; pairs x l'; pairs x r') in pairs \end{ocaml} \caption{Iterating over the set of pairs reachable from two nodes, optimised using disjoint set forests.} \label{alg:dsf:pairs} \end{figure} This function first creates an empty forest (we use for that Filli\^atre's implementation of maps over hash-consed values). The function \code{link} adds a link between two representatives; the recursive terminal function \code{repr} looks for the representative of a node and implements halving.
The function \code{pairs'} is defined similarly to \code{pairs}, except that it first takes the representative of the two given nodes, and that it adds a link from one to the other before recursing. Those links can be put in any direction on lines~\ref{line:freelink1} and~\ref{line:freelink2}, and we should actually use an appropriate heuristic to take this decision, as explained above. In the four other cases, we put a link either from the node to the leaf, or from the node with the smallest label to the node with the biggest label. By proceeding this way, we somehow optimise the BDD, by leaving as few decision nodes as possible. It is however important to notice that there is actually no choice left in those four cases: we work implicitly with the optimised BDD obtained by mapping all nodes to their representatives, so that we have to maintain the invariant that this optimised BDD is ordered and acyclic. (Notice that on the contrary, this optimised BDD need not be reduced anymore: the children of a given node might be silently equated, and a node might have several representations since its children might be silently equated with the children of another node with the same label.) We finally obtain the algorithm given in Figure~\ref{alg:dsf:equiv}.
\begin{figure} \centering \begin{ocaml} let dsf_equiv (M: ('s,'b) sdfa) (x y: 's) = let todo = Queue.singleton (x,y) in let push_pairs = pairs' (Queue.push todo) in while $\lnot$Queue.is_empty todo do let (x,y) = Queue.pop todo in if M.o x <> M.o y then return false push_pairs (M.t x) (M.t y) done; return true \end{ocaml} \caption{Symbolic algorithm optimised with disjoint set forests.} \label{alg:dsf:equiv} \end{figure} It is similar to the previous one (Figure~\ref{alg:symb:equiv}), except that we use the above new function \code{pairs'} to push pairs into the \code{todo} queue, and that we no longer need to store the bisimulation candidate \code{r}: this relation is subsumed by the restriction of the disjoint set forests to BDD leaves. \medskip If we execute this algorithm on the symbolic DFA from Figure~\ref{fig:symbolic:dfa}, between states $s_1$ and $s_4$, we obtain the disjoint set forest depicted below using dashed red arrows. This actually corresponds to the pairs which would be visited by the first symbolic algorithm (Figure~\ref{alg:symb:equiv}). \begin{center} \includegraphics[width=7cm]{dfa-dsf1} \end{center} If instead we start from nodes $n_1$ and $m_1$ in the following partly described automaton, we would get the disjoint set forest depicted similarly in red, while the first algorithm would go through all blue pairs, one of which is superfluous. \begin{center} \includegraphics[width=7cm]{dsf} \end{center} \section{Kleene algebra with tests} \label{sec:kat} Now we consider Kleene algebra with tests, for which we provide several automata constructions that allow one to use the previous symbolic algorithms.
\medskip A \emph{Kleene algebra with tests} (KAT) is a tuple $\tuple{X,B,{\cdot},{+},{\cdot^\star},\lnot,1,0}$ such that \begin{enumerate}[(i)] \item $\tuple{X,{\cdot},{+},{\cdot^\star},1,0}$ is a Kleene algebra~\cite{kozen94:ka:completeness}, i.e., an idempotent semiring with a unary operation, called ``Kleene star'', satisfying the following axiom and inference rules: \begin{mathpar} 1+x\cdot{}x^\star \le x^\star \and \inferrule{y\cdot{}x \le x}{y^\star\cdot{}x \le x} \and % \inferrule{x\cdot{}y \le x}{x\cdot{}y^\star \le x} \end{mathpar} (The preorder $(\le)$ being defined by $x\le y ~ \eqdef ~ x+y = y$.) \item $B\subseteq X$ \item $\tuple{B,{\cdot},{+},{\neg},1,0}$ is a Boolean algebra. \end{enumerate} The elements of the set $B$ are called ``tests''; we denote them by $\phi,\psi$. The elements of $X$, called ``Kleene elements'', are denoted by $x,y,z$. We sometimes omit the operator ``$\cdot$'' from expressions, writing $xy$ for $x\cdot y$. The following (in)equations illustrate the kind of laws that hold in all Kleene algebras with tests: \begin{mathpar}{} \phi+\neg \phi = 1 \and % \phi\cdot(\neg \phi + \psi) = \phi\cdot \psi = \neg(\neg \phi + \neg \psi) \\ x^\star x^\star = x^\star \and % (x+y)^\star = x^\star(yx^\star)^\star \and (x+x x y)^\star \le (x+x y)^\star\\ \phi\cdot(\neg \phi \cdot x)^\star = \phi \and % \phi\cdot(\phi\cdot x \cdot \neg \phi+\neg \phi\cdot y \cdot \phi)^\star\cdot\phi \le (x\cdot y)^\star % \end{mathpar} The laws from the first line come from the Boolean algebra structure, while the ones from the second line come from the Kleene algebra structure. The two laws from the last line require both Boolean algebra and Kleene algebra reasoning. \paragraph{Binary relations.} Binary relations form a Kleene algebra with tests; this is the main model we are interested in, in practice.
The Kleene elements are the binary relations over a given set $S$, the tests are the predicates over this set, encoded as sub-identity relations, and the star of a relation is its reflexive transitive closure. This relational model is typically used to interpret imperative programs: such programs are state transformers, i.e., binary relations between states, and the conditions used to define the control-flow of these programs are just predicates on states. Typically, a program ``\code{while $\phi$ do p}'' is interpreted through the KAT expression $(\phi\cdot p)^\star\cdot\lnot\phi$. \paragraph{KAT expressions.} We denote by $Rel(V)$ the set of \emph{regular expressions} over a set $V$: \begin{align*} x,y ::= v\in V \OR x+y \OR x\cdot y \OR x^\star\enspace. \end{align*} Assuming a set $A$ of elementary tests, we denote by $B(A)$ the set of \emph{Boolean expressions} over $A$: \begin{align*} \phi,\psi ::= a\in A \OR 1 \OR 0 \OR \phi\land\phi \OR \phi\lor\phi \OR \lnot\phi \end{align*} Further assuming a set $\Sigma$ of letters (or atomic Kleene elements), a \emph{KAT expression} is a regular expression over the disjoint union $\Sigma\uplus B(A)$. Note that the constants $0$ and $1$ from the signature of KAT, and usually found in the syntax of regular expressions, are represented here by injecting the corresponding tests. \paragraph{Guarded string languages.} Guarded string languages are the natural generalisation of string languages for Kleene algebra with tests. We briefly define them. An \emph{atom} is a valuation from elementary tests to Booleans; it indicates which of these tests are satisfied. We let $\alpha,\beta$ range over atoms, the set of which is denoted by $At$: $At=2^A$. A Boolean formula $\phi$ is \emph{valid} under an atom $\alpha$, denoted by $\alpha\vDash\phi$, if $\phi$ evaluates to true under the valuation $\alpha$. 
A \emph{guarded string} is an alternating sequence of atoms and letters, both starting and ending with an atom: \begin{align*} \alpha_1,p_1,\alpha_2,\dots,\alpha_n,p_n,\alpha_{n+1}\enspace. \end{align*} The concatenation $u\ast v$ of two guarded strings $u,v$ is a partial operation: it is defined only if the last atom of $u$ is equal to the first atom of $v$; it consists in concatenating the two sequences and removing one copy of the shared atom in the middle. To any KAT expression, one associates a \emph{guarded string language}, i.e., a set of guarded strings, as follows: \begin{align*} G(\phi) &= \set{\alpha\in At \mid \alpha \vDash \phi} \tag*{$(\phi\in B(A))$} \\ G(p) &= \set{\alpha p \beta \mid \alpha,\beta\in At} \tag*{$(p\in \Sigma)$} \\ G(x+y) &= G(x) \cup G(y) \\ G(xy) &= \set{u\ast v \mid u\in G(x), v\in G(y)} \\ G(x^\star) &= \set{u_1\ast \dots \ast u_n \mid n\geq 0, \forall i\leq n, u_i\in G(x)} \end{align*} \paragraph{KAT Completeness.} Kozen and Smith proved that the equational theory of Kleene algebra with tests is complete over the relational model~\cite{kozens96:kat:completeness:decidability}: any equation that holds universally in this model can be proved from the axioms of KAT. Moreover, two expressions are provably equal if and only if they denote the same language of guarded strings. By a simple reduction to automata theory, this gives algorithms to decide the equational theory of KAT. Now we study several such algorithms, and we show each time how to exploit symbolic representations to make them efficient. \subsection{Brzozowski's derivatives} \label{ssec:brz} Derivatives were introduced by Brzozowski~\cite{Brzozowski64} for (plain) regular expressions; they make it possible to define a deterministic automaton where the states of the automaton are the regular expressions themselves.
Derivatives can be extended to KAT expressions in a very natural way~\cite{kozen08:kat:coalgebra}: we first define a Boolean function $\epsilon_\alpha$, that indicates whether an expression accepts the single atom $\alpha$; this function is then used to define the derivation function $\delta_{\alpha p}$, that intuitively returns what remains of the given expression after reading the atom $\alpha$ and the letter $p$. \begin{figure} \centering \begin{align*} \begin{aligned} \epsilon_\alpha(x{+}y) &= \epsilon_\alpha(x){+}\epsilon_\alpha(y) \\ \epsilon_\alpha(x{\cdot}y) &= \epsilon_\alpha(x){\cdot}\epsilon_\alpha(y) \\ \epsilon_\alpha(x^\star) &= 1\\ \epsilon_\alpha(q) &= 0 \\ \epsilon_\alpha(\phi) &= \begin{cases} 1 & \text{if }\alpha\vDash\phi\\ 0 & \text{oth.} \end{cases} \end{aligned}&& \begin{aligned} \delta_{\alpha p}(x{+}y) &= \delta_{\alpha p}(x){+}\delta_{\alpha p}(y) \\ \delta_{\alpha p}(x{\cdot}y) &= \begin{cases} \delta_{\alpha p}(x){\cdot}y ~\text{ if }\epsilon_\alpha(x)=0\\ \delta_{\alpha p}(x){\cdot}y {+} \delta_{\alpha p}(y) \text{ oth.} \end{cases}\\ \delta_{\alpha p}(x^\star) &= \delta_{\alpha p}(x)\cdot x^\star\\ \delta_{\alpha p}(q) &= \begin{cases} \bddtrue & \text{if }p=q\\ \bddfalse & \text{oth.} \end{cases}\\ \delta_{\alpha p}(\phi) &= \bddfalse \end{aligned} \end{align*} \vspace{-1em} \caption{Explicit derivatives for KAT expressions} \label{fig:kat:deriv} \end{figure} These two functions make it possible to give a coalgebraic characterisation of the function $G$, we have: \begin{align*} G(x)(\alpha) &= \epsilon_\alpha(x) & G(x)(\alpha\,p\,u) &= G(\delta_{\alpha p}(x))(u)\enspace. \end{align*} The tuple $\tuple{Reg(\Sigma\uplus B(A)),\delta,\epsilon}$ can be seen as a deterministic automaton with input alphabet $At\times\Sigma$, and output set $2^{At}$. 
Thanks to the above characterisation, a state $x$ in this automaton accepts precisely the guarded string language $G(x)$---modulo the isomorphism $(At\times\Sigma)^\star\to 2^{At}\approx \pow{(At\times\Sigma)^\star\times At}$. However, we cannot directly apply the simple algorithm from Section~\ref{ssec:finite:automata}, because this automaton is not finite. First, there are infinitely many KAT expressions, so that we have to restrict to those that are accessible from the expressions we want to check for equality. This is however not sufficient: we also have to quotient regular expressions w.r.t.\ a few simple laws~\cite{kozen08:kat:coalgebra}. This quotient is simple to implement by normalising expressions; we thus assume that expressions are normalised in the remainder of this section. \paragraph{Symbolic derivatives.} The input alphabet of the above automaton is exponentially large w.r.t.\ the number of primitive tests: $At\times\Sigma=2^A\times\Sigma$. Therefore, the simple algorithm from Section~\ref{ssec:finite:automata} is not tractable in practice. Instead, we would like to use its symbolic version (Figure~\ref{alg:symb:equiv}). The output values (in $(2^{At}={2^A}\to 2)$) are also exponentially large, and are best represented symbolically, using Boolean BDDs. In fact, any test appearing in a KAT expression can be pre-compiled into a Boolean BDD: rather than working with regular expressions over $\Sigma\uplus B(A)$, we thus move to regular expressions over $\Sigma\uplus \bdd A 2$, which we call \emph{symbolic KAT expressions}. We denote the set of such expressions by $\SKAT$, and we let $\enc e$ denote the symbolic version of a KAT expression $e$. Note that there is a slight discrepancy here w.r.t.\ Section~\ref{sec:symbolic}: the input alphabet is $2^A\times\Sigma$ rather than just $2^{A'}$ for some $A'$.
For the sake of simplicity, we just assume that $\Sigma$ is actually of the shape $2^{\Sigma'}$; alternatively, we could work with automata whose transition functions are represented partly symbolically (for $At$), and partly explicitly (for $\Sigma$). \begin{figure} \centering \begin{align*} \begin{aligned} \sepsilon(x{+}y) &= \sepsilon(x){\bddlor}\sepsilon(y) \\ \sepsilon(x{\cdot} y) &= \sepsilon(x){\bddland}\sepsilon(y) \\ \sepsilon(x^\star) &= 1\\ \sepsilon(p) &= 0 \\ \sepsilon(\phi) &= \phi \end{aligned}&& \begin{aligned} \sdelta(x{+}y) &= \sdelta(x)\bddplus\sdelta(y) \\ \sdelta(x{\cdot} y) &= (\sdelta(x) \bdddot y) \bddplus (\sepsilon(x)\bddtimes\sdelta(y)) \\ \sdelta(x^\star) &= \sdelta(x) \bdddot x^\star\\ \sdelta(p) &= \mes{p\mapsto 1, \_\mapsto 0}\\ \sdelta(\phi) &= 0 \end{aligned} \end{align*} \vspace{-1em} \caption{Symbolic derivatives for KAT expressions} \label{fig:kat:symb:deriv} \end{figure} We define the symbolic derivation operations in Figure~\ref{fig:kat:symb:deriv}. The output function, $\sepsilon$, has type $\SKAT\to \bdd A 2$: it maps symbolic KAT expressions to Boolean BDD nodes. The operations used on the right-hand side of this definition are those on Boolean BDDs. The function $\sepsilon$ is much more efficient than its explicit counterpart ($\epsilon$, in Figure~\ref{fig:kat:deriv}): the set of all accepted atoms is computed at once, symbolically. The transition function $\sdelta$ has type $\SKAT\to \bdd{A\uplus\Sigma'}{\SKAT}$. It maps symbolic KAT expressions to BDDs whose leaves are themselves symbolic KAT expressions. Again, in contrast to its explicit counterpart, $\sdelta$ computes all the transitions of a given expression once and for all. 
The operations used on the right-hand side of the definition are the following ones: \begin{itemize} \item $n\bddplus m$ is defined by pointwise applying the syntactic sum operation from KAT expressions to the two BDDs $n$ and $m$: $n\bddplus m=\mes{\phi\mapsto \sem n(\phi)+\sem m(\phi)}$; \item $n\bdddot x$ syntactically multiplies all leaves of the BDD $n$ by the expression $x$, from the right: $n\bdddot x=\mes{\phi\mapsto \sem n(\phi)\cdot x}$; \item $f\bddtimes n$ ``multiplies'' the Boolean BDD $f$ with the BDD $n$: $f\bddtimes n=\mes{\phi\mapsto \sem n(\phi) \text{ if } \sem f(\phi)=1, 0\text{ otherwise}}$. \item $\mes{q\mapsto 1, \_\mapsto 0}$ is the BDD mapping $q$ to $1$ and everything else to $0$ ($q\in \Sigma=2^{\Sigma'}$ being cast into an element of $2^{A\uplus\Sigma'}$). \end{itemize} By two simple inductions, one proves that for every atom $\alpha\in At$, expression $x\in \SKAT$, and letter $p\in\Sigma$, we have: \begin{align*} \sem{\sepsilon \enc x}(\alpha) &= \epsilon_\alpha(x)\\ \sem{\sdelta \enc x}(\alpha p) &= \enc{\delta_{\alpha p}(x)} \end{align*} (Again, we abuse notation by letting the pair $\alpha p$ denote an element of $2^{A\uplus\Sigma'}$.) This ensures that the symbolic deterministic automaton $\tuple{\SKAT,\sdelta,\sepsilon}$ faithfully represents the previous explicit automaton, and that we can use the symbolic algorithms from Section~\ref{sec:symbolic}. \subsection{Partial derivatives} \label{ssec:partial} An alternative to Brzozowski's derivatives consists in using Antimirov's \emph{partial derivatives}~\cite{Antimirov96}, which generalise to KAT in a straightforward way~\cite{pous:itp13:ra}. The difference with Brzozowski's derivative is that they produce a non-deterministic automaton: states are still expressions, but the derivation function produces a set of expressions. An advantage is that we do not need to normalise expressions: the set of partial derivatives reachable from an expression is always finite. 
We give directly the symbolic definition, which is very similar to the previous one: \begin{align*} \spdelta(x{+}y) &= \spdelta(x)\bddcup\spdelta(y) \\ \spdelta(x{\cdot} y) &= (\spdelta(x) \bddsdot y) \bddcup (\sepsilon(x)\bddstimes\spdelta(y)) \\ \spdelta(x^\star) &= \spdelta(x) \bddsdot x^\star\\ \spdelta(p) &=\mes {p\mapsto \set 1, \_\mapsto \emptyset}\\ \spdelta(\phi) &= \emptyset \end{align*} The differences lie in the BDD operations, whose leaves are now sets of expressions: \begin{itemize} \item $n\bddcup m = \mes{\phi\mapsto\sem n(\phi)\cup \sem m(\phi)}$; \item $n\bddsdot x = \mes{\phi\mapsto\set {x'\cdot x \mid x' \in \sem n(\phi)}}$; \item $f\bddstimes n = \mes{\phi\mapsto \sem n(\phi) \text{ if } \sem f(\phi)=1, \emptyset\text{ otherwise}}$. \end{itemize} One can finally relate partial derivatives to Brzozowski's ones: \begin{align*} \KA\vdash\sum_{x' \in \delta'_{\alpha p}(x)} x' = \enc{\delta_{\alpha p}(x)}. \end{align*} (We do not have a syntactic equality because partial derivatives inherently exploit the fact that multiplication distributes over sums.) Using symbolic determinisation as described in Section~\ref{ssec:det}, one can thus use the algorithm from Section~\ref{sec:symbolic} with Antimirov's partial derivatives. \subsection{Ilie \& Yu's construction} \label{ssec:ilieyu} Other automata constructions from the literature can be generalised to KAT expressions. We can for instance consider Ilie and Yu's construction~\cite{Ilie-yu-FollowAutomata}, which produces non-deterministic automata with epsilon transitions with exactly one initial state, and one accepting state. We consider a slightly simplified version here, where we elide a few optimisations and just proceed by induction on the expression. The four cases are depicted below: $i$ and $f$ are the initial and accepting states, respectively; in the concatenation and star cases, a new state $p$ is introduced. 
\begin{align*} \phi/p:&\quad \xymatrix @R=1em { % *+[o][F]{i}\ar[r]_{\phi/p} & *+[o][F]{f} }& % x\cdot y:&\quad \xymatrix @R=1em { % *+[o][F]{i}\ar@/_/@{-}[r]\ar@/^/@{-}[r]_{A(x)} & % *+[o][F.]{p}\ar@/_/@{-}[r]\ar@/^/@{-}[r]_{A(y)} & % *+[o][F]{f} % }\\ % x+y:&\quad \xymatrix @R=1em { \\% *+[o][F]{i}\ar@/_2.5em/@{-}[r]\ar@/_1em/@{-}[r]_{A(y)} % \ar@/^2.5em/@{-}[r]\ar@/^1em/@{-}[r]^{A(x)} & % *+[o][F]{f} % }& % x^\star:&\quad \xymatrix @R=2em @C=2em { &*{}&\\ *+[o][F]{i}\ar[r]_1 & % *+[o][F.]{p}\ar[r]_1 % \ar@{-}@(ul,ur)[]^{A(x)} % \ar@{-}@(ul,l)[u] % \ar@{-}@(ur,r)[u] % & *+[o][F]{f} % } % \end{align*} \smallskip To adapt this construction to KAT expressions, it suffices to generalise epsilon transitions to transitions labelled by tests. In the base case for a test $\phi$, we just add a transition labelled by $\phi$ between $i$ and $f$; the two epsilon transitions needed for the star case just become transitions labelled by the constant test $1$. As expected, when starting from a symbolic KAT expression, those counterparts to epsilon transitions are labelled by Boolean BDD nodes rather than by explicit Boolean expressions. \paragraph{Epsilon cycles.} The most important optimisation we miss with this simplified presentation of Ilie and Yu's construction is that we should merge states that belong to cycles of epsilon transitions. An alternative to this optimisation consists in normalising first the expressions so that for all subexpressions of the shape $e^\star$, $e$ does not contain $1$, i.e., $\sepsilon(e)\neq 1$. Such a normalisation procedure has been proposed for plain regular expressions by Br\"uggemann-Klein~\cite{klein93}, it generalises easily to (symbolic) KAT expressions. 
For instance, here are typical normalisations: \begin{align} \label{ex1} (\phi+p)^\star &\mapsto p^\star \\ (p^\star+q)^\star &\mapsto (p+q)^\star \\ \label{ex2} ((1+p)(1+q))^\star &\mapsto (p+q)^\star \end{align} When working with such normalised expressions, the automata produced by the above simplified construction have acyclic epsilon transitions, so that the aforementioned optimisation is unnecessary. According to the example~\eqref{ex1}, it might be tempting to strengthen example~\eqref{ex2} into $((\phi+p)(\psi+q))^\star \mapsto (p+q)^\star $. Such a step is invalid, unfortunately. (The second expression accepts the guarded string $\alpha p \beta$ for all $\alpha,\beta$, while the starting expression needs $\beta\vDash\psi$.) This example seems to show that one cannot ensure that all starred subexpressions are mapped to $0$ by $\sepsilon$. As a consequence we cannot assume that test-labelled transitions in general form an acyclic graph. \subsection{Epsilon transitions removal} \label{ssec:epsilon} It remains to eliminate epsilon transitions, so that the powerset construction can be applied to get a DFA. The usual technique with plain automata consists in computing the reflexive transitive closure of epsilon transitions, to precompose the other transitions with the resulting relation, and to saturate accepting states accordingly. More formally, let us recall Kozen's matricial representation of non-deterministic automata with epsilon transitions~\cite{kozen94:ka:completeness}, as tuples $\tuple{n,u,J,N,v}$, where $u$ is a $(1,n)$ 01-matrix denoting the initial states, $J$ is a $(n,n)$ 01-valued matrix denoting the epsilon transitions, $N$ is a $(n,n)$ matrix representing the other transitions (with entries sets of letters in $\Sigma$), and $v$ is a $(n,1)$ 01-matrix encoding the accepting states. 
The language accepted by such an automaton can be represented by the following matricial product, using Kleene star on matrices: \begin{align*} u\cdot(J+N)^\star\cdot v \end{align*} Thanks to the algebraic law $(a+b)^\star=a^\star\cdot (b\cdot a^\star)^\star$, which is valid in any Kleene algebra, we get \begin{align*} \KA\vdash u\cdot(J+N)^\star\cdot v = u\cdot (J^\star N)^\star\cdot (J^\star v) \end{align*} We finally check that $\tuple{n,u,0,J^\star N,J^\star v}$ represents a non-deterministic automaton without epsilon transitions. This is how Kozen validates epsilon elimination for plain automata, algebraically~\cite{kozen94:ka:completeness}. The same can be done here for KAT by noticing that tests (or Boolean BDD nodes) form a Kleene algebra with a degenerate star operation: the constant-to-1 function. One can thus generalise the above reasoning to the case where $J$ is a tests-valued matrix rather than a 01-matrix. The iteration $J^\star$ of such a matrix can be computed using standard shortest-path algorithms~\cite{HofnerM12}, on top of the efficient semiring of Boolean BDD nodes. The resulting automaton has the expected type: \begin{itemize} \item there is a transition labelled by $\alpha p$ between $i$ and $j$ if there exists a $k$ such that $\alpha \vDash (J^\star)_{i,k}$ and $p\in N_{k,j}$. (The corresponding non-deterministic symbolic transition function can be computed efficiently using appropriate BDD functions.) \item The output value of a state $i$ is the Boolean BDD node obtained by taking the disjunction of all the $(J^\star)_{i,j}$ such that $j$ is an accepting state (i.e., just $(J^\star)_{i,f}$ when using Ilie and Yu's construction). \end{itemize} \section{Experiments} \label{sec:exp} We implemented all presented algorithms; the corresponding library is available online~\cite{symbolickat:web}. This allowed us to perform a few experiments and to compare the various presented algorithms and constructions. 
We generated random KAT expressions over two sets of seven primitive tests and seven atomic elements, with seventy connectives, and excluding the constant 0. A hundred pairs of random expressions were checked for equality after being saturated by adding the constant $\Sigma^\star$ (by doing so, we make sure that the expressions are equivalent, so that the algorithms have to run their worst case: they cannot stop early thanks to a trivial counter-example). Table~\ref{tab:pairs} gives the total number of output tests (e.g., line~\ref{line:symb:ce} in Figure~\ref{alg:symb:equiv}) performed by several combinations of algorithms and automata constructions, as well as the global running time. \begin{table} \centering \begin{tabular}{r|rrr|rrr} & \multicolumn{3}{c}{\code{symb_equiv}} & \multicolumn{3}{c}{\code{dsf_equiv}} \\ & Ant. & I.\&Y. & Brz. & Ant. & I.\&Y. & Brz. \\\hline time & 1.5s & 7.7s & 2m34 & 1.4s & 7.6s & 1m52 \\ output tests & 7363 & 7440 & 20167 & 4322 & 4498 & 10255 \\ \end{tabular} \caption{Checking random saturated pairs of expressions.} \label{tab:pairs} \end{table} One can notice that Antimirov's partial derivatives provide the fastest algorithms. Ilie and Yu's construction yields approximately the same number of output tests as Antimirov's partial derivatives, but requires more time, certainly because our implementation of transitive closure for epsilon removal is sub-optimal. Brzozowski's construction gives poor results both in terms of time and output tests: the produced automata are apparently larger, and heavier to compute. Concerning the equivalence algorithm, one notices that using disjoint set forests significantly reduces the number of output tests. There is almost no difference in the timings with the first two constructions, because most of the time is spent in constructing the automata rather than checking them for equivalence. 
This is no longer true with Brzozowski's construction, for which the automata are sufficiently big to observe a difference. \section{Directions for future work} \label{sec:fw} Concerning KAT, a natural extension of this work would be to apply the proposed algorithms to KAT+!B~\cite{GKM14a:KATB} and NetKAT~\cite{NetKAT14}, two extensions of KAT with important applications in verification: while programs with mutable tests in the former case, and network programming in the latter case. KAT+!B has an EXPSPACE-complete equational theory, and its structure makes explicit algorithms completely useless. Designing symbolic algorithms for KAT+!B seems challenging. NetKAT remains PSPACE-complete, and Foster et al.\ recently proposed a coalgebraic decision procedure relying on an extension of Brzozowski's derivatives~\cite{FKMST14a}. To get a practical algorithm, they represent automata transitions using sparse matrices, which allows for some form of symbolic treatment. It is important to notice, however, that by considering (multi-terminal) BDDs here, we go far beyond the capabilities of sparse transition matrices. Indeed, sparse matrices just make it possible to factor out those cases where a state has no successor at all. Consider for instance a KAT expression of the shape $apx+(\lnot a)py$, where $x$ and $y$ are two non-empty expressions, possibly using a lot of atomic tests. The derivative of this expression along a letter $\alpha p$ is either $x$ or $y$ depending on whether $\alpha(a)$ holds or not. A BDD representation would thus consist in a single decision node, with two leaves $x$ and $y$. In contrast, a sparse matrix representation would need to list the exponentially many atoms together with either $x$ or $y$. \medskip Moving away from KAT specificities, we leave open the question of the complexity of our symbolic variant of Hopcroft and Karp's algorithm (Figure~\ref{alg:dsf:equiv}). 
Tarjan proved that Hopcroft and Karp's algorithm is almost linear in amortised time complexity, and he made a list of heuristics and path compression schemes that lead to that complexity~\cite{Tarjan75}. A similar study for the symbolic counterpart we propose here seems out of reach for now. \bibliographystyle{abbrvnat}
1,108,101,565,874
arxiv
\section{Introduction and summary of results} \label{sec:intro} Supersymmetry is an efficient tool for understanding the microscopic structure of black holes, as exemplified by the wealth of developments and precision tests achieved in the last two decades, building on the original constructions of \cite{Strominger:1996sh, Maldacena:1997de} for counting the entropy for asymptotically flat BPS black holes. Supersymmetric black holes in asymptotically anti-de Sitter (AdS) spacetimes with a known field theory dual provide a solid testing ground for extending these results beyond the near-horizon region and into more general examples, e.g.\ by allowing more general horizon topologies and including rotation. The construction of the relevant black hole solutions in four dimensions has a long history (see e.g.\ \cite{Romans:1991nq, Gauntlett:2001qs}), leading to the first examples of static supersymmetric black holes in AdS$_4$ with a spherical horizon of finite area in gauged supergravity coupled to vector multiplets \cite{Cacciatori:2009iz,Dall'Agata:2010gj,Hristov:2010ri}. These examples were later extended to allow for a full set of electromagnetic charges \cite{Halmagyi:2013qoa, Katmadas:2014faa, Halmagyi:2014qza}. Most recently, the microstate counting for these static solutions in the gauged STU model (admitting an uplift to M-theory) was performed successfully in the large $N$ limit of the dual ABJM theory via supersymmetric localization \cite{Benini:2015eyy,Benini:2016hjo,Benini:2016rke,Cabo-Bizet:2017jsl}. The match was also extended to various other examples and in different directions, see e.g.\ \cite{Hosseini:2016tor,Azzurli:2017kxo,Liu:2017vll,Hosseini:2017fjo,Benini:2017oxt,Liu:2018bac,Hristov:2018lod} and references therein. 
The presence of a magnetic flux (in the electrically gauged symplectic frame) is crucial for preserving supersymmetry by the so called supersymmetric twist: the contribution of the spin connection cancels against the gauged R-symmetry in the BPS equations. The associated ground states of the dual field theory with magnetic flux are counted by the topologically twisted index \cite{Benini:2015noa}. The twist characterizing the static BPS solutions in AdS$_4$ leads to an additional special feature: the fermionic symmetries commute with spatial rotations \cite{Hristov:2011ye}. It is therefore immediate to see that adding nonzero angular momentum does not break any further symmetries, unlike the case of supersymmetric asymptotically flat black holes. Constructing explicitly such black holes is however a nontrivial task and it is the purpose of the present work to find such solutions in $\mathcal{N}=2$ gauged supergravity models with embedding in string or M-theory. The supersymmetric rotating black hole solutions that have appeared in the literature so far include electrically charged 1/4-BPS solutions \cite{Kostelecky:1995ei} without a regular static limit, as well as 1/4-BPS black holes with magnetic charges \cite{Caldarelli:1998hg, Klemm:2011xw}, which must necessarily have a non-compact horizon. The known non-extremal rotating black hole configurations supported by scalars and gauge fields \cite{Chong:2004na,Chow:2010sf,Chow:2010fw,Chow:2013gba,Gnecchi:2013mja}, also display similar features and are not continuously connected to static magnetic black holes with spherical horizons, for which the counting is known. In this paper, we construct 1/4-BPS black hole solutions that have either compact or non-compact horizons and realize the same topological twist via magnetic flux\footnote{Here we again refer to a frame where the gauging is purely electric. 
More generally, this requirement implies that the vector of FI gaugings must be mutually nonlocal with the vector of charges, i.e.\ their inner product must not vanish.} as the static black holes of \cite{Cacciatori:2009iz,Dall'Agata:2010gj,Hristov:2010ri}, to which they reduce in a static limit. We find that the rotation manifests itself as a slicing of spacetime in terms of a squashed sphere (or a squashed hyperbolic space in the non-compact case), rather than a constant curvature one as in the static case. We therefore expect the microstate counting for these rotating AdS black holes to be based on the refined topologically twisted index on a similarly squashed background as described in \cite{Benini:2015noa}. Such a microstate counting can provide precious insight into the microscopic structure of more realistic spinning black holes, belonging to the class of extremal Kerr with or without cosmological constant, since the relevant degrees of freedom are expected to be universal and reside in the near horizon geometry. In this work we make extensive use of the real formulation of supergravity \cite{Klemm:2012yg, Klemm:2012vm} in terms of the rank-four symplectic covariant tensor $I_4$ \cite{Ferrara:1997uz,Ferrara:2006yb,Bossard:2013oga}. Although this way of repackaging $4d$ ${\cal N}=2$ supergravity is less popular in the literature, we find it very convenient for solving the BPS equations. We therefore give an extensive introduction to the real formulation and the properties of the quartic invariant. The manifest symplectic covariance of the real formulation makes it possible to solve for a very general near-horizon ansatz, which allows for a full set of electromagnetic charges $(q_I, p^I)$, subject to the twisting condition\footnote{The vanishing of the NUT charge, imposed to ensure causality, provides an additional constraint among the set of conserved charges.}, and angular momentum ${\cal J}$ bounded from above, for arbitrary symmetric models and gaugings. 
The real, or $I_4$, formalism then also allows us to write down full analytic flows to the asymptotic AdS$_4$ region in the models we consider. We finish this work with a proposal for the BPS entropy function for our rotating solutions, such that we can formulate the black hole entropy and attractor equations through an extremization principle. The existence of a BPS entropy function has proven crucial in the successful holographic matches so far. Adding rotation, we find a more intricate form of the entropy function that simplifies drastically only for a special class of models that does not exhibit AdS$_4$ asymptotics. Our results therefore provide a nontrivial check for the large $N$ evaluation of the refined twisted index of the holographically dual theories. As an appetizer, we can already present the general formula for the Bekenstein-Hawking entropy of the spherical rotating black holes in AdS$_4$, \begin{equation} S_{BH} = \frac{\pi l_{AdS}^{2} } { \sqrt2 G_N} \sqrt{ \frac14\?I_4(\Gamma, \Gamma, G, G) + \sqrt{\frac1{16}\?I_4(\Gamma, \Gamma, G, G)^2 - 4 \frac{(I_4(\Gamma) + {\cal J}^2)}{l_{AdS}^{4}} }}\,, \end{equation} given in terms of the AdS length scale $l_{AdS}$, the conserved angular momentum ${\cal J}$, and the symplectic vectors of electromagnetic charges $\Gamma$ and gauging parameters $G$\footnote{Notice that these black holes, just like those considered for the microstate counting in the static case \cite{Cacciatori:2009iz}, do not have a well defined flat space limit.}. The rank-four symplectic tensor $I_4$ then encodes the properties of the scalar manifold, and we discuss it more explicitly in the main text emphasizing its properties for the STU model (which has a higher-dimensional origin for several choices of gauging vectors $G$). Some other interesting questions are left for future investigations. 
For instance, in this paper we have constructed black hole solutions which do not admit an ergosphere associated to the Killing vector $\partial_t$: it would be interesting to construct the additional Kerr-like orbit, and ultimately connect these solutions as limits of a rotating black hole with temperature (non-extremal generalization), along the lines of \cite{Chow:2013gba,Gnecchi:2013mja}. It would be moreover interesting to compute the renormalized on-shell action of these solutions, and elucidate the holographic mapping of the fugacities. Another interesting direction would be to use the general form of the four-dimensional solution presented here to generate rotating five-dimensional black string solutions \cite{Benini:2013cda}, via the uplift procedure along the lines of \cite{Hristov:2014eza,Hosseini:2017mds,Azzola:2018sld}. We hope to come back to these points in the near future. The rest of this paper is organized as follows. Section \ref{sec:BPSequations} provides some necessary background, in particular the real formulation of the supergravity degrees of freedom and the relevant BPS equations for models with symmetric scalar manifold. In section \ref{sec:warm-up} we summarize some results on the static solutions and their possible asymptotics that are useful in later sections and allow to build intuition on the construction of solutions in the real formulation. We then proceed to analyze the rotating near horizon geometry in section \ref{sec:nhg}, adopting an ansatz that incorporates the topological twist for a squashed internal space. This results in a family of rotating BPS attractors satisfying the same number of constraints as the corresponding static one, to which it reduces when the rotation parameter is turned off. 
We provide several examples for such near-horizon geometries in section \ref{subsec:nhg-examples}, including (the Kaluza-Klein reduction of) the near-horizon geometry of the five dimensional solutions in \cite{Gutowski:2004yv, Kunduri:2006ek}, as well as new examples in the STU model with gaugings that admit an AdS$_4$ vacuum. In section \ref{sec:full-sol} we study the complete BPS black hole flow interpolating between the near-horizon geometry and asymptotically locally AdS$_4$ space, once again providing an analytic example solution. We further comment on the asymptotic properties of our solutions and identify the conserved quantities. Finally, section \ref{sec:entropy} focuses on the entropy function for our family of solutions, providing an appropriate extremization principle similar to the one used in the static case to match to the twisted index in the dual field theory. The two appendices provide more details on our conventions on ${\cal N}=2$ supergravity coupled to vector multiplets with a symmetric scalar manifold and on the reduction of the solutions of \cite{Gutowski:2004ez,Gutowski:2004yv, Kunduri:2006ek}, respectively. \section{Supergravity formalism and BPS equations} \label{sec:BPSequations} \subsection{FI gauged supergravity} \label{subsec:sugra} Our starting point is the bosonic action for abelian gauged supergravity \cite{deWit:1984pk, deWit:1984px}, \begin{equation} \label{Ssugra4D} S_\text{4D}=\frac{1}{16\pi}\int_{M_4} \Bigl(R\star_4 1 - 2\,g_{i\bar\jmath}\,d t^i\wedge\star_4 d \bar{t}^{\bar\jmath} - \tfrac12 F^I\wedge G_I + 2\,V_g\,\star_4 1\Bigr), \end{equation} which describes neutral complex scalars $t^i$ (belonging to the $n_\mathrm{v}$ vector multiplets) and abelian gauge fields $F_{\mu\nu}{}^I$, $I=\{0,\,i \}=0,\dots n_\mathrm{v}$ (from both the gravity multiplet and the vector multiplets), all coupled to gravity. We refer to appendix \ref{sec:conventions} for more details on our conventions for ${\cal N}\!=\!2$ supergravity. 
The dual gauge fields, $G_{\mu\nu}{}_I$, are given in terms of the field strengths and the scalar dependent period matrix ${\cal N}_{IJ}$, by \begin{equation}\label{G-def} G^-_{\mu\nu}{}_I = {\cal N}_{IJ} F^-_{\mu\nu}{}^J\,, \end{equation} where the explicit expression for the period matrix is not necessary in the following. The metric on the special K\"ahler target space $g_{i\bar\jmath}$ and the period matrix ${\cal N}_{IJ}$ are completely specified by a holomorphic homogeneous function of degree two $F(X)$, of the projective coordinates $X^I$ on the scalar manifold, as in \cite{deWit:1984pk, deWit:1984px}. One may recover the physical scalars as $t^i=X^i/X^0$. One can repackage the scalars in terms of a symplectic covariant section, as \begin{equation}\label{eq:sym-sec-0} {\cal V}={\mathrm{e}}^{{\cal K}/2} \begin{pmatrix} X^I\\ F_I\end{pmatrix}\,, \qquad F_I= \frac{\partial F}{\partial X^I}\,, \end{equation} where $\mathcal{K}$ is the K\"ahler potential and $F_I$ stands for the derivatives of $F(X)$. Note that ${\cal V}$ is uniquely specified by the physical scalars up to a local U(1) transformation. Finally, the scalar potential $V_g$ takes the form \begin{equation}\label{gau-pot} V_g= g^{i\bar\jmath}\?Z_i(G)\,\bar Z_{\bar\jmath}(G) -3\,\left|Z(G)\right|^2 \,, \end{equation} where we used the definitions of the central charge \begin{align}\label{ch-def-0} Z(G) = \Iprod{G}{{\cal V}} \equiv {\mathrm{e}}^{{\cal K}/2} \left( g_I\? X^I - g^I\? F_I \right) \,, \end{align} and its K\"ahler covariant derivative, $Z_i(G)$. The constant symplectic vector $G=\{g^I, g_I\}$ defines the so-called Fayet-Iliopoulos (FI) terms, which specialize the precise combination of $U(1)$ gauge fields gauging the R-symmetry. Note that in \eqref{ch-def-0} we defined in passing the inner product between two symplectic vectors, which will be often used in the following. 
In the abelian class of gaugings we consider in this paper, the FI terms specify the coupling of the gravitini\footnote{We have omitted the fermionic completion of the Lagrangian, which can be found in \cite{deWit:1984pk, deWit:1984px}.}, as their kinetic term contains the minimal coupling \begin{gather}\label{eq:gravitino} \epsilon^{\mu\nu\rho\sigma} \bar\psi_{\mu}{}_i\gamma_\nu\,D_\rho\psi_{\sigma}{}^i \equiv \epsilon^{\mu\nu\rho\sigma} \bar\psi_{\mu}{}_i\gamma_\nu\left( \partial_\rho + \tfrac{i}2\,\Iprod{G}{A_\rho} \right)\psi_{\sigma}{}^i\,, \\ \Iprod{G}{A_\mu} \equiv g_I A_\mu{}^I - g^I A_{\mu}{}_I\,.\nonumber \end{gather} This coupling is in general non-local, due to the presence of the dual gauge fields $A_{\mu}{}_I$. However, as for any vector, $G$ can always be rotated to a frame such that it is purely electric, i.e.~$g^I=0$, leading to a local coupling of the gauge fields. More generally, one can consider couplings of magnetic vectors as well, using the embedding tensor formalism \cite{deWit:2005ub, deWit:2011gk}, which requires the introduction of extra auxiliary fields. For the theories discussed in this paper however, the bosonic action is only affected through the nontrivial potential \eqref{gau-pot}, which can be straightforwardly written in an electric/magnetic covariant way, as above, reproducing the result obtained using the embedding tensor formalism. Based on this observation, we use covariant versions of all quantities, since all results for the bosonic backgrounds must necessarily be covariant under electric/magnetic duality. We therefore need not choose a frame for the FI terms explicitly from the outset, only specifying a frame when discussing examples. The symplectic section ${\cal V}$, along with the vector of FI terms, $G$, and the natural symplectic vectors of field strengths and charges in \eqref{eq:dual-gauge}, may be used to describe all the supergravity degrees of freedom in terms of symplectic covariant objects. 
The action \eqref{Ssugra4D} can be recast as \begin{equation} \label{Ssugra4D-sym} S_\text{4D}=\frac{1}{16\pi}\int_{M_4} \Bigl(R\star_4 1 - \mathrm{i}\?\Iprod{D^\mu \bar{\cal V}}{D_\mu {\cal V}} - \tfrac12 F^I\wedge G_I + 2\,V_g\,\star_4 1\Bigr) \,, \end{equation} where $D_\mu$ stands for the K\"ahler connection. Note that the kinetic terms for the vector fields cannot be written in a manifestly duality covariant form, while keeping Lorentz invariance. As in this paper we concentrate on stationary black hole solutions, we can in fact describe the degrees of freedom of the vector fields in a duality covariant form. The requirement of a timelike isometry is enough to cast the spacetime in the form \begin{equation}\label{eq:metr-bps} ds^2_4 = -{\mathrm{e}}^{2\?U} \?(dt+\omega )^2 + {\mathrm{e}}^{-2\?U}\?ds^2_3 \,, \end{equation} where $ds^2_3$ is the metric of a three-dimensional base space, on which the function $U$, the one-form $\omega$ and all other fields discussed above are defined. Similarly, the gauge field strengths in \eqref{eq:dual-gauge} can be decomposed as \begin{gather}\label{eq:F-time-dec} \mathsf{F} = d\big( \zeta\,(dt+\omega) \big) + {\cal F} = d\big( \zeta\,(dt+\omega) \big) + d {\cal A} \,, \end{gather} where $\zeta$, ${\cal F}$, ${\cal A}$ denote the symplectic vectors of timelike components and spatial field strengths and potentials, for both electric and magnetic gauge fields. The complex self-duality condition \eqref{cmplx-sdual} can be used to relate the time- and spacelike components as \begin{equation}\label{eq:zeta-F} d \zeta = {\mathrm{e}}^{2\?U} \?\mathrm{J}\?\star \left( {\cal F} + \zeta\? d\omega \right)\,, \end{equation} where $\star$ will denote the three-dimensional Hodge operator for the remainder of this paper. 
Finally, the degrees of freedom of the scalars are again described by the section, in a combination encoding both the physical scalars and the scale factor ${\mathrm{e}}^U$, as \begin{equation}\label{eq:q-def} \mathcal{R} = 2\?{\mathrm{e}}^{U}\? \Re \left( {\mathrm{e}}^{-\mathrm{i}\?\alpha} {\cal V} \right)\,, \end{equation} where $\alpha$ is an arbitrary function, parametrizing the unphysical overall phase of the symplectic section. Using the timelike isometry one may reduce the original four-dimensional action down to three dimensions, which leads to the so-called real formulation of special geometry. Sparing the reduction details, here we directly give the resulting Lagrangian as derived in \cite{Klemm:2012yg, Klemm:2012vm} \begin{align} \label{eq:real-formulation-Lag} e^{-1} {\cal L} = &\,\, \frac{1}{2}\? R_3 - \tilde{H}^{{\scriptscriptstyle M}{\scriptscriptstyle N}} \left(\?\partial \mathcal{R}_{\scriptscriptstyle M} \? \partial \mathcal{R}_{\scriptscriptstyle N} - \partial \zeta_{\scriptscriptstyle M} \? \partial \zeta_{\scriptscriptstyle N} \?\right) + \frac{1}{H}\? V_{3d} \nonumber \\ &\, - \frac{1}{16\?H^2} \? \Iprod{\mathcal{R}}{ \partial \mathcal{R}}^2 + \frac{1}{8\?H^2} \? \Iprod{\mathcal{R}}{ \partial \zeta}^2 - \frac{1}{4 H^2} \left( \partial \tilde{\phi} + \tfrac12\? \Iprod{\zeta}{ \partial \zeta} \right)^2 \,, \end{align} where the field strengths ${\cal F}$ were dualised into the $\zeta$'s using \eqref{eq:zeta-F} and the scalar $\tilde{\phi}$ is defined through \begin{equation} d \tilde{\phi} + \tfrac12\? \Iprod{\zeta}{ d \zeta} = 4\?H^2 \star d \omega \,. \end{equation} The function $H(\mathcal{R})$ is the so-called Hesse potential, which plays a role analogous to the prepotential in four dimensions, and in fact the two objects are related through the reduction. When evaluated with $\mathcal{R}$ as in \eqref{eq:q-def}, the Hesse potential evaluates to \begin{equation} H = -\tfrac12\? 
{\mathrm{e}}^{2\?U} \,, \end{equation} while the remaining objects in \eqref{eq:real-formulation-Lag} are given as \begin{equation} \tilde{H} = -\frac12 \? \log( -2 H ) \,, \qquad \tilde{H}^{{\scriptscriptstyle M}} = \frac{\partial \tilde{H}}{\partial \mathcal{R}_{\scriptscriptstyle M}}\,, \qquad \tilde{H}^{{\scriptscriptstyle M}{\scriptscriptstyle N}} = \frac{\partial^2 \tilde{H}}{\partial \mathcal{R}_{\scriptscriptstyle M} \partial \mathcal{R}_{\scriptscriptstyle N}} \,. \end{equation} The scalar potential $V$ in \eqref{gau-pot} leads to the following expression for the potential in three dimensions \begin{equation}\label{eq:3d-pot} \frac{1}{H}\?V_{3d} = G_{\scriptscriptstyle M}\? G_{\scriptscriptstyle N}\?\left( - \tilde{H}^{{\scriptscriptstyle M}{\scriptscriptstyle N}} + 4\? \tilde{H}^{{\scriptscriptstyle M}} \tilde{H}^{{\scriptscriptstyle N}} \right) + \frac{1}{8\?H^2}\? \Iprod{G}{\mathcal{R}}^2 \ . \end{equation} Before closing, we point out the existence of the dual coordinates, defined as \begin{equation}\label{eq:dual-coo} \mathcal{I}_{\scriptscriptstyle M} = -2\? \Omega_{{\scriptscriptstyle M}{\scriptscriptstyle N}}\tilde{H}^{\scriptscriptstyle N} = 2\? {\mathrm{e}}^{-U}\? \Im \left( {\mathrm{e}}^{-\mathrm{i}\?\alpha} {\cal V} \right)_{\scriptscriptstyle M} \,, \end{equation} where $\Omega_{{\scriptscriptstyle M}{\scriptscriptstyle N}}$ is the (inverse) anti-symmetric symplectic structure matrix that serves for raising and lowering indices and in the second equality we re-expressed this object in terms of the section. This equation expresses the fact that only half of the components in the symplectic section \eqref{eq:sym-sec-0} are independent, so that its imaginary part is given in terms of the real part. Note that the variables $\mathcal{I}$ are useful in obtaining explicit solutions, as will be seen in the following, so that an explicit algebraic way of computing the Hesse potential $H$ is very useful. 
This is true for the symmetric models considered in the rest of this paper. \subsection{Symmetric models and the real formulation} We henceforth fully restrict our considerations to models whose special K\"ahler target manifold parametrized by the scalars is a symmetric space, classified in \cite{deWit:1992wf}. For the cubic models, i.e.\ those whose prepotential is of the form \begin{equation}\label{prep-def-0} F=\frac{1}{6}\?c_{ijk}\?\frac{X^i X^j X^k}{X^0} \,, \end{equation} this requirement translates into the following identity for the constant tensor $c_{ijk}$ \begin{equation}\label{eq:symm-cub} \frac43\?\delta_{i(l}\? c_{mpq)} = c_{ijk}\?c_{j^\prime (l m} \?c_{pq)k^\prime}\?\delta^{j j^\prime}\?\delta^{k k^\prime}\,. \end{equation} In terms of duality covariant objects, the property \eqref{eq:symm-cub} is expressed by the existence of a rank-4 symplectic tensor satisfying a number of identities, discussed in Appendix \ref{app:I4}. When contracted e.g.\ with a charge vector $\Gamma = \{p^I, q_I\}$, this defines the quartic form \begin{equation}\label{I4-ch-0} I_4(\Gamma)= - (p^0 q_0 + p^i q_i)^2 + \frac{2}{3} \,q_0\,c_{ijk} p^i p^j p^k - \frac{2}{3} \,p^0\,c^{ijk} q_i q_j q_k + c_{ijk}p^jp^k\,c^{ilm}q_lq_m\,, \end{equation} which is invariant under symplectic transformations. This is particularly useful in view of the fact that symplectic transformations do not necessarily leave the form of the prepotential \eqref{prep-def-0} invariant, while a prepotential might not exist at all in a certain frame. The duality covariant formulation, based on the quartic invariant \eqref{I4-ch-0}, allows one to treat all frames on the same footing. The quartic invariant provides an explicit solution for the Hesse potential $H$ of symmetric models, which encodes the three-dimensional real formulation of the theory \eqref{eq:real-formulation-Lag}, as \begin{equation} H = -\frac12\?{\mathrm{e}}^{2\?U} = -\frac12\? \sqrt{I_4(\mathcal{R})} \,. 
\end{equation} This identification allows for a completely algebraic rewriting of the various objects in the previous section in terms of contractions of $I_4$ with the various symplectic vectors. In particular, \eqref{eq:dual-coo} becomes \begin{equation} \mathcal{I} = \frac1{2\?I_4(\mathcal{R})}\? I^\prime_4(\mathcal{R}) = \frac12\? {\mathrm{e}}^{-4\?U}\? I^\prime_4(\mathcal{R})\,, \end{equation} while its inverse is similarly given by \begin{equation}\label{eq:r-i} \mathcal{R} = - \frac1{2\?I_4(\mathcal{I})}\? I^\prime_4(\mathcal{I}) = - \frac12\? {\mathrm{e}}^{4\?U}\? I^\prime_4(\mathcal{I})\,. \end{equation} We refer again to Appendix \ref{app:I4} for the definition of $I^\prime_4$ and higher derivatives of $I_4$ as symplectic tensors, as well as various identities among them. In what follows we will make extensive use of the quartic invariant $I_4$ contracted with the various symplectic vectors describing the scalars, vectors, charges, FI terms and so on in the theory. In fact one can completely write the Lagrangian using $I_4$ contractions. For future reference, we provide the expressions for the derivatives of $\tilde H$, from which the Lagrangian \eqref{eq:real-formulation-Lag} is built \begin{align} \tilde{H}^{\scriptscriptstyle M} & = -\frac{1}{4}\?\frac{ \partial^{\scriptscriptstyle M} I_4(\mathcal{R})}{I_4(\mathcal{R})}\,, \\ \label{eq:Hab-tensor} \tilde H^{{\scriptscriptstyle M}{\scriptscriptstyle N}} & = \frac{1}{4\?I_4(\mathcal{R})}\?\left( - \partial^{\scriptscriptstyle M}\partial^{\scriptscriptstyle N} I_4(\mathcal{R}) + \frac{1}{I_4(\mathcal{R})}\? \partial^{\scriptscriptstyle M} I_4(\mathcal{R}) \? \partial^{\scriptscriptstyle N} I_4(\mathcal{R})\right)\,. 
\end{align} A benchmark model in this class is provided by the STU model (sometimes also called electric STU model to distinguish it from its symplectically rotated version we discuss below), with prepotential \begin{equation}\label{eq:STU-def} F^{\scriptscriptstyle STU}= \frac{X^1 X^2 X^3}{X^0} \,, \end{equation} so that it describes three vector multiplets coupled to supergravity. In this case the only non-vanishing coefficients are $c_{123} = 1$ and permutations lead to $c^{123} = 1$ and permutations. The quartic invariant of a charge vector is therefore \begin{align}\label{I4-ch-stu} I_4(\Gamma)^{\scriptscriptstyle STU}=&\? - (p^0 q_0 + p^i q_i)^2 + 4\,q_0 p^1 p^2 p^3 - 4\,p^0 q_1 q_2 q_3 \nonumber\\ &\? + 4 (p^1 p^2 q_1 q_2 + p^1 p^3 q_1 q_3 + p^2 p^3 q_2 q_3)\,. \end{align} From this expression, it is clear that any purely electric or magnetic $\Gamma$ leads to $I_4(\Gamma)^{\scriptscriptstyle STU}=0$. This in turn implies that an AdS$_4$ vacuum in this model requires a mixed electric/magnetic vector of FI terms, $G$, since the cosmological constant is controlled by $I_4(G)$ in any symmetric model (which we show carefully in section \ref{sec:warm-up}). If we apply the symplectic transformation defined by \begin{equation}\label{eq:STU-sym-rot} \begin{pmatrix} p^0 \\ p^i \\ q_i \\ q_0 \end{pmatrix} \rightarrow \begin{pmatrix} -p^0 \\ - q_i \\ p^i \\ -q_0 \end{pmatrix}\,, \end{equation} on all symplectic vectors, one transforms the prepotential \eqref{eq:STU-def} into the magnetic STU model \begin{equation}\label{eq:STU-root} F^{\scriptscriptstyle mSTU}= 2\?i \sqrt{X^0 X^1 X^2 X^3}\,, \end{equation} while \eqref{I4-ch-stu} becomes \begin{align}\label{I4-ch-root} I_4(\Gamma)^{\scriptscriptstyle mSTU}=&\? - (p^0 q_0 - p^i q_i)^2 + 4\,q_0 q_1 q_2 q_3 + 4\,p^0 p^1 p^2 p^3 \nonumber\\ &\? + 4 (p^1 p^2 q_1 q_2 + p^1 p^3 q_1 q_3 + p^2 p^3 q_2 q_3)\,. 
\end{align} This frame, despite its non-cubic prepotential, is equivalent to the one in \eqref{eq:STU-def}, but allows for an AdS$_4$ vacuum with a purely electric gauging. In fact, this model arises by consistent truncation of M-theory \cite{Duff:1999gh,Cvetic:1999xp}, so that the corresponding solutions may be oxidized to eleven-dimensional supergravity. We finish this subsection by briefly mentioning two simple models that can be obtained by truncations of \eqref{eq:STU-def} and \eqref{eq:STU-root}. In particular, the T$^3$ model is found by setting all three scalars in the STU model to be equal, with the result \begin{align}\label{eq:T3-model} F^{\scriptscriptstyle T^3}= &\? \frac{(X^1)^3}{X^0}\,, \nonumber\\ I_4(\Gamma)^{\scriptscriptstyle T^3}= &\? - (q_0 p^0)^2 - 6\? q_0 p^0 q_1 p^1 + 3\? (q_1 p^1)^2 + 4\? q_0 (p^1)^3 - 4\? p^0 (q_1)^3\ . \end{align} Similarly, one may obtain the minimally coupled $X^0 X^1$ model \begin{align}\label{eq:X0X1-model} F^{X^0 X^1} = i X^0 X^1\ , \qquad I_4(\Gamma)^{X^0 X^1}= (q_0 q_1 + p^0 p^1)^2 \ , \end{align} by identifying $X^2=X^0$ and $X^3=X^1$ in the prepotential \eqref{eq:STU-root} and appropriately rescaling the scalars and charges. \subsection{BPS equations} \label{subsec:BPS} The BPS equations for solutions of abelian gauged $\mathcal{N}=2$ supergravity with a timelike Killing vector were given in \cite{Cacciatori:2008ek, Meessen:2012sr, Chimento:2015rra}. We start from the BPS equations as conveniently summarized in the latter paper for the timelike class, for which the metric takes the form \eqref{eq:metr-bps}, as appropriate for black hole solutions. 
The BPS equations fix the gauge field strengths ${\cal F}$ in \eqref{eq:F-time-dec} in terms of the scalars, so that the Maxwell equations and Bianchi identities take the form of a Poisson equation on the base metric, as \begin{gather}\label{eq:Poiss-Ortin} d {\cal F} = -d\left[ \star d \mathcal{I} - 2\, {\mathrm{e}}^{-4\?U}\,\Iprod{\star \hat{G}}{\mathcal{R}}\,\mathcal{R} + \? {\mathrm{e}}^{-2\?U}\mathrm{J}\star\hat{G} \right] +d \omega\wedge \hat{G} = 0\,, \end{gather} where $\mathrm{J}$ is the scalar dependent complex structure in \eqref{CY-hodge}, $\hat{G}$ is the direct product of the vector of gaugings with a one-form\footnote{This is necessarily so for theories without hypermultiplets, while in the more general case $\hat{G}$ is replaced by the hypermultiplet moment maps.}. Introducing the vielbeine $e^x$, with $x$, $y \dotso =1,2,3$, for the three-dimensional base metric $ds^2_3$, $\hat{G}$ must satisfy the equation \begin{equation}\label{eq:de-bps} d e^x - \Iprod{\hat{G}}{\mathcal{I}} \wedge e^x + \varepsilon^{xyz}\Iprod{\cal A}{\hat{G^y}}\wedge e^z =0 \,, \end{equation} where ${\cal A}$ denotes the spatial gauge potentials in \eqref{eq:F-time-dec}. The final BPS equation imposes that the rotation one-form $\omega$ must satisfy \begin{align} \star d\omega = &\, \Iprod{d \mathcal{I}}{\mathcal{I}} - 2\? {\mathrm{e}}^{-4\?U}\,\Iprod{\hat{G}}{\mathcal{R}} \label{eq:omega-eqn-gen} \\ = &\, {\mathrm{e}}^{-4\?U}\,\Iprod{\mathcal{R}}{d \mathcal{R} + 2 \?\hat{G} } \label{eq:omega-eqn-gen-2}\,, \end{align} where in the second line we re-expressed the first term through the variable $\mathcal{R}$. The conditions \eqref{eq:Poiss-Ortin}, \eqref{eq:de-bps} and \eqref{eq:omega-eqn-gen} are sufficient to preserve supersymmetry. The Poisson equation \eqref{eq:Poiss-Ortin} guarantees the local existence of the spatial gauge field strengths, ${\cal F}$, which can be obtained by writing this equation as a total derivative. 
However, this is subtle in general due to the last term on the RHS, as it can be written as a total derivative in more than one way. In particular, invariance of ${\cal F}$ following from \eqref{eq:Poiss-Ortin} under time reparametrizations, $\omega \rightarrow \omega + d \sigma$, for any function $\sigma$ on the base, requires that $\hat G$ be exact on a simply connected manifold. We write \begin{equation}\label{eq:r-def} \hat G = G\? d\rho \,, \end{equation} for some function $\rho$, to be determined by \eqref{eq:de-bps} once a particular base is chosen. One may then recast \eqref{eq:Poiss-Ortin} as the flow equation \begin{gather}\label{eq:fl-Ortin} \star d \mathcal{I} - 2\, {\mathrm{e}}^{-4\?U}\,\Iprod{\star \hat{G}}{\mathcal{R}}\,\mathcal{R} + \? {\mathrm{e}}^{-2\?U}\mathrm{J}\star\hat{G} -\rho\? d \omega\? G + {\cal F} = 0 \,. \end{gather} Here, we note that our choice of total derivative is consistent with the complex anti-selfduality condition \eqref{cmplx-sdual}, where the time components of the gauge fields are given by \eqref{eq:zeta-F} as \begin{equation}\label{eq:zeta-BPS} d\zeta = d\mathcal{R} - \hat{G}\,. \end{equation} The flow equation \eqref{eq:fl-Ortin} can be simplified using the identity \eqref{I4toJ}, repeated here in terms of the variables $\mathcal{R}$ and $\mathcal{I}$ \begin{align} \label{I4toJ-2} \frac12\, I^\prime_4(\mathcal{I} , \mathcal{I}, \Gamma) = 2\,\Iprod{\Gamma}{\mathcal{I}}\,\mathcal{I} +4\,{\mathrm{e}}^{-4\?U}\,\Iprod{\Gamma}{\mathcal{R}}\,\mathcal{R} - 2\,{\mathrm{e}}^{-2\?U}\mathrm{J}\,\Gamma \,, \end{align} leading to \begin{align}\label{eq:fl-Ortin-s} \star d \mathcal{I} + \Iprod{\star \hat{G}}{\mathcal{I}}\,\mathcal{I} -\frac14\? I^\prime_4(\mathcal{I} , \mathcal{I} , \star\hat{G}) &\, -\rho\? d \omega\? G + {\cal F} = 0\,. 
\end{align} In this form, the flow equation is algebraic in terms of the combination $\mathcal{I}$, allowing for simpler manipulation using techniques similar to \cite{Katmadas:2014faa, Halmagyi:2014qza}, as will be seen below. In all the cases we consider we are interested in black holes corresponding to a radial flow from the asymptotic region to the near-horizon geometry. We can therefore already specify an ansatz for the three-dimensional base space as a product of the radial direction with a 2d surface $\Sigma$, as \begin{equation}\label{eq:3d-metr} d s_3^2 = dr^2 + {\mathrm{e}}^{2 \psi (r)}\? ds^2_{\Sigma}\ . \end{equation} We can then decompose the vielbein in terms of the vielbein $\hat{e}^{a}$ on the surface $\Sigma$ \begin{align} e^1 = dr\,, \qquad e^a = {\mathrm{e}}^\psi\ \hat{e}^{a}\,, \end{align} where the indices $a, b \dotso=2,3$. For this basis it turns out that the function $\rho$ associated with the one-form $\hat{G}$ becomes the radial coordinate, such that \begin{equation} \hat{G} = G\ dr\ . \end{equation} With this ansatz we find further simplifications in \eqref{eq:de-bps} and \eqref{eq:fl-Ortin-s} so that we can finally present the full set of BPS equations in the compact form \begin{subequations} \label{eq:final} \begin{empheq}[box=\fbox]{align} \label{eq:final-psi} \psi' =&\, \Iprod{G}{\mathcal{I}}\ , \rule[.5cm]{0cm}{0cm} \\ \label{eq:final-Sig} \hat{\omega}^{ab} = &\, \varepsilon^{ab}\?\Iprod{G}{{{\cal A}}}\ , \\ \label{eq:final-omega} \star d\omega = &\, \Iprod{d \mathcal{I}}{\mathcal{I}} + \Iprod{G}{I^\prime_4(\mathcal{I})}\ d r\ , \\ \label{eq:final-flow} \quad {\mathrm{e}}^{-\psi}\ d ({\mathrm{e}}^\psi \mathcal{I}) =&\, \frac14\? I^\prime_4(\mathcal{I} , \mathcal{I} , G)\ d r + G\ r\ \star d \omega\? 
- \star {\cal F}\ , \quad {\rule[-.5cm]{0cm}{0cm}} \end{empheq} \end{subequations} where $\varepsilon^{ab} = \varepsilon^{1ab}$ and $\hat{\omega}^{ab}$ denote the antisymmetric symbol and the spin connection on the Riemann surface $\Sigma$, respectively. In order to obtain black hole solutions, one must solve \eqref{eq:final} for a given electromagnetic charge vector $\Gamma = \{p^I, q_I\}$, defined as \begin{equation}\label{eq:charge-def} \Gamma \equiv \frac{1}{\mathrm{Vol}_\Sigma} \int_{\Sigma} {\cal F}\ , \end{equation} along with boundary conditions for the scalars that will be spelled out in the following section. Note that the requirement \eqref{eq:charge-def} fixes the spatial field strengths ${\cal F}$ completely in the static case in general, as well as in a class of rotating asymptotically flat black holes \cite{Bossard:2012xsa}, but not for rotating black holes in gauged supergravity, as will be seen in due course. \section{Warm-up: asymptotics and static black holes} \label{sec:warm-up} Before proceeding with the analysis of the BPS equations for rotating black holes, we pause to present some known properties of the solutions to \eqref{eq:final}, in order to gain some extra intuition and understanding of the real formulation of supergravity. We focus on the possible asymptotic structure of the solutions depending on the gauging, listing known examples for each case, and then give a short review of the static asymptotically AdS$_4$ black hole solutions. \subsection{Asymptotic structure and string theory embeddings} \label{sec:as-vac} We first consider the asymptotic behaviour of solutions, depending on the model and on the choice of gauging. 
As it turns out, for scalar manifolds specified by the cubic prepotential and in particular for the STU model, one can asymptotically realize various possibilities, including AdS$_4$, Minkowski space, as well as hyperscaling violating Lifshitz (hvLif) spacetimes\footnote{For these latter hyperscaling violating geometries the potential is not stabilized at infinity: the scalar fields do not asymptotically reach a constant value. With a slight abuse of terminology we will refer to them as ``runaway vacua''.}, depending on the choice of an explicit gauging vector, $G$. For each of these vacua and types of gauging vectors we then give examples of models with a string/M-theory origin. We consider the following expansion in the asymptotic region, around $r\rightarrow \infty$, parametrized by a constant $\lambda$ and a constant symplectic vector ${\cal A}_2$, as \begin{equation}\label{eq:asympt-exp} {\mathrm{e}}^{2\?\psi}\?\mathcal{I} = \lambda\, I^\prime_4(G)\, r^3 + {\cal A}_2\, r^2 + {\cal O}(r) \,, \end{equation} from which the function ${\mathrm{e}}^\psi$ is determined from \eqref{eq:final-psi} as \begin{equation}\label{eq:asympt-exp-2} {\mathrm{e}}^{2\?\psi} = 2\?\lambda\, I_4(G)\, r^4 + \frac23\, \Iprod{G}{{\cal A}_2}\, r^3 + {\cal O}(r^2) \,. \end{equation} One can easily verify that \eqref{eq:asympt-exp}-\eqref{eq:asympt-exp-2} solve the flow equation \eqref{eq:final-flow} up to a first order expansion asymptotically, for any $\lambda$ and any ${\cal A}_2$ that satisfies \begin{equation} \Iprod{{\cal A}_2}{I^\prime_4(G)} = 0\,. \end{equation} The asymptotic expansion for the combination in \eqref{eq:asympt-exp} specifies the relevant boundary conditions for all solutions considered in this paper, depending on the choice for the gauging and the properties of ${\cal A}_2$. 
We distinguish four cases depending on the properties of the vector $G$: \begin{itemize} \item AdS$_4$ vacuum: \quad $I_4 (G) \neq 0$, \quad $G$ is {\it generic} (rank-4), \item hvLif (class I): \quad $I_4 (G) = 0, \,\, I_4'(G) \neq 0$, \quad $G$ is {\it restricted} (rank-3), \item hvLif (class II): \quad $I_4 (G) = I_4'(G) = 0, \,\, I_4''(G) \neq 0$, \quad $G$ is {\it small} (rank-2), \item $\mathbb{R}^{1,3}$ vacuum: \quad $\frac14\,I_4 (G, G, \Gamma_1, \Gamma_2) = - \Iprod{G}{\Gamma_1}\,\Iprod{G}{\Gamma_2}$, \quad $G$ is {\it very small} (rank-1). \end{itemize} To make things explicit, let us consider the case of the STU model, for which a gauging vector with components \begin{equation} G = \{ g^0,g^1,g^2,g^3,g_0,g_1,g_2,g_3 \}^{\rm T} \,, \end{equation} leads to the following $I_4(G)$ (cf. \eqref{I4-ch-stu}) \begin{align} I_4(G)^{\scriptscriptstyle STU} = &\? - (\sum_I g_I g^I)^2 + 4\,g_0 g^1 g^2 g^3- 4\,g^0 g_1 g_2 g_3 \nonumber\\ &\? + 4 (g^1 g^2 g_1 g_2 + g^1 g^3 g_1 g_3 + g^2 g^3 g_2 g_3)\,. \end{align} Depending on the choice of nonvanishing components of $G$ we can have all of the options mentioned above. We proceed to discuss the properties of the asymptotic geometries of each class that are important in later sections, providing string theory embeddings from reductions of higher dimensional supergravity. \paragraph{AdS$_4$} We start with the generic case, where the gauging vector $G$ is rank-4, so that the constant $\lambda$ is fixed in terms of the AdS$_4$ radius as $\lambda = \frac12\?I_4(G)^{-1/2}$, and \eqref{eq:asympt-exp}-\eqref{eq:asympt-exp-2} become \begin{align}\label{eq:as-sol} {\mathrm{e}}^{2\?\psi}\?\mathcal{I} =&\? \frac1{2\?\sqrt{I_4(G)}}\, I^\prime_4(G)\, r^3 + {\cal A}_2\, r^2 + {\cal O}(r) \,, \nonumber\\ {\mathrm{e}}^{2\?\psi} =&\? 
\sqrt{I_4(G)}\, r^4 + \frac23\,\Iprod{G}{{\cal A}_2}\, r^3 + {\cal O}(r^2) \,, \end{align} It follows that the four dimensional metric \eqref{eq:metr-bps} for the base \eqref{eq:3d-metr} becomes asymptotically \begin{equation} ds^2_{AdS_4} = - \sqrt{I_4(G)}\,r^2\?dt^2 +\frac{1}{\sqrt{I_4(G)}}\, \left( \frac{dr^2}{r^2} + 2\,I_4(G)\?r^2\? ds^2_{\Sigma} \right)\,, \end{equation} which is the standard metric on AdS$_4$ up to rescaling of the time and radial coordinates. An example generic vector is given by \begin{equation} G = \{ g^0,0,0,0,0,g_1,g_2,g_3 \}^{\rm T}\quad \Rightarrow\quad I_4(G) = - 4\,g^0 g_1 g_2 g_3 \,, \end{equation} for the well known case of the gauged $STU$ model in 4d. The choice $-g^0 = g_1 = g_2 = g_3 = 1$, after symplectically rotating as in \eqref{eq:STU-sym-rot}, corresponds to the STU model in the frame \eqref{eq:STU-root}, which arises from maximal 4d gauged supergravity and thus from a direct reduction of 11d supergravity on S$^7$ \cite{Cvetic:1999xp}. \paragraph{hvLif:} A less restricted class of asymptotics is given by the hyperscaling violating Lifshitz (hvLif) runaway vacua, a two-parameter family of asymptotic geometries that includes AdS space as a special case. For the general definition and main properties of hvLif solutions we refer to \cite{Perlmutter:2012he}. Here, we use the parametrization of the hvLif runaway vacua in terms of two constants, the dynamical exponent $z$ and the hyperscaling violation exponent $\theta$, given by \begin{equation} ds^2_{hvLif} = r^{-\theta}\left( - r^{2\?z}\, dt^2 + \frac{dr^2}{r^2} + r^2\? ds^2_{\Sigma} \right)\,, \end{equation} which reduces to AdS$_4$ for $z = 1, \theta = 0$. We divided the hvLif solutions in class I and II depending on the rank of the gauging vector $G$, but in fact there are various possibilities for the values of $z$ and $\theta$ in each case, arising by imposing constraints on the subleading terms specified by the vector ${\cal A}_2$. 
Instead of giving a general discussion, we restrict ourselves here to the explicit examples of interest for us. Within the rank-3 class for $G$ (class I), the metric functions turn out to scale as ${\mathrm{e}}^{U} \sim r^{1/2}$, ${\mathrm{e}}^{\psi} \sim r^{3/2}$ in the general case, leading to the exponents $z=0$ and $\theta=-2$. However, in this paper we are interested in solutions that can be lifted to asymptotically AdS$_5$ geometries. This is achieved by imposing the constraint \begin{equation} I_4(I^\prime_4(G),I^\prime_4(G),{\cal A}_2,{\cal A}_2) =0\,, \end{equation} on the subleading vector ${\cal A}_2$. In this special case, the expansion in \eqref{eq:asympt-exp}-\eqref{eq:asympt-exp-2} describes a behaviour where ${\mathrm{e}}^{2\?U} \sim {\mathrm{e}}^{\psi} \sim r^{3/2}$. The asymptotic four dimensional metric then takes the form \begin{equation} ds^2_{hvLif_I} = - r^{3/2}\,dt^2 + \frac{dr^2}{r^{3/2}} + r^{3/2}\? ds^2_{\Sigma} \,, \end{equation} which, after redefining the radial variable as $r=\rho^2$ and rescaling the other coordinates by a constant factor, takes the form \begin{equation}\label{eq:metr-rank-3} ds^2_{hvLif_I} = \rho\left( - \rho^2\, dt^2 + \frac{d\rho^2}{\rho^2} + \rho^2\? ds^2_{\Sigma} \right)\,. \end{equation} The metric \eqref{eq:metr-rank-3} is of the hvLif type with exponents $\theta=-1$ and $z=1$, in the notation of \cite{Perlmutter:2012he}. A relevant example within the STU model that we use in the following is given by \begin{equation}\label{eq:rank3-vec} G = \{ 0,0,0,0,g_0,g_1,g_2,g_3 \}\quad \Rightarrow\quad I_4(G) = 0,\quad (I_4')_0 (G) = \frac{\partial I_4 (G)}{\partial g^0} = -4\, g_1 g_2 g_3\,, \end{equation} This possibility, with $g_1 = g_2 = g_3 = \frac12$ arises from a dimensional reduction of five-dimensional $STU$ gauged supergravity which is a further truncation of maximal 5d supergravity. Solutions of this theory have interpretation as asymptotically AdS$_5 \times $S$^5$ solutions in type IIB supergravity. 
The four-dimensional hvLif$_I$ runaway vacuum indeed corresponds to the dimensional reduction of AdS$_5$ as discussed in \cite{Hristov:2014eza,Hosseini:2017mds}. We will not be further concerned here with hvLif solutions of class II, coming from rank-2 gaugings. \paragraph{Minkowski} For the case of $G$ being very small, the potential \eqref{eq:3d-pot} vanishes upon using \eqref{eq:Hab-tensor}, so that the bosonic Lagrangian is identical to that of ungauged supergravity, even though $G$ still appears in the gravitino coupling \eqref{eq:gravitino}. Since Minkowski space is not a supersymmetric vacuum in abelian gauged supergravity, asymptotically flat solutions are necessarily non-BPS. Nevertheless, one may still construct such black hole solutions starting from supersymmetric attractors of the type described in \cite{Hristov:2012nu}, so we briefly record this case as well. An example of a very small vector is given by \begin{equation}\label{eq:rank1-vec} G = \{ 0,0,0,0,g_0,0,0,0 \}^{\rm T}\quad \Rightarrow\quad I_4(G) = 0,\quad I_4'(G) = 0\,, \end{equation} for any cubic symmetric model. This possibility arises explicitly from a Scherk--Schwarz reduction of five-dimensional ungauged supergravity with a symmetric scalar manifold, as explained in \cite{Looyestijn:2010pb}. In turn this means one can view solutions of this theory as solutions to 11-dimensional supergravity on appropriate CY$_3\times S^1$ compactifications with a twist along the circle. \subsection{Review of static AdS\texorpdfstring{$_4$}{4} black holes} \label{sec:static-bh} As an illustration of the real formulation for the BPS equations, we present here a short summary of the known static BPS black hole solutions in gauged supergravity. It is instructive to first consider \eqref{eq:final-psi}. 
We already used the ansatz in \eqref{eq:3d-metr} where the function $\psi$ only depends on the radial coordinate, therefore we can choose an ansatz for the symplectic variable $\mathcal{I}$ in \eqref{eq:dual-coo} in the following way \begin{equation}\label{eq:section-ansa} \mathcal{I} = {\mathrm{e}}^{- \psi}\ {\cal H} (r) + f (r, \Sigma)\ G\ . \end{equation} Here, ${\cal H} (r)$ is a radially dependent symplectic vector and $f(r, \Sigma)$ is an arbitrary function on the base \eqref{eq:3d-metr}, i.e.\ one that can also depend on the coordinates on the Riemann surface $\Sigma$. Note that we have chosen the second term in \eqref{eq:section-ansa} proportional to $G$, so that it drops out of \eqref{eq:final-psi}, due to the antisymmetry of the symplectic inner product, $\Iprod{G}{G} = 0$, but in principle any vector that is mutually local with $G$ could be used to the same effect, so that we arrive at the equation \begin{equation} ({\mathrm{e}}^\psi)' = \Iprod{G}{{\cal H}}\ . \end{equation} In the static case it can further be shown that $f(r, \Sigma) = 0$, but we make use of this freedom in the equation in the construction of the rotating solutions in the following sections. We now restrict to the static case, for which the general black hole solution is known \cite{Halmagyi:2014qza}. However, for illustrative purposes, we further restrict to the simpler class of solutions appearing in \cite{Cacciatori:2009iz,Dall'Agata:2010gj,Hristov:2010ri, Katmadas:2014faa} for which ${\cal H}$ is linear in the radial variable\footnote{We remark on the most general case later in this subsection, around \eqref{eq:gen-stat}.} \begin{equation}\label{eq:stat-ans} {\cal H}(r) = {\cal H}_0 + {\cal H}_{\infty}\ r\ , \end{equation} for some constant symplectic vectors ${\cal H}_0$ and ${\cal H}_\infty$ that are to be fixed by the BPS equations. 
The names are appropriately chosen as ${\cal H}_0$ gives the near-horizon ($r \rightarrow 0$) value of the scalars, while ${\cal H}_\infty$ fixes the scalars asymptotically ($r \rightarrow \infty$). From the discussion of the asymptotics in the previous subsection, we can already see from \eqref{eq:asympt-exp} that \begin{equation}\label{eq:H-infty} {\cal H}_\infty = \lambda I_4' (G)\ , \end{equation} for a constant $\lambda$ that is usually chosen by convention depending on the type of asymptotics. The explicit form will help us simplify substantially the remaining equations using the various identities in App. \ref{app:I4}. Notice that we can already give the general solution for the metric function $\psi (r)$, \begin{equation}\label{eq:psi-static} {\mathrm{e}}^\psi = \Iprod{G}{{\cal H}_0}\ r + 2 \lambda\ I_4 (G)\ r^2\ , \end{equation} where $\lambda = \frac12\?I_4(G)^{-3/4}$ to match the AdS$_4$ asymptotics in \eqref{eq:as-sol} and we did not allow for an integration constant that can always be shifted away by a coordinate redefinition. We then turn to Eq.\ \eqref{eq:final-omega}, whose left hand side vanishes for static solutions, since the rotation one-form $\omega$ vanishes in this case. After a bit of rewriting on the right hand side, one is left with the following constraint: \begin{equation} 2\ {\mathrm{e}}^\psi\ \Iprod{{\cal H}_0}{{\cal H}_\infty} = \Iprod{G}{I_4' ({\cal H}_0 +{\cal H}_\infty\ r)}\ , \end{equation} which we can further expand in powers of $r$. The left hand side only has a linear and quadratic piece in $r$ by virtue of \eqref{eq:psi-static}, while the right hand side also has a constant and a cubic piece in $r$ since $I_4'$ is homogeneous of degree 3. This in principle amounts to four equations, but upon the explicit use of \eqref{eq:H-infty} together with the identities \eqref{eq:quint}-\eqref{eq:proj}, it turns out three of these equations are already satisfied. 
One constraint for the vector ${\cal H}_0$ remains, \begin{equation} I_4 ({\cal H}_0, {\cal H}_0, {\cal H}_0, G) = 0 \quad \Rightarrow \quad \Re({\mathrm{e}}^{-\mathrm{i}\?\alpha}Z(G))\bigr|_{\text{hor}}=0\ , \end{equation} where in the second equation we rewrote the constraint in the complex basis, recognized as one of the BPS equations at the attractor. Proceeding with \eqref{eq:final-flow}, we note that due to the symmetries we can set the gauge field strengths proportional to the volume form of the Riemann surface, ${\cal F} = \Gamma\ {\rm vol}_{\Sigma}$. We therefore find \begin{equation} {\mathrm{e}}^\psi\ {\cal H}_\infty = \frac14 I_4' ({\cal H}_0 + {\cal H}_\infty\ r, {\cal H}_0 + {\cal H}_\infty\ r, G) - \Gamma\ , \end{equation} leading to three vector equations once expanded in powers of the radial variable. In particular, the quadratic terms in $r$ cancel upon inserting the solution for ${\cal H}_\infty$ in \eqref{eq:H-infty}. Using the identity \eqref{eq:proj}, we are then left with the condition \begin{equation}\label{eq:static-constr} \Iprod{{\cal H}_0}{{\cal H}_\infty} = 0\,, \end{equation} from the terms linear in $r$ and with \begin{empheq}[box=\fbox]{align}\label{eq:H-0} \Gamma = \frac14 I_4' ({\cal H}_0, {\cal H}_0, G)\ , \end{empheq} at the horizon at $r=0$. The latter is an equation for a full symplectic vector and is therefore enough to completely solve for the near-horizon symplectic vector ${\cal H}_0$ in terms of the gauging vector $G$ and the charge vector $\Gamma$. Eventually the constraint \eqref{eq:static-constr} can be transformed into a restriction on the electromagnetic charges allowed by the gauging, \begin{empheq}[box=\fbox]{align}\label{eq:final-constr} I_4 (\Gamma, \Gamma, \Gamma, G) = I_4 (G, G, G, \Gamma) = 0\ . \end{empheq} We have therefore written down the solution for a static black hole subject to a particular constraint on the charges. 
In order to find the most general solution we should have started with a slightly more general ansatz for the section \begin{equation}\label{eq:gen-stat} \mathcal{I} = e^{-2 \psi} \left( {\cal A}_1\ r + {\cal A}_2\ r^2 + {\cal A}_3\ r^3 \right)\ , \end{equation} for three constant symplectic vectors ${\cal A}_{1,2,3 }$ determined by the equations. In particular the AdS$_4$ asymptotics fix ${\cal A}_3 = \frac12\?I_4(G)^{-1/2}\,I_4' (G)$. The complete solution can be found in a similar fashion, as can be seen explicitly in \cite{Halmagyi:2014qza}, but is slightly more complicated due to the more general ansatz. In this case the constraint \eqref{eq:final-constr} is relaxed, thus allowing for one extra free parameter in the charge vector \cite{Halmagyi:2014qza}. Finally let us remark on the last equation still to be solved, \eqref{eq:final-Sig}. Upon taking an exterior derivative we can then relate the gauge field strengths to the Ricci curvature of the 2d surface, \begin{equation}\label{eq:charge-constr} \Iprod{G}{{\cal F}} = \epsilon_{ab} \hat{{\cal R}}^{ab}\ , \end{equation} where $\hat{{\cal R}}^{ab}$ is the Ricci form on $\Sigma$. In the static case all isometries of the 2d surface $\Sigma$ are preserved and one can further show that near the horizon the 2d metric must be the constant curvature metric, which we can also take as a solution along the full black hole flow (see \cite{Anderson:2011cz} for a more careful analysis). This leaves three possibilities, namely the constant curvature metrics on $S^2$ or H$_2$ and the flat metric on ${\mathbb R}^2$. In order to have a compact horizon we can further quotient the non-compact spaces ${\mathbb R}^2$ and H$_2$ to arrive at a Riemann surface $\Sigma_{\mathfrak{g}}$ of arbitrary genus. 
Explicitly, one can use any of the following metrics \begin{equation} ds^2_{\Sigma_\mathfrak{g}} = d\theta^2 + f_\kappa^2(\theta)\, d\phi^2 \;,\quad f_\kappa(\theta) = \left\{ \begin{array}{ll} \sin\theta & \kappa = 1 \\ 1 & \kappa=0 \\ \sinh\theta & \kappa=-1\,, \end{array} \right. \end{equation} where $\kappa = 1$ for $S^2$, $\kappa = 0$ for $T^2$, and $\kappa = -1$ for $\Sigma_\mathfrak{g}$ with $\mathfrak{g}>1$. The scalar curvature in each case is $2\?\kappa$ and the volume is \begin{equation} \mathrm{Vol}(\Sigma_\mathfrak{g}) = 2\? \pi \?\eta \;, \quad \eta = \left\{ \begin{array}{ll} 2\? |\mathfrak{g}-1| &\text{for } \mathfrak{g} \neq 1 \\ 1 &\text{for } \mathfrak{g} = 1 \;. \end{array} \right. \end{equation} The gauge field strengths are then given by \begin{equation} {\cal F} = \Gamma\ f_\kappa(\theta)\ d\theta \wedge d\phi\ , \end{equation} and we finally arrive (using \eqref{eq:charge-def} and integrating \eqref{eq:charge-constr}) at one final constraint on the charges, \begin{empheq}[box=\fbox]{align}\label{eq:final-charge-constr} \Iprod{G}{\Gamma} = -\kappa\ . \end{empheq} Summarizing, in order to obtain a static BPS solution in the class described by \eqref{eq:stat-ans}, one has to solve \eqref{eq:H-0}, subject to the constraints \eqref{eq:final-constr} and \eqref{eq:final-charge-constr}. In the next sections, we will extend these solutions to the rotating case by finding solutions of the form \eqref{eq:section-ansa}. \section{Rotating near-horizon geometry} \label{sec:nhg} \subsection{Solving the BPS conditions} \label{subsec:nhg-BPS} Restricting to an attractor solution, which we expect to be topologically AdS$_2\times \Sigma$, we impose the appropriate scaling with respect to the radial coordinate for all the relevant fields. 
In particular, we take the function $\psi(r)$ in \eqref{eq:3d-metr} as \begin{equation}\label{eq:psi-atrr} {\mathrm{e}}^\psi = \mathrm{v} \, r \,, \end{equation} with $\mathrm{v}$ a constant that physically gives the ratio between the scales of $\Sigma$ and AdS$_2$ on the horizon, so that the three-dimensional base metric becomes \begin{equation}\label{eq:base-bps-attr} ds^2_3 = dr^2 + \mathrm{v}^2\, r^2 \? ds^2_{\Sigma}\,. \end{equation} The conical structure of this ansatz implies the scaling behaviour \begin{equation}\label{eq:eU-attr} {\mathrm{e}}^{-2\?U} = \frac{1}{r^2}\? {\mathrm{e}}^{-2\? \mathsf{u}}\,, \qquad \omega = \frac{1}{r}\?\omega_0 \,, \end{equation} for the remaining objects in the 4d metric, where $\mathsf{u}$ and $\omega_0$ are a function and a one-form that depend only on the coordinates on $\Sigma$. The total metric \eqref{eq:metr-bps} thus takes the expected form \begin{equation}\label{eq:metr-bps-atrr} ds^2_4 = -{\mathrm{e}}^{2\?\mathsf{u}} \left(r \? dt + \omega_0 \right)^2 + {\mathrm{e}}^{-2\?\mathsf{u}}\?\left( \frac{1}{r^2} \? dr^2 + \mathrm{v}^2 \, ds^2_{\Sigma} \right) \,. \end{equation} The requirement \eqref{eq:eU-attr} implies that the variables $\zeta$, $\mathcal{R}$ and $\mathcal{I}$ behave as \begin{equation}\label{eq:scal-fields} \zeta = r\? \zeta_0 \,, \qquad \mathcal{R} = r\? \mathcal{R}_0 \,, \qquad \mathcal{I} = \frac1r\? \mathcal{I}_0 \,, \end{equation} where $\zeta_0$, $\mathcal{R}_0$ and $\mathcal{I}_0$ are symplectic vectors that depend only on the coordinates on $\Sigma$ and are such that \begin{equation} {\mathrm{e}}^{4\?\mathsf{u}} = I_4(\mathcal{R}_0) = I_4(\mathcal{I}_0)^{-1} \,. \end{equation} Finally, we note that with the choice \eqref{eq:psi-atrr}, the condition \eqref{eq:final-psi} becomes \begin{equation}\label{eq:ImG-atrr} \Iprod{G}{\mathcal{I}_0} = 1 \,. 
\end{equation} Turning to the flow equation in \eqref{eq:final-flow}, the scaling behaviour \eqref{eq:psi-atrr}-\eqref{eq:scal-fields} leads to \begin{align}\label{eq:fl-Ortin-f} \frac1r\? \star d \mathcal{I}_0 -\frac1{4\?r^2}\? {\mathrm{e}}^{-2\?\mathsf{u}}I^\prime_4(\mathcal{I}_0 , \mathcal{I}_0 , G)\? \star dr - r\? d\left( \frac{1}{r}\?\omega_0 \right) \?G + {\cal F} = 0\,. \end{align} By the assumption that the scalar fields and ${\mathrm{e}}^\mathsf{u}$ do not depend on the radial coordinate near the horizon, this flow equation breaks up in components as \begin{gather} \star d \mathcal{I}_0 = \omega_0 \wedge dr\, G \,, \label{eq:d-scalars} \\ {\cal F} = \frac1{4\?r^2}\? {\mathrm{e}}^{-2\?\mathsf{u}}I^\prime_4( \mathcal{I}_0 , \mathcal{I}_0 , G )\? \star dr + d\omega_0 \?G\,. \label{eq:attractor} \end{gather} Here, the first equation determines the dependence of the scalars along $\Sigma$, while the second fixes their constant parts in terms of the charges, directly generalizing the corresponding static attractor equation. The final condition to reduce on $\Sigma$ is \eqref{eq:final-omega}. To this end, note that taking the inner product of \eqref{eq:d-scalars} with $\mathcal{I}_0$ and using \eqref{eq:ImG-atrr}, one finds \begin{equation} \Iprod{d \mathcal{I}_0}{\mathcal{I}_0} = \star\left( \omega_0\?\wedge dr \right) \,, \end{equation} so that \eqref{eq:final-omega} becomes \begin{align}\label{eq:omega-eqn} \star d \omega_0 = &\? \frac1{r^2}\? \Iprod{G}{I^\prime_4(\mathcal{I}_0)} \? dr\,. \end{align} Acting with a derivative on \eqref{eq:omega-eqn} and using \eqref{eq:d-scalars}, we find \begin{equation} d\star d \omega_0 = \frac1{2\? r^2}\?I_4\left( \mathcal{I}_0 , \mathcal{I}_0 , G, G \right)\, \star\left( \omega_0\wedge dr \right)\wedge dr\,. 
\end{equation} The expression in the RHS is recognized as the contraction of \eqref{eq:attractor} with $G$, which is in turn fixed by \eqref{eq:final-Sig}, leading to \begin{equation}\label{eq:Lapl-omega} d\star_{\scriptscriptstyle (2)} d \omega_0 = \mathrm{R}^{\scriptscriptstyle (2)} \? \star_{\scriptscriptstyle (2)} \omega_0 \,, \end{equation} where $\star_{\scriptscriptstyle (2)}$ and $ \mathrm{R}^{\scriptscriptstyle (2)}$ are the Hodge star and Ricci scalar on $\Sigma$. We therefore conclude that $\omega_0$ must be the one-form dual to a Killing vector field $\tilde\omega_0$ on $\Sigma$. Unlike the static case we discussed above, this in turn means that rotating black holes can only have spherical topology if we insist on compactness. Rotating black holes with non-spherical topology have non-compact horizons, i.e.\ cylindrical or hyperbolic ones, see \cite{Caldarelli:1998hg}. With this knowledge we can now be more specific and write down the general form of the metric on the 2d space $\Sigma$. As it turns out, this cannot be of the constant curvature type, as will become clear from the following analysis that such a choice would be inconsistent for a gauging allowing for an AdS$_4$ vacuum. We therefore parametrize the metric on $\Sigma$ in terms of a single function $\Delta(\theta)$, as \begin{equation}\label{eq:base-bps-sph} ds^2_{\Sigma} = \frac{d\theta^2}{\Delta(\theta)} + \?\Delta(\theta)\? f_\kappa^2(\theta)\, d\phi^2 \, ,\quad f_\kappa(\theta) = \left\{ \begin{array}{ll} \sin\theta & \kappa = 1 \\ 1 & \kappa=0 \\ \sinh\theta & \kappa=-1 \end{array} \right. \end{equation} where $\kappa = 1$ for the spherical case, $\kappa = 0$ for the cylindrical, and $\kappa = -1$ for the hyperbolic. Here, $\theta$, $\phi$ are coordinates along the surface $\Sigma$, and $\Delta(\theta)$ is a function of $\theta$. The direction $\phi$ corresponds to a compact isometry in all three cases.
We also record the Ricci scalar corresponding to the metric \eqref{eq:base-bps-sph}, given by \begin{equation}\label{eq:Ric-Sigma} \mathrm{R}^{\scriptscriptstyle (2)} = -\frac{1}{f_\kappa}\?\partial_\theta\left( \frac{1}{f_\kappa}\?\partial_\theta \left( \Delta(\theta)\? f^2_\kappa\! (\theta) \right) \right) \,. \end{equation} One can now insert \eqref{eq:Ric-Sigma} into \eqref{eq:Lapl-omega}, which can be readily solved for $\omega_0$ as \begin{equation}\label{eq:omega-sph} \omega_0 = -\frac{\mathrm{j}}{\mathrm{v}}\?\Delta(\theta)\? f^2_\kappa\! (\theta)\,d\phi \,, \end{equation} where $\mathrm{j}$ is a constant and the factor of $\mathrm{v}$ was added for later convenience. Note that the dual vector to \eqref{eq:omega-sph} with respect to the metric \eqref{eq:base-bps-sph} is simply $\tilde\omega_0 =-\frac{\mathrm{j}}{\mathrm{v}}\?\frac{\partial}{\partial\? \phi}$, which is manifestly Killing and corresponds to a compact $U(1)$ isometry. With this expression for $\omega_0$, we can explicitly compute \begin{equation} \star\left( \omega_0\wedge dr \right) = \frac{\mathrm{j}}{\mathrm{v}}\? d(F_\kappa)\,, \end{equation} where $\partial_\theta F_\kappa \equiv - f_\kappa$, so that \eqref{eq:d-scalars} can be solved as \begin{equation}\label{eq:ImO-attr} \mathrm{v}\?\mathcal{I}_0 = e^{\psi} \mathcal{I} = {\cal H}_0 + \mathrm{j}\? F_\kappa\? G\,, \end{equation} for a constant symplectic vector, ${\cal H}_0$. Consistency with \eqref{eq:ImG-atrr} requires that \begin{equation} \label{eq:vvv} \mathrm{v} = \Iprod{G}{{\cal H}_0}\ . \end{equation} Having worked out the BPS conditions for the different choices of $\Sigma$, from here on we specialize to the spherical case in order to be explicit. We come back to present the general formulas for other cases at the end of this section. 
Using the functions $f_\kappa = \sin \theta, F_\kappa = \cos \theta$ in \eqref{eq:attractor}, we obtain the explicit form of the gauge field strengths for a spherical horizon, as \begin{equation}\label{eq:F-attr} {\cal F} = {\cal B}\? \sin \theta\ d\theta\wedge d\phi + d\omega_0 \?G \,, \end{equation} where the ${\cal B}$ are given by \begin{equation}\label{eq:attr-fin0} {\cal B} = \frac{1}{4}\? I^\prime_4\left({\cal H}_0 + \mathrm{j}\? \cos \theta\ G , {\cal H}_0 + \mathrm{j}\? \cos \theta\ G, G \right) \,, \end{equation} and we write the term along the gauging separately, as it does not contribute to the charge and ultimately to the attractor equation. The charge vector is obtained through its standard definition \eqref{eq:charge-def}, as \begin{equation} \Gamma = \frac{1}{\mathrm{Vol}_\Sigma}\?\int_{\Sigma} {\cal F} = \frac{1}{\mathrm{Vol}_\Sigma}\?\int_{\Sigma} {\cal B}\? \sin \theta\ d\theta\wedge d\phi \,. \end{equation} Using \eqref{eq:attr-fin0} explicitly, we find \begin{empheq}[box=\fbox]{align}\label{eq:attr-fin} \Gamma = \frac{1}{4}\? I^\prime_4\left({\cal H}_0, {\cal H}_0, G \right) + \frac{1}{2}\? \mathrm{j}^2\? I^\prime_4\left( G \right)\,, \end{empheq} which is the final attractor equation to be solved for ${\cal H}_0$, the constant part of the scalars. It generalizes the static attractor equation \eqref{eq:H-0} and can be explicitly solved in a given model defined by a prepotential (which in turn defines the quartic invariant $I_4$) and a gauging vector $G$. The final object is the function $\Delta(\theta)$, which appears in the attractor metric \eqref{eq:metr-bps-atrr} both in the base space metric and in $\omega$. This is obtained from \eqref{eq:omega-eqn} upon using the expression \eqref{eq:ImO-attr} for the scalar section, leading to \begin{align} \label{eq:omega-attr} \omega_0 = & - \frac{\mathrm{j}}{\mathrm{v}}\? \left( 1 - \Iprod{{\cal H}_0}{I^\prime_4(G)} \? \mathrm{j} \? \cos \theta\ + I_4(G)\? \mathrm{j}^2 \sin^2 \theta\ \right)\? 
\sin^2 \theta\ \?d\phi \nonumber\\ &\? -\frac1{\mathrm{v}}\?\left( \Iprod{G}{I^\prime_4({\cal H}_0)} + \Iprod{{\cal H}_0}{I^\prime_4(G)} \? \mathrm{j}^2 \right)\? \cos \theta\ d\phi \,, \end{align} where we used the inner product of \eqref{eq:attr-fin} with $G$ to rearrange terms, together with the final constraint from \eqref{eq:final-Sig} \begin{empheq}[box=\fbox]{align} \label{eq:final-sph} \Iprod{G}{\Gamma} = -1\ . \end{empheq} The second line in \eqref{eq:omega-attr} corresponds to a NUT charge, which must vanish for a regular solution\footnote{The presence of nonzero NUT charge requires a compact time to avoid Misner strings. Therefore the solution would have closed timelike curves.}. We thus arrive at \begin{equation}\label{eq:Delta-sol} \Delta(\theta) = 1 - \Iprod{{\cal H}_0}{I^\prime_4(G)} \? \mathrm{j} \? \cos \theta\ + I_4(G)\? \mathrm{j}^2 \sin^2 \theta\ \,, \end{equation} along with \begin{equation}\label{eq:NUT-ch} N =\frac1{2\mathrm{v}} \left( \Iprod{G}{I^\prime_4({\cal H}_0)} + \mathrm{j}^2\ \Iprod{{\cal H}_0}{I^\prime_4(G)} \right)\ , \end{equation} which we set to zero here for a regular rotating black hole\footnote{See \cite{Erbin:2015gha} for solutions with NUT charge in gauged supergravity.}. If we choose the internal space to be non-compact, i.e.\ the cylindrical and hyperbolic rotating black holes, most of the above formulas generalize easily by inserting $\kappa$ where appropriate. In particular we have $\Iprod{G}{\Gamma} = - \kappa$ and we again need to solve the same main equation \eqref{eq:attr-fin} in order to find the full black hole metric and the scalars. In the explicit examples below we concentrate on the spherical solutions, but also give one example with non-compact horizon in order to also relate with previous literature. \subsection{Summary of BPS attractors} \label{sec:summ-attr} Given the above results, we now summarize the structure of rotating BPS attractors. 
The metric is as in \eqref{eq:metr-bps-atrr}, with the metric along the Riemann surface as in \eqref{eq:base-bps-sph}, where the function $\Delta$ is given by \eqref{eq:Delta-sol}. The rotation one-form $\omega_0$ is given by \eqref{eq:omega-sph}, while the NUT charge is given by \eqref{eq:NUT-ch}. The remaining fields are given in terms of the constant vector ${\cal H}_0$, which is determined as the solution to \eqref{eq:attr-fin}. In particular, the constants ${\mathrm{e}}^\mathsf{u}$ and $\mathrm{v}$, along with the scalar fields are given by \eqref{eq:ImO-attr} and \eqref{eq:vvv}. More explicitly, the combination $\mathrm{v}\?{\mathrm{e}}^{-\mathsf{u}}$ is computed through \begin{equation}\label{eq:e4u-atrr} \mathrm{v}^4\? {\mathrm{e}}^{-4\?\mathsf{u}} = I_4({\cal H}_0 + \mathrm{j}\? \cos \theta\ G) = {\cal W} + \mathrm{j}^2\? \Delta(\theta)\? \sin^2 \theta \,, \end{equation} where \begin{equation}\label{eq:entr2} {\cal W} = I_4({\cal H}_0) - \left( 1 + I_4(G)\?\mathrm{j}^2 \right)\?\mathrm{j}^2\,. \end{equation} The physical scalars follow by constructing the symplectic section from \eqref{eq:ImO-attr} in the standard way as \begin{equation}\label{eq:section-phys} 2\? {\mathrm{e}}^{-\mathsf{u}}\?{\mathrm{e}}^{-\mathrm{i}\?\alpha} {\cal V} = -\frac1{2\?\sqrt{I_4(\mathcal{I}_0)}}\? I^\prime_4(\mathcal{I}_0) + \mathrm{i} \,\mathcal{I}_0\,, \end{equation} and forming the ratios $t^i=X^i/X^0$. Finally, the gauge field strengths are given by \eqref{eq:F-time-dec} and \eqref{eq:zeta-BPS} as \begin{equation}\label{eq:F-attr-full} \mathsf{F} = d\left[ \left(- \frac12\? {\mathrm{e}}^{4\?\mathsf{u}}\? I^\prime_4( \mathcal{I}_0) - G \right)\,(r\?dt+\omega_0) \right] + {\cal F} \,, \end{equation} where ${\cal F}$ is given explicitly by \eqref{eq:F-attr}-\eqref{eq:attr-fin0}. The physical quantities of interest include the conserved charges, electromagnetic and rotational, as well as the entropy associated to the black hole horizon. 
The electromagnetic charges have been computed in \eqref{eq:attr-fin}, while for the computation of the entropy through the area law, it is useful to recast \eqref{eq:metr-bps-atrr} in the following form, by completing the square with respect to $\phi$ and using \eqref{eq:e4u-atrr}, \begin{equation}\label{eq:metr-Sen} ds^2_4 = {\mathrm{e}}^{-2\?\mathsf{u}} \left( - r^2 \? d\tau^2 + \frac{dr^2}{r^2} + \frac{ \mathrm{v}^2}{\Delta(\theta)}\? d\theta^2 \right) + {\mathrm{e}}^{2\?\mathsf{u}} \frac{{\cal W}}{ \mathrm{v}^2} \?\Delta(\theta)\? \sin^2 \theta\? \?\left( d\phi + \frac{\mathrm{j}} {\mathrm{v}\?\sqrt{{\cal W}}}\?r\?d\tau\right)^2 \,. \end{equation} Here, we rescaled the time variable as $\tau = \mathrm{v}^2\? {\cal W}^{-1/2}dt$ and ${\cal W}$ is as in \eqref{eq:entr2}. In the form \eqref{eq:metr-Sen}, it is easy to compute the black hole entropy $S$ through the horizon area $A$, via the Bekenstein-Hawking formula \begin{equation}\label{eq:BHentropy-horizon} S = \frac{A}{4} = \pi\?\sqrt{I_4({\cal H}_0) - \left( 1 + I_4(G)\?\mathrm{j}^2 \right)\?\mathrm{j}^2} \equiv \pi\? \sqrt{{\cal W}}\,, \end{equation} where we have set $G_N = 1$ for simplicity. The final conserved quantity is the angular momentum ${\cal J}$, which can be computed in terms of the appropriate Noether integral at the horizon. However, we will defer this discussion until section \ref{subsec:charges}, where both the Noether integral as well as the simpler Komar integral it reduces to in the asymptotic region are presented. Here, we simply cite the result (cf. \eqref{eq:J-general}) \begin{equation}\label{eq:J-general-0} {\cal J} = - \frac{\mathrm{j}}{2}\?\left( \Iprod{I_4^\prime(G)}{I_4^\prime({\cal H}_0)} -\frac12\?
I_4({\cal H}_0, {\cal H}_0, G, G)\?\Iprod{G}{{\cal H}_0} \rule[.1cm]{0pt}{\baselineskip}\right)\,, \end{equation} which should be solved together with \eqref{eq:attr-fin} for $\mathrm{j}$ and ${\cal H}_0$ in terms of ${\cal J}$ and $\Gamma$, in order to obtain the entropy in terms of conserved charges through \eqref{eq:BHentropy-horizon}, as discussed in section \ref{subsec:charges}. \subsection{Examples} \label{subsec:nhg-examples} In this section, we present four examples of rotating BPS near-horizon geometries, each corresponding to one of the three interesting asymptotics listed in section \ref{sec:as-vac}. The attractor solution corresponding to an asymptotically AdS$_4$ flow within the T$^3$ model is new, and the full flow will be presented in the next section. The examples corresponding to flows with hvLif and Minkowski asymptotics are known in the literature, as the near-horizon regions of the KLR black hole \cite{Kunduri:2006ek} and of the non-BPS black hole of \cite{Bena:2009ev} respectively\footnote{Both of these solutions were constructed within 5d supergravity, here we consider their reduction to 4d along an angular isometry.}. \subsubsection{Models with AdS\texorpdfstring{$_4$}{4} vacuum} \label{Model_ads_vac} \subsubsection*{The T$^3$ model} We now turn to the example of a rotating attractor in the simplest symmetric model in the cubic series, the T$^3$ model, with prepotential as in \eqref{eq:T3-model}. We focus on this model for brevity, noting that the extension to the STU model \eqref{eq:STU-def} and indeed to any cubic model is straightforward. For the T$^3$ model, an AdS$_4$ vacuum exists in presence of mixed electric and magnetic gauging vector $G$, as \begin{equation}\label{eq:ex1-G} G = \{ g^0,\?0,\? 0,\? g_1 \}^{\rm T}\,, \end{equation} so that the components $g^1=g_0=0$ while $g^0$, $g_1$ are arbitrary. For this gauging, we list the relevant objects \begin{equation}\label{eq:ex1-I4G} I_4(G)= -4\? 
g^0 g_1^3 \,, \qquad I^\prime_4(G) = \{ 0,-4\?g^0 g_1^2,4\? g_1^3,0\}^{\rm T}\,. \end{equation} This gauging leads to the cosmological constant $\Lambda = -3 \sqrt{I_4(G)}= -3 \sqrt{-4\? g^0 g_1^3}$, so that we choose to focus on the case $g^0<0$, $g_1 >0$ which gives $I_4(G)>0$ and a real and negative cosmological constant. Similarly, we choose the charges carried by the black hole as \begin{equation}\label{eq:ex1-ch0} \Gamma = \{ 0,\?p^1,\? q_0,\? 0 \}^{\rm T}\,, \end{equation} for simplicity. The constraint \eqref{eq:final-Sig} can then be implemented by trading one of the charges for $\kappa$, and we take \begin{equation}\label{eq:ex1-ch} \Gamma = \{ 0,\?p^1,\? \frac{\kappa+3\?g_1\? p^1}{g^0},\? 0 \}^{\rm T}\,. \end{equation} The complete attractor solution is based on the vector ${\cal H}_0$ obtained as the solution to \eqref{eq:attr-fin}, for the gauging and charge vectors given above, and for an arbitrary constant $\mathrm{j}$. The resulting vector also has only two nonzero components, as \begin{equation}\label{eq:ex1-H} {\cal H}_0 = \{ 0,\?h^1,\? h_0,\? 0 \}^{\rm T}\,. \end{equation} The nontrivial components of \eqref{eq:attr-fin} are \begin{equation}\label{eq:ex1-attr} p^1 = h^1\? (g_1 h^1 - h_0 g^0)- 2\? g^0 g_1^2\? \mathrm{j}^2 \,, \qquad \frac{\kappa+3\?g_1\? p^1}{g^0} = h_0\?(h_0 g^0 + 3\? g_1 h^1) + 2\? g_1^3\? \mathrm{j}^2\,, \end{equation} and their solution reads\footnote{Since the attractor equations always have the square of the symplectic vector ${\cal H}_0$, its components are always determined up to an overall sign. Here and everywhere else in this paper we just present the solution with one chosen sign, since the other one is trivial to obtain and does not lead to a physically different solution. Additionally, there can be several branches of solutions due to different determination of square roots, and these do lead to potentially different solutions.
In particular only the upper signs lead to regular spherical solutions in the solution discussed here. Depending on the range of charges, the cases with $\kappa = 0, -1$ do allow for both signs.} \begin{align}\label{eq:ex1-attr-sol} h^1 =&\, \frac{1}{4\?g_1}\?\left( \sqrt{\kappa + 12\?g_1\?p^1 + 16\?g_1^3 g^0 \?\mathrm{j}^2} \mp \sqrt{\kappa + 4\?g_1\?p^1} \right)\,, \\ \nonumber h_0 =&\, - \frac{1}{4\?g^0}\?\left( \sqrt{\kappa + 12\?g_1\?p^1 + 16\?g_1^3 g^0 \?\mathrm{j}^2} \pm 3\? \sqrt{\kappa + 4\?g_1\?p^1} \right)\,. \end{align} Below, we will not use these explicit expressions in displaying results for simplicity, and continue to use the parameters $h^1$, $h_0$ instead of the charges, unless stated otherwise. After setting up the above objects, we are now ready to display the various physical fields. The metric is of the form \eqref{eq:metr-bps-atrr}, with the metric along the sphere as in \eqref{eq:base-bps-sph}. Here, we focus on the physically more interesting case of a spherical horizon, so that we set $\kappa=1$ henceforth, noting that the cases with $\kappa=0,-1$ can be treated similarly. The function $\Delta(\theta)$ is given by \eqref{eq:Delta-sol} using \eqref{eq:ex1-I4G}, as \begin{equation}\label{eq:ex1-delta_th} \Delta(\theta) = 1 - 4\? g_1^3 g^0 \? \mathrm{j}^2 \sin^2 \theta \,, \end{equation} while the constant $\mathrm{v}$ is fixed through \eqref{eq:vvv} as \begin{equation}\label{eq:ex1-vvv} \mathrm{v} = 3\? g_1 h^1 -h_0 g^0 \,. \end{equation} The scale factor of the metric is given by \begin{equation}\label{eq:ex1-e4u-atrr} {\mathrm{e}}^{-4\?\mathsf{u}} = \frac1{\mathrm{v}^4}\?\left( 4\? (h^1)^3 h_0 - \mathrm{j}^2 \?\left( 1 - 4\? g^0 g_1^3 \mathrm{j}^2 \right) + \mathrm{j}^2\? \Delta(\theta)\? \sin^2 \theta \right)\,, \end{equation} consistent with \eqref{eq:e4u-atrr}. The remaining physical fields may now be constructed using the formulae given in the previous section. 
The scalar follows from solving \eqref{eq:ImO-attr}, with the result \begin{align} t = \frac12\?\frac{\mathrm{i}\? \mathrm{v}^2 \?e^{2\mathsf{u}} +\mathrm{j}\? \cos \theta (h_0 g^0 +h^1 g_1 )}{ (h^1) ^2 -g_1 g^0\? \mathrm{j}^2 \?\cos^2 \theta }\,, \end{align} while the gauge fields follow from \eqref{eq:F-attr-full} as \begin{align} A^0 = &\? -\frac{2\? (h^1) ^3- g^0\? \mathrm{j}^2\? (h_0\? g^0 + 3\? h^1 g_1)\? \cos^2 \theta}{\mathrm{v}^{3}\?{\mathrm{e}}^{-4\?\mathsf{u}}}\,(r\?dt+\omega_0) - g^0\? r\? dt \nonumber\\[6pt] &\? - \mathrm{j} \?g^0 \?(h_0\? g^0 + 3\? h^1 g_1)\? \sin^2\theta \, d\phi \,, \end{align} and \begin{align} A^1 = &\? \mathrm{j}\? \cos \theta \?\frac{h^1 \? (h_0 g^0 - h^1 g_1) + 2\?g^0 g_1^2 \, \mathrm{j}^2\? \cos^2 \theta}{\mathrm{v}^{3}\?{\mathrm{e}}^{-4\?\mathsf{u}}}\,(r\?dt+\omega_0) \nonumber\\[6pt] &\? - \left( p^1 + 2\?g^0 g_1^2 \, \mathrm{j}^2\?\sin^2\theta \right)\?\cos \theta \, d\phi \,, \end{align} where in the last expression we used the charge $p^1$ in \eqref{eq:ex1-attr} for simplicity. We stress once again that in the spherical case one should use the upper sign in \eqref{eq:ex1-attr-sol} to obtain the expressions of all the fields in terms of the conserved charges only. We have checked explicitly that this solution satisfies the equations of motion for the $T^3$ model, as expected. From the explicit form of the near horizon geometry we can read off the black hole entropy, which assumes a particularly simple expression in terms of the components of $\mathcal{H}$, as \begin{equation} S_{BH,\mathcal{H}} =\sqrt{ 4\? (h^1)^3 h_0 + \mathrm{j}^2 \? (4\? g^0\? g_1^3\? \mathrm{j}^2- 1)} \,, \end{equation} or alternatively, in terms of the electromagnetic charges: \begin{equation}\label{eq:ex1-S} S_{BH}= \sqrt{\frac{ (1+4\? g_1\? p^1)^{3/2} \sqrt{16\? g_1^3 g^0\? \mathrm{j}^2+12\? g_1\? p^1+1} - \left( 24\? g_1^2\? (p^1)^2+12\? g_1\? p^1+1 \right) }{-8\? g_1^3 g^0}}\,. 
\end{equation} The latter expression reduces to the known entropy formula for the static black hole of \cite{Dall'Agata:2010gj, Hristov:2010ri} for $\mathrm{j}=0$, the parameter $\mathrm{j}$ being related to the angular momentum. The precise relation of this parameter to the conserved angular momentum $\mathcal{J}$ will be given in section \ref{subsec:charges}, by means of the Komar integral in the asymptotic region. We refer to that section for the expression for the entropy in terms of the conserved charges. \subsubsection*{The $X^0 X^1$ model} Let us also briefly discuss another model with an AdS$_4$ vacuum, the $X^0 X^1$ model where supersymmetric rotating hyperbolic black holes with an ergosphere were previously found \cite{Klemm:2011xw}. For the $X^0 X^1$ model in the form \eqref{eq:X0X1-model}, an AdS$_4$ vacuum exists in the presence of purely electric gauging vector $G$, as \begin{equation} G = \{0,\?0,\? g_0,\? g_1 \}^{\rm T}\,, \end{equation} so that the components $g^0=g^1=0$ while $g_0$, $g_1$ are assumed to be positive without loss of generality. The AdS$_4$ asymptotics are fixed by \begin{equation} I_4(G)= (g_0 g_1)^2 \,, \qquad I^\prime_4(G) = 2 g_0 g_1\ \{ g_1, g_0, 0,0\}^{\rm T}\,, \end{equation} so that the asymptotic scalar is a positive constant fixed by the ratio of $g_0$ and $g_1$. We then choose a purely magnetic charge vector, \begin{equation} \Gamma = \{ p^0,\?p^1,\? 0,\? 0 \}^{\rm T}\,, \end{equation} and it then easily follows that the attractor solution given by the vector ${\cal H}_0$ also needs to have purely magnetic components, \begin{equation}\label{eq:ex4-H} {\cal H}_0 = \{ h^0,\?h^1,\? 0,\? 0 \}^{\rm T}\,.
\end{equation} The nontrivial components of \eqref{eq:attr-fin} then read \begin{equation} p^0 = g_1 \left(h^0 h^1+g_0 g_1 \mathrm{j}^2 \right) \,, \qquad p^1 = g_0 \left( h^0 h^1+g_0 g_1 \mathrm{j}^2 \right)\,, \end{equation} which, together with the constraint for arbitrary internal space $\Iprod{G}{\Gamma} = -\kappa$, imply that \begin{equation} g_0 p^0 = g_1 p^1 = - \frac{\kappa}{2}\ . \end{equation} One can easily see that in this special case the attractor mechanism cannot fully fix the scalars in terms of the charges, due to the fact that only the combination $h^0 h^1$ appears in the above equations. We can nevertheless show that a smooth horizon in this class must necessarily be of the hyperbolic type, by evaluating the entropy from \eqref{eq:BHentropy-horizon}, \begin{equation} S = - \frac{\pi\ \kappa}{2 g_0 g_1}\ , \end{equation} which is only positive in the hyperbolic case $\kappa = -1$. We have therefore constructed a rotating hyperbolic black hole near-horizon geometry that is a smooth continuation of the static case. This appears to be in another BPS branch of rotating solutions compared to the one discovered in \cite{Klemm:2011xw} in the same $X^0 X^1$ model. \subsubsection{Models from 5d reduction} We now turn to an example attractor in a model that does not admit an AdS$_4$ vacuum, but exhibits runaway behaviour. Nevertheless, this model is physically relevant, since it arises from dimensional reduction of the gauged STU model in five dimensions, which admits an AdS$_5$ vacuum. Indeed, reduction of an asymptotically AdS$_5$ spacetime along an isometry leads to a metric with hvLif asymptotics of the type \eqref{eq:rank3-vec}. It follows that one can---in principle---construct physically interesting solutions in five dimensions by lifting solutions of models with gauging vectors $G$ of rank-3, as in \eqref{eq:metr-rank-3}.
Here, we do not pursue such a goal, restricting ourselves to matching an attractor constructed in a model with rank-3 gauging to the reduction of the known solutions of \cite{Gutowski:2004ez,Gutowski:2004yv, Kunduri:2006ek}. Our starting point is the rank-3 vector of FI terms \begin{equation} G = \sqrt{2}\?\{ 0,\?0,\?0,\?0,\? g_0,\? g,\? g,\? g \}^{\rm T} \,, \end{equation} and we consider a set of charges of the type \begin{equation}\label{eq:ex2-ch} \Gamma = \{ p^0,\?0,\? q_0,\? q_i \}^{\rm T}\,. \end{equation} In order to connect to the four parameters of the solution in \cite{Kunduri:2006ek}, which can be thought of as three electric charges and one angular momentum, we fix the five charges in \eqref{eq:ex2-ch} in terms of four parameters, $\delta$ and $\mu_1$, $\mu_2$, $\mu_3$ as in \eqref{eq:KLR-charge}, so that there is one constraint among them, whose explicit form is not important for the following. The condition \eqref{eq:final-Sig} now leads to the identification \begin{equation} g_0 = \cosh\delta\,, \end{equation} and one can verify that the attractor equation \eqref{eq:attr-fin} is solved by the vector \begin{align}\label{eq:ex2-sol} {\cal H}_0 =&\,\frac{\sqrt{2}\? \mathrm{v}}{8\?g^3}\? \{ 0,\? 2\?g ,\? 2\?g ,\? 2\?g ,\?\tfrac12 - g_0,\? - g_0\?g,\? - g_0\?g,\? - g_0\?g \}^{\rm T} \nonumber\\ &\, + \frac1{\sqrt{2}\?g_0\? \mathrm{v}}\? \{ 1,\? - m_1 ,\? - m_2 ,\? - m_3 ,\? m_1\?m_2\?m_3,\? m_2\?m_3 ,\?m_1\?m_3,\? m_1\?m_2 \}^{\rm T}\,, \end{align} where we defined the shorthand parameters \begin{equation} m_i = \frac{1+g^2\?\mu_i}{2\?g}\,, \end{equation} and the remaining parameters of the solution are given as \begin{equation}\label{eq:ex2-J} \mathrm{v}^2 = 2\?\frac{g}{g_0}\?(m_1 + m_2 + m_3) - 2 \,, \qquad \mathrm{j} = -\frac{\sinh{\delta}}{8\?g^3}\? \mathrm{v} \,.
\end{equation} Note that \eqref{eq:ex2-J} fixes $\mathrm{j}$ in terms of the other parameters, since the four free parameters of \cite{Kunduri:2006ek} have been interpreted as charges in the 4d solution above. The situation is reversed in \cite{Kunduri:2006ek}, as there are three electric charges in the 5d solution and the fourth parameter corresponds to the angular momentum parameter $\mathrm{j}$ above, while $p^0$ is interpreted as a geometrical parameter in five dimensions and is therefore fixed in terms of the other quantities. We refer to Appendix \ref{sec:klr} for a review of the solution of \cite{Kunduri:2006ek}, its recasting in terms of the parameters above and the reduction to four dimensions. Comparison with \eqref{eq:KLR-J} and \eqref{eq:KLR-G} shows that the attractor described by \eqref{eq:ex2-sol}-\eqref{eq:ex2-J} indeed matches with the result of this reduction near the horizon. The expression in \eqref{eq:KLR-sol} allows one to construct the complete four-dimensional solution, which can be seen to asymptote exactly to the hvLif geometry \eqref{eq:metr-rank-3} for $\delta=0$, while for $\delta\neq 0$ one finds a more general rotating asymptotic geometry with the same hvLif exponents. \subsubsection{Asymptotically flat black hole} Finally, we present an example of an attractor corresponding to an asymptotically flat black hole. Since the asymptotic Minkowski vacuum is not supersymmetric in an abelian gauged theory, such black holes can be viewed both as flows between this vacuum and a supersymmetric AdS$_2$ \cite{Hristov:2012nu} and as non-BPS black holes in ungauged supergravity \cite{Bossard:2012xsa}.\footnote{Since the potential vanishes identically in this case, the bosonic sector reduces to that of ungauged supergravity.} A concrete example is given by the STU model with FI parameters as in \eqref{eq:rank1-vec}, explicitly \begin{equation} g^0 = g^i = g_i = 0 \,, \qquad g_0\neq 0 \,, \end{equation} which leads to a Minkowski vacuum.
Choosing the charges as \begin{equation}\label{eq:ex3-ch} \Gamma = \{ p^0,\?0,\? 0,\? q_i \}^{\rm T}\,, \end{equation} this model admits an asymptotically flat under-rotating black hole solution whose near horizon geometry solves the BPS equation we presented. The constraint \eqref{eq:final-Sig} together with the attractor equation \eqref{eq:attr-fin} can be solved as \begin{equation} g_0 = -\frac{1}{p^0}\,, \qquad \mathrm{v}=1\,, \qquad {\cal H}_0 = - \{ p^0,\?0,\? 0,\? \frac{\sqrt{p^0 q_1 q_2 q_3}}{q_i} \}^{\rm T}\,, \end{equation} while \eqref{eq:Delta-sol} collapses to $\Delta(\theta)=1$. The metric then assumes the standard form for under-rotating attractors in ungauged supergravity: \begin{equation} ds^2 = -e^{2\?\mathsf{u}} \left(r\?dt+\omega_0 \right)^2 + e^{-2\?\mathsf{u}} \left( \frac{dr^2}{r^2} + d\theta^2 + \sin^2 \theta d\phi^2 \right) \end{equation} with \begin{equation} e^{-4\?\mathsf{u}} = - 4 p^0 q_1 q_2 q_3 - \mathrm{j}^2 \cos^2 \theta \qquad \quad \omega_0 = \mathrm{j} \sin^2 \theta\? d\phi\,. \end{equation} The remaining fields take the form summarized in section \ref{sec:summ-attr} and are given explicitly in \cite[App. C]{Hristov:2012nu}. \section{Full rotating flow in AdS\texorpdfstring{$_4$}{4}} \label{sec:full-sol} \subsection{The rotating black hole solution} \label{sec:flow} Here we consider the full BPS flow for rotating black holes, interpolating between the attractor solution in the previous section and asymptotically locally AdS$_4$. 
Inspired by the known solutions in the static case \cite{Cacciatori:2009iz,Dall'Agata:2010gj}, in the form given in \cite{Katmadas:2014faa, Halmagyi:2014qza} and reviewed in section \ref{sec:static-bh}, we take the three-dimensional base metric as in \eqref{eq:3d-metr} and concentrate for simplicity on the case where $\Sigma$ is of spherical topology, as in \eqref{eq:base-bps-sph}, so that the base metric becomes \begin{equation} ds^2_3 = dr^2 + {\mathrm{e}}^{2\?\psi(r)} \left( \frac{d\theta^2}{\Delta(\theta)} + \?\Delta(\theta)\?\sin^2\!\theta\? d\phi^2 \right)\,. \end{equation} The total metric \eqref{eq:metr-bps} then takes the explicit form\footnote{In section 5 of \cite{Gnecchi:2013mja} an ansatz for rotating AdS$_4$ black holes with arbitrary prepotentials was put forward. The analysis presented here suggests a possible generalization of the form of the function $v$, which in (5.2) of \cite{Gnecchi:2013mja} reads $v = Q-P$, to $v = \xi_1 Q - \xi_2 P$, with $\xi_1,\xi_2$ constant. In particular, for the specific case treated in this section we choose $\xi_2=0$, and this enables us to find the novel analytic solutions for the full flow for arbitrary models with vector multiplets.} \begin{equation}\label{eq:metr-bps-full} ds^2_4 = -{\mathrm{e}}^{2U} \?(dt + \omega )^2 + {\mathrm{e}}^{-2U}\?\left( dr^2 + \frac{ {\mathrm{e}}^{2\?\psi} }{\Delta(\theta)}\? d\theta^2 + {\mathrm{e}}^{2\?\psi}\?\Delta(\theta)\?\sin^2\!\theta\? d\phi^2 \right) \,. \end{equation} We now proceed to solve the BPS conditions \eqref{eq:final} for the full flow, following similar steps as in section \ref{subsec:nhg-BPS}. To this end, we introduce the combination \begin{equation}\label{eq:ItoH} {\mathrm{e}}^\psi \? \mathcal{I} = {\cal H} (r) + \mathrm{j} \cos \theta\ G\ , \end{equation} which allows to write the flow equation \eqref{eq:final-flow} as \begin{align}\label{eq:fl-Ortin-fin} {\mathrm{e}}^{-\psi}\?\star d ({\cal H} + \mathrm{j} \cos \theta\ G) -\frac1{4}\? 
{\mathrm{e}}^{-2\?\psi}\?I^\prime_4( {\cal H} + \mathrm{j} \cos \theta\ G , {\cal H} + \mathrm{j} \cos \theta\ G, \star\hat{G}) - r\?d\omega\? G + {\cal F} = 0\,, \end{align} while the remaining BPS equations in \eqref{eq:final} become \begin{align} \Iprod{ G }{{\cal H}} =&\, ( {\mathrm{e}}^\psi )^\prime \,, \label{eq:base-fin} \\ \Iprod{\cal A}{G} = &\, \frac{\left( \Delta(\theta)\sin^2{\theta} \right)^\prime}{2\?\sin{\theta}}\? d\phi\? \,, \label{eq:gauge-fin} \\ {\mathrm{e}}^{2\?\psi} \star d\omega = &\, \Iprod{ d ({\cal H} + \mathrm{j} \cos \theta\ G ) }{ {\cal H} + \mathrm{j} \cos \theta\ G } \nonumber \\ & +\frac{1}{6}\?\?{\mathrm{e}}^{-\?\psi} I_4( {\cal H} + \mathrm{j} \cos \theta\ G , {\cal H}+ \mathrm{j} \cos \theta\ G , {\cal H}+ \mathrm{j} \cos \theta\ G , G )\?dr\,. \label{eq:domega-fin} \end{align} This set of equations is a direct generalization of the corresponding static one, so that we may be guided by the known solutions to it. Here, we restrict for brevity on generalizing the simpler class of \cite{Katmadas:2014faa} to the rotating case, expecting that the most general solution of the static equations in \cite{Halmagyi:2014qza} can be also treated along the same lines. We therefore adopt the simple ansatz \begin{equation}\label{eq:flow-ansatz} {\cal H} (r) = {\cal H}_0 + {\cal H}_\infty\ r \,, \qquad \omega = \left( \omega_{\scriptscriptstyle \infty}(\theta) -\mathrm{j}\? \frac{\Delta(\theta)}{{\mathrm{e}}^{\psi}} \right) \?\sin^2\!\theta\?d\phi \,, \end{equation} where the two constant symplectic vectors $ {\cal H}_0, {\cal H}_\infty$ and the function $\omega_{\scriptscriptstyle \infty}(\theta)$ are to be determined. In this form, it is manifest that this ansatz reduces to the attractor solution of the previous section for $r\rightarrow 0$, while ${\cal H}_\infty$ and $\omega_{\scriptscriptstyle \infty}$ parametrize the asymptotic region. 
Inserting \eqref{eq:flow-ansatz} in \eqref{eq:base-fin} leads to the following expression for the function ${\mathrm{e}}^\psi$, as \begin{align}\label{eq:epsi-full} {\mathrm{e}}^\psi = &\, \frac12\? \Iprod{G}{{\cal H}_\infty}\?r^2 + \Iprod{G}{{\cal H}_0}\?r = I_4(G)^{1/4}\?r^2 + \Iprod{G}{{\cal H}_0}\?r \,, \end{align} where we disregarded an additive integration constant and in the second equality we imposed that the ${\cal O}(r^2)$ term is such that \eqref{eq:metr-bps-full} allows for AdS$_4$ asymptotics. Similarly, the BPS flow equation \eqref{eq:fl-Ortin-fin} reduces to \begin{align}\label{eq:fl-Ortin-ans} &\?{\mathrm{e}}^{\psi}\?{\cal H}_\infty\?\sin\theta\?d\theta\wedge d\phi -\frac1{4}\? I^\prime_4( {\cal H}_0 + \mathrm{j}\? \cos\theta\?G , {\cal H}_0 + \mathrm{j}\? \cos\theta\?G , G)\?\sin\theta\?d\theta\wedge d\phi \nonumber\\ &\? \qquad\qquad\qquad\qquad - r\?d( \omega_{\scriptscriptstyle \infty}\?\sin^2\!\theta)\wedge d\phi\, G + d\left(\mathrm{j}\? r\?\frac{\Delta(\theta)}{{\mathrm{e}}^\psi} \?\sin^2\!\theta\?d\phi \right)\? G + {\cal F} = 0\,, \end{align} with ${\mathrm{e}}^{\psi}$ as in \eqref{eq:epsi-full}. Note that since $\omega_{\scriptscriptstyle \infty}$ depends only on $\theta$, only the last two terms in \eqref{eq:fl-Ortin-ans} can have a leg along $dr$. It then follows that we can solve this flow equation order by order in $r$ for the $d\theta \wedge d \phi$ terms, similar to the static case. Starting with the ${\cal O}(r^2)$ terms of \eqref{eq:epsi-full}-\eqref{eq:fl-Ortin-ans} we find that they are solved by fixing the vector ${\cal H}_\infty$ as \begin{align}\label{eq:H-infty-sol} {\cal H}_\infty = \frac{1}{2}\? I_4(G)^{-3/4}\?I_4(G)^{\prime}\,, \end{align} in exactly the same way as in the static case. The sub-leading term in $r$ instead reduces to \begin{align} \Iprod{G}{{\cal H}_0}\? {\cal H}_\infty = \frac1{2}\? I^\prime_4( {\cal H}_\infty ,{\cal H}_0 + \mathrm{j}\? 
\cos\theta\?G, G) + \frac{\partial_\theta( \omega_{\scriptscriptstyle \infty}\?\sin^2\!\theta)}{\sin\theta}\? G \,, \end{align} which can be further simplified using \eqref{eq:proj}, leading to the conditions \begin{gather} \Iprod{{\cal H}_0}{{\cal H}_\infty}=0\,\quad \Rightarrow \quad I_4 ({\cal H}_0, G, G, G) = 0\ , \label{eq:flow-conds}\\ \omega_{\scriptscriptstyle \infty} = \mathrm{j}\? I_4(G)^{1/4}\,. \end{gather} With these results, the flow equation \eqref{eq:fl-Ortin-ans} reduces to the following expression for the gauge field strengths \begin{align}\label{eq:gauge-fields-sol} {\cal F} = \frac1{4}\? I^\prime_4( {\cal H}_0 + \mathrm{j}\? \cos\theta\?G , {\cal H}_0 + \mathrm{j}\? \cos\theta\?G , G)\?\sin\theta\?d\theta\wedge d\phi - d\left(\mathrm{j}\? r\?\frac{\Delta(\theta)}{{\mathrm{e}}^\psi} \?\sin^2\!\theta\?d\phi \right)\? G \,, \end{align} which is manifestly closed and generalizes the attractor fluxes \eqref{eq:attr-fin0}-\eqref{eq:F-attr} to the full flow. With these results, it is straightforward to verify that \eqref{eq:gauge-fin} and \eqref{eq:domega-fin} reduce to their attractor counterparts, leading to \begin{empheq}[box=\fbox]{align}\label{eq:final-sph-2} \Iprod{G}{\Gamma} = -1\ , \end{empheq} due to our choice of spherical topology, as well as \eqref{eq:Delta-sol} and \eqref{eq:NUT-ch}, which determine the function $\Delta(\theta)$ in \eqref{eq:flow-ansatz} and ensure regularity, respectively. Integrating the field strengths ${\cal F}$ in \eqref{eq:gauge-fields-sol} along the sphere leads to the attractor equation \begin{empheq}[box=\fbox]{align}\label{eq:attr-fin-2} \Gamma = \frac{1}{4}\? I^\prime_4\left({\cal H}_0, {\cal H}_0, G \right) + \frac{1}{2}\? \mathrm{j}^2\? I^\prime_4\left( G \right)\,, \end{empheq} which, as expected, is identical to the attractor condition in \eqref{eq:attr-fin}, upon identifying the vector ${\cal H}_0$ with the attractor solution. 
Finally we also find the constraints \begin{empheq}[box=\fbox]{align}\label{eq:charge-constraints} I_4 (\Gamma, \Gamma, \Gamma, G) = I_4 (G, G, G, \Gamma) = 0\ , \end{empheq} upon contracting \eqref{eq:attr-fin-2} with $I_4'(G)$ and using \eqref{eq:flow-conds}. One can write the solution to \eqref{eq:attr-fin-2}-\eqref{eq:charge-constraints} by using the explicit solution of \cite[Sec. 3.2]{Halmagyi:2014qza} in the static case, for a shifted charge $\hat\Gamma = \Gamma - \frac{1}{2}\? \mathrm{j}^2\? I^\prime_4( G )$, as \begin{equation}\label{eq:H0-sol} {\cal H}_0 = \frac{1}{\sqrt{C_1}}\?\left( C_2\? I^\prime_4( G ) + I^\prime_4(\hat\Gamma, \hat\Gamma, G) \right)\,, \end{equation} where the two constants $C_1$, $C_2$ are given by \begin{align} C_1(\hat\Gamma,G) = &\? C_1(\Gamma,G) =\Iprod{G}{\Gamma}\?I_4(\Gamma, \Gamma, G, G)-2\? \Iprod{I_4^\prime(G)}{I_4^\prime(\Gamma)} \,, \\ C_2(\hat\Gamma,G) = &\? -\frac{1}{2\?I_4(G)}\,\left( \frac14\?I_4(\hat\Gamma, \hat\Gamma, G, G) \pm \sqrt{\frac1{16}\?I_4(\hat\Gamma, \hat\Gamma, G, G)^2 - 4\?I_4(G)\? I_4(\hat\Gamma) } \right) \,, \label{eq:H0-sol-coeff} \end{align} and we stress that the shift in $\Gamma$ cancels out in the expression for $C_1$, a property that will be important below. This concludes the construction of the BPS black hole flows connecting the family of attractors in the previous section to asymptotic AdS$_4$. Summarizing, one starts from a charge vector satisfying the constraints \eqref{eq:final-sph-2} and \eqref{eq:charge-constraints} for a given gauging $G$, and solves \eqref{eq:attr-fin-2} for the vector ${\cal H}_0$. The metric for the solution is then given by \eqref{eq:metr-bps-full} with the scale factor ${\mathrm{e}}^\psi$ in \eqref{eq:epsi-full} and the one-form $\omega$ in \eqref{eq:flow-ansatz}, while the function $\Delta(\theta)$ is given by \eqref{eq:Delta-sol}. The second scale factor ${\mathrm{e}}^U$ is given by the expression \begin{align}\label{eq:eU-flow} {\mathrm{e}}^{-4U} = &\? 
{\mathrm{e}}^{-4\psi}\left(I_4({\cal H}) +\frac14\? I_4({\cal H}, {\cal H}, G, G)\? \mathrm{j}^2 + I_4(G)\? \mathrm{j}^4 \rule{0pt}{6mm} \right. \nonumber\\ &\? \left.\rule{0pt}{6mm} \hspace{12mm} + \left( \Delta(\theta) -2 \? I_4(G)^{1/4}{\mathrm{e}}^\psi \right) \mathrm{j}^2 \sin^2\theta \right) \,, \end{align} obtained from \eqref{eq:ItoH}, with ${\cal H}$ as in \eqref{eq:flow-ansatz}. The scalar fields are also given by \eqref{eq:ItoH}, as explained around \eqref{eq:section-phys}, upon replacing $\mathcal{I}_0$ by $\mathcal{I}$ in \eqref{eq:ItoH}. Finally, the gauge field strengths are given by \begin{equation}\label{eq:F-full} \mathsf{F} = d\left[ \left(- \frac12\? {\mathrm{e}}^{4\?U}\? I^\prime_4( \mathcal{I}) - G\?r \right)\,(dt+\omega) \right] + {\cal F} \,, \end{equation} where the spatial components ${\cal F}$ are given in \eqref{eq:gauge-fields-sol}. \subsection{Conserved charges} \label{subsec:charges} We now turn to a discussion on the physical properties of the full black hole solutions constructed in section \ref{sec:flow}. As mentioned above, the electromagnetic charge vector is computed through its standard definition \eqref{eq:charge-def} with the result \eqref{eq:attr-fin-2}, which also constitutes the attractor equation for the scalars. Turning to the conserved charges in the gravitational sector, it is useful to note that the superalgebra underlying the BPS black hole solutions we have described is $U(1|1)$ \cite{Hristov:2011ye,Hristov:2011qr}. The latter is characterized by the anticommutation relation between two supercharges $ \{Q^I,Q^J \} = H \delta^{IJ}$, where $H$ is the generator of time translations, so that the angular momentum does not enter into the BPS bound. We therefore conclude that the mass of these solutions is $M=0$ since they saturate the BPS bound, while the angular momentum charge is free and has to be computed independently. 
The computation of the conserved angular momentum proceeds through the Noether integral associated to diffeomorphisms along the angular isometry. The central object in this approach is the Noether potential, which for a two-derivative Lagrangian ${\cal L}$ containing the Einstein--Hilbert term as well as Maxwell and scalar fields minimally coupled to gravity, reads \begin{equation}\label{eq:Noether-pot} Q_{\mu\nu}(\xi) = \nabla_\mu \xi_\nu + (\xi\cdot A^I)\?\frac{\partial {\cal L}}{\partial F^I{}^{\mu\nu}} = \nabla_\mu \xi_\nu + (\xi\cdot A^I)\?G_I{}_{\mu\nu}\,, \end{equation} where $\xi_\mu$ is a Killing vector and in the second equality we used the definition of the dual gauge field strengths. The supergravity action \eqref{Ssugra4D} clearly falls in this class, so that we can construct the corresponding Noether integral for the angular momentum\footnote{Note that one can in principle also compute the mass using the Noether potential for the timelike isometry, but for asymptotically AdS spacetimes this integral diverges naively, and an appropriate renormalization is required.} based on \eqref{eq:Noether-pot} for the rotational Killing vector $\xi^\phi=\partial_\phi$. In particular, one can exploit the compactness of the orbits of the rotational Killing vector $\xi^\phi$ to write the angular momentum ${\cal J}$ as \begin{equation}\label{eq:J-Noether} {\cal J} = \frac1{16\?\pi}\?\int_S dS^{\mu\nu} Q_{\mu\nu}(\xi^\phi) = \frac1{16\?\pi}\?\int_S dS^{\mu\nu} \left( \nabla_\mu \xi^\phi_\nu + (A^I_\phi)\?G_I{}_{\mu\nu} \right) \,, \end{equation} where $S$ is any surface enclosing the black hole horizon and $dS^{\mu\nu}$ is its surface element. The conservation of the Noether potential \eqref{eq:Noether-pot} ensures that \eqref{eq:J-Noether} is independent of the surface $S$, so that one may compute this integral either at the attractor described in section \ref{subsec:nhg-BPS} or in the asymptotic region of the full black hole solutions in section \ref{sec:flow}. 
We have performed both computations, obtaining the same result as expected, but in this section we present in some detail only the computation in the asymptotic region, for brevity. When $S$ in \eqref{eq:J-Noether} is identified with the asymptotic $\text{S}^2_{\infty}$, the asymptotic constants of the gauge fields $A^I_\phi$ can be set to zero by a judicious gauge transformation\footnote{This seems as an inherently asymptotic operation, but it can be enforced at the horizon by choosing the vector potentials ${\cal A}$ corresponding to the horizon field strengths \eqref{eq:F-attr} such that ${\cal A}(\theta=0)=-{\cal A}(\theta=\pi)$, as in the standard prescription of \cite[Sec. 3]{Astefanesei:2006dd}.}, so that the integral collapses to the standard Komar integral \begin{equation}\label{eq:J-Komar} {\cal J} = \frac1{16\?\pi}\?\int_{\text{S}^2_{\infty}} dS^{\mu\nu} \nabla_\mu \xi^\phi_\nu \,. \end{equation} In order to compute this integral explicitly, it is useful to put the metric in the canonical ADM form, which reads: \begin{equation}\label{eq:metric-ADM} ds^2 = - N^2 dt^2 + \sigma (d \phi - \Omega dt)^2 + \frac{dr^2}{Q} + \frac{d\theta^2}{T}\,, \end{equation} where the warp functions $N$, $\sigma$ and $\Omega$ are identified as \begin{align} Q =&\? {\mathrm{e}}^{2U}\,, \qquad T= {\mathrm{e}}^{2U} {\mathrm{e}}^{-2\?\psi} \Delta(\theta) \,, \nonumber\\ \sigma = &\? {\mathrm{e}}^{-2\?U} {\mathrm{e}}^{2\?\psi} \Delta(\theta)\? \sin^2 \theta - {\mathrm{e}}^{2\?U} \omega^2\,, \label{eq:ADM}\\ \Omega = &\? \frac{{\mathrm{e}}^{2\?U} \omega}{\sigma} = \frac{{\mathrm{e}}^{2\?U} \omega }{{\mathrm{e}}^{-2\?U} {\mathrm{e}}^{2\?\psi} \Delta(\theta)\? \sin^2 \theta - {\mathrm{e}}^{2\?U} \omega^2}\,, \nonumber\\ N^2 = &\? {\mathrm{e}}^{2\?U} + \sigma\? \Omega^2 = \frac{{\mathrm{e}}^{2\?\psi} \Delta(\theta) \? \sin^2 \theta}{{\mathrm{e}}^{-2\?U} {\mathrm{e}}^{2\?\psi} \Delta(\theta) \? \sin^2 \theta - {\mathrm{e}}^{2\?U} \omega^2}\,. 
\nonumber \end{align} From this we can read off the angular velocity at infinity: \begin{equation} \label{omega_inf} \Omega_{\infty} = \lim_{r \rightarrow \infty} \Omega = I_4(G)^{3/4} \, \mathrm{j} = \frac{\mathrm{j}}{l_{AdS}^3}\,, \end{equation} where $l_{AdS}^2 = I_4(G)^{-1/2}$. We note in passing that the angular velocity at the horizon, computed as $\Omega_h =\Omega(r=r_{h})$, vanishes: \begin{equation} \Omega_h =0\,, \end{equation} as one may also verify by \eqref{eq:metr-Sen}. In terms of the objects in \eqref{eq:ADM}, the angular momentum Komar integral \eqref{eq:J-Komar} is computed by using \begin{equation} \xi^\phi= \frac{\partial}{\partial\phi} \,, \qquad dS^{\mu\nu} = (v^{\mu} u^{\nu} - v^{\nu}u^{\mu})\,\sqrt{ \frac{\sigma}{T} }\?d\theta d\phi\,, \end{equation} where the ratio $\sigma/T$ denotes the induced metric on a two-sphere of constant $r$ and $t$, and the two vectors $u^{\mu}$, $v^{\mu}$ are given by \begin{equation} u=\frac1N\?\left(\frac{\partial}{\partial t}+\Omega\?\frac{\partial}{\partial\phi} \right) \,, \qquad v= \sqrt{Q}\,\frac{\partial}{\partial r}\,, \end{equation} i.e.\ they are the normal vector of a $t$-constant hypersurface and the normal outward-pointing vector to the boundary, respectively. Evaluation of \eqref{eq:J-Komar} proceeds straightforwardly, upon using the expressions in \eqref{eq:epsi-full}, \eqref{eq:flow-ansatz} and \eqref{eq:eU-flow} for ${\mathrm{e}}^\psi$, $\omega$ and ${\mathrm{e}}^U$ respectively in \eqref{eq:ADM}. The result reads \begin{equation}\label{eq:J-general} {\cal J} = - \frac{\mathrm{j}}{2}\?\left( \Iprod{I_4^\prime(G)}{I_4^\prime({\cal H}_0)} - \frac12\?I_4({\cal H}_0, {\cal H}_0, G, G)\?\Iprod{G}{{\cal H}_0} \rule[.1cm]{0pt}{\baselineskip}\right)\,, \end{equation} where we also used the conditions \eqref{eq:flow-conds} in the derivation. 
The expression \eqref{eq:J-general} for the angular momentum must be solved along with \eqref{eq:attr-fin-2} for ${\cal H}_0$ and $\mathrm{j}$ in order to obtain all physical quantities in terms of the conserved charges. Using the explicit solution \eqref{eq:H0-sol} for the ${\cal H}_0$ along with the constraints \eqref{eq:charge-constraints}, one can show that \eqref{eq:J-general} takes the form \begin{equation}\label{eq:J-Q} {\cal J} = \frac{\mathrm{j}}{2}\?\sqrt{\Iprod{G}{\Gamma}\?I_4(\Gamma, \Gamma, G, G)-2\? \Iprod{I_4^\prime(G)}{I_4^\prime(\Gamma)} } \,, \end{equation} which is independent of $\mathrm{j}$, as mentioned below \eqref{eq:H0-sol-coeff}. It follows that one may use \eqref{eq:J-Q} to write $\mathrm{j}$ in \eqref{eq:attr-fin-2} in terms of the charges, as \begin{empheq}[box=\fbox]{align}\label{eq:attr-fin-J} \frac{1}{4}\? I^\prime_4\left({\cal H}_0, {\cal H}_0, G \right) = \Gamma - \frac{2\?{\cal J}^2}{\Iprod{G}{\Gamma}\?I_4(\Gamma, \Gamma, G, G)-2\? \Iprod{I_4^\prime(G)}{I_4^\prime(\Gamma)}}\? I^\prime_4\left( G \right)\,, \end{empheq} and then obtain the solution to this equation for the ${\cal H}_0$ by replacing the shifted charge $\hat\Gamma$ in \eqref{eq:H0-sol} for the shifted charge in the right hand side of \eqref{eq:attr-fin-J}. The entropy formula \eqref{eq:BHentropy-horizon} then takes the following form in terms of conserved charges only \begin{equation} \label{entropy_bigJ-gen} S_{BH} = \pi \sqrt{\frac{l_{AdS}^{4} }{2}\,\left( \frac14\?I_4(\Gamma, \Gamma, G, G) \pm \sqrt{\frac1{16}\?I_4(\Gamma, \Gamma, G, G)^2 - 4 \frac{( I_4(\Gamma) + {\cal J}^2)}{l_{AdS}^{4}} } \right)}\,, \end{equation} where $l_{AdS}^2 = I_4(G)^{-1/2}$ and we have again set $G_N = 1$. In the spherical case one needs to strictly choose the positive sign in the above formula in order to possibly find a positive real answer, i.e.\ a regular black hole. 
One can see that the angular momentum is bounded from above due to the term in the inner square root and that \eqref{entropy_bigJ-gen} reduces to the entropy formula of \cite{Halmagyi:2013qoa} for static black holes upon setting $\mathcal{J} =0$. Before concluding this section, we would like to make a few comments on the conformal boundary of our solutions, which pertain to the class of asymptotically locally AdS$_4$ spacetimes (see for example \cite{Papadimitriou:2005ii}). Taking the limit $r\rightarrow \infty$ of the metric \eqref{eq:metric-ADM}, we see that the boundary metric approaches the form \begin{equation} \label{as_bound} ds^2= r^2 \Delta(\theta) \left[- \frac{dt^2}{l_{AdS}^2} +\frac{d \theta^2}{\Delta(\theta)^2}+ \frac{\sin^2 \theta}{\Delta(\theta)} \left( d \phi + \frac{\mathrm{j}}{l_{AdS}^3} dt\right)^2 \right]\,. \end{equation} This is not the standard metric on $R \times S^2$, due to the fact that there is a non-zero angular velocity at infinity $\Omega_{\infty}$, as in \eqref{omega_inf}. The metric in square bracket is that of the Einstein space $R \times S^2$ seen by a rotating frame of reference. The coordinate change \begin{equation} t' = \frac{t \sqrt{\Xi}}{l_{AdS}}\,, \qquad \phi' = \phi \sqrt{\Xi} + \frac{\mathrm{j}}{l_{AdS}^2} t' \,, \qquad \Xi = 1 + \frac{\mathrm{j}^2}{l_{AdS}^4}\,, \end{equation} brings the metric to the form \begin{equation} \label{as_bound2} ds^2= \frac{r^2 \Delta(\theta)}{\Xi} \left[-dt'^2+\frac{ \Xi d \theta^2}{\Delta(\theta)^2}+ \frac{\sin^2 \theta}{\Delta(\theta)}d \phi'^2 \right]\,, \end{equation} while the further reparametrization $\Xi \tan^2\theta' = \tan^2 \theta $ yields \begin{equation} ds^2 = r^2 \frac{\cos^2\theta}{\cos^2 \theta'} \left[- dt'^2 + d\theta'^2 + \sin^2 \theta' d\phi^2 \right]\,, \end{equation} which is the standard form $R \times S^2$ up to the conformal factor $ \cos^2 \theta/ \cos^2 \theta'$. 
Hence the boundary metric in \eqref{as_bound} is conformal to the standard boundary of four-dimensional AdS space. More details can be found for instance in \cite{Hawking:1998kw,Papadimitriou:2005ii,Gibbons:2004ai,Gibbons:2005vp,Toldo:2016nia}. Note that the boundary data falls into the general class discussed in \cite{Hristov:2013spa}. After a Wick rotation to Euclidean signature, this case was studied in \cite{Benini:2015noa} and \cite{Closset:2018ghr} corresponding to the refinement of the topologically twisted index by angular momentum. \subsection{An example solution} \label{sec:example-flow} We now turn to an example rotating black hole solution in AdS$_4$ in the T$^3$ model, which is the full flow corresponding to the attractor in section \ref{Model_ads_vac}. The solution is naturally described by replacing the constants ${\cal H}_0$ parameterizing the attractor in \eqref{eq:ex1-H} by the linear functions ${\cal H}$ in \eqref{eq:flow-ansatz}, as \begin{gather}\label{eq:exF-H} {\cal H} = \{ 0,\?{\cal H}^1,\? {\cal H}_0,\? 0 \}^{\rm T}\,, \nonumber\\ {\cal H}^1 = h^1 + \frac1{\sqrt{2}}\?\left(-\frac{g^0}{g_1} \right)^{1/4} r\,, \qquad {\cal H}_0 = h_0 + \frac1{\sqrt{2}}\?\left(-\frac{g_1}{g^0} \right)^{3/4} r\,. \end{gather} The scale factor ${\mathrm{e}}^\psi$ resulting from \eqref{eq:epsi-full} reads \begin{equation} {\mathrm{e}}^\psi = (-4\? g^0 g_1^3)^{1/4}r^2 + (- h_0 g^0 + 3\? g_1 h^1)\?r \,, \end{equation} while the scale factor ${\mathrm{e}}^U$ is given concisely as \begin{align} {\mathrm{e}}^{-4U} = &\? {\mathrm{e}}^{-4\psi}\left( 4\?{\cal H}_0\?({\cal H}^1)^3 - \left( ({\cal H}_0 g^0)^2 + 6\? g^0 g_1 {\cal H}_0 {\cal H}^1 -3 (g_1\? {\cal H}^1)^2 \right)\? \mathrm{j}^2 -4\? g^0 g_1^3\? \mathrm{j}^4 \rule{0pt}{6mm} \right. \nonumber\\ &\? \left.\rule{0pt}{6mm} \hspace{12mm} + \left( \Delta(\theta) -2 \? (-4\? 
g^0 g_1^3)^{1/4}{\mathrm{e}}^\psi \right) \mathrm{j}^2 \sin^2\theta \right) \,, \end{align} where $\Delta(\theta)$ is as in \eqref{eq:ex1-delta_th}. The rotation one-form $\omega$ is given by \begin{equation} \omega = \mathrm{j}\? \left(\rule{0pt}{4mm} (-4\? g^0 g_1^3)^{1/4} - {\mathrm{e}}^{-\psi}\ \Delta(\theta) \right) \?\sin^2\!\theta\?d\phi \,. \end{equation} With these definitions, the complex scalar assumes the form \begin{equation} z= \frac12\? \frac{ \mathrm{j}\? \cos \theta\? ( g_1\? h^1 +g^0\? h_0) + \mathrm{i}\? \sqrt{4\?{\cal H}_0\?({\cal H}^1)^3}}{ ({\cal H}^1)^2 -g^0\?g_1\? \mathrm{j}^2 \cos^2\theta}\,, \end{equation} while the vectors are given by \begin{align} A^0 = &\? -\frac{2\? ({\cal H}^1) ^3- g^0\? \mathrm{j}^2\? ({\cal H}_0\? g^0 + 3\? {\cal H}^1 g_1)\? \cos^2 \theta}{{\mathrm{e}}^{3\?\psi}\?{\mathrm{e}}^{-4 U}}\,(dt+\omega ) \nonumber\\[6pt] &\? - g^0\? r\? \left( dt + \mathrm{j}\? (-4\? g^0 g_1^3)^{1/4} \? \sin^2\theta \, d\phi \right) - \mathrm{j} \?g^0 \?( h_0\? g^0 + 3\? h^1 g_1)\? \sin^2\theta \, d\phi \,, \end{align} and \begin{align} A^1 = \?&\? \mathrm{j}\? \cos \theta \?\frac{{\cal H}^1 \? ({\cal H}_0 g^0 - {\cal H}^1 g_1) + 2\?g^0 g_1^2 \, \mathrm{j}^2\? \cos^2 \theta}{{\mathrm{e}}^{3\?\psi}\?{\mathrm{e}}^{-4 U}}\,(dt+\omega ) \nonumber\\[6pt] &\? - \left( p^1 + 2\?g^0 g_1^2 \, \mathrm{j}^2\?\sin^2\theta \right)\?\cos \theta \, d\phi \,, \end{align} where we have chosen the integration constants for the spatial components of the gauge fields such that they vanish asymptotically and we have used \eqref{eq:ex1-attr}. We now can compute the conserved charges applying the procedure described in the previous section. Using the formulas for the angular momentum \eqref{eq:J-general} we find \begin{equation}\label{J} \mathcal{J} = \mathrm{j} \? 
(g^0 h_0 + g_1 h^1)^3\,, \end{equation} which upon using the solution to the attractor equations for the $h$'s in terms of the charges in \eqref{eq:ex1-attr-sol}, leads to \begin{equation} \mathcal{J} = \mathrm{j} \? \left( 1 + 4\? g_1 p^1 \right)^{3/2}\,, \end{equation} which is indeed linear in the parameter $\mathrm{j}$. Using this result in the entropy formula \eqref{eq:ex1-S}, one finds the following expression for the entropy in terms of conserved quantities only \begin{eqnarray} \label{entropy_bigJ} S_{BH} = \pi \sqrt{\frac{\sqrt{(1 + 12\? g_1\? p^1 )(1 + 4\?g_1\? p^1)^3 - 4\?\mathcal{J}^2 l_{AdS}^{-4}} - (24\? (g_1 \? p^1)^2+12\? g_1\? p^1+1) }{2\? l_{AdS}^{-4}}}\,. \end{eqnarray} Finally, we record the following expression for the product of the areas of the four horizons of this solution\footnote{There are in principle four (complex) roots for the warp factor of an AdS black hole solution. Our configuration is extremal, and given the form of the warp factor the roots are pairwise equal $r_{1,2} =0$ and $r_{3,4} = -\frac{-g^0 h_0 +3\? g_1 h^1}{\sqrt{2} f_0^{1/4} g^{3/4}}$. The four areas correspond to these four roots.}: \begin{equation} \prod_{\alpha=1}^4 A_{\alpha} = (4 \pi)^4 l_{AdS}^4 \left(4 q_0 (p^1)^3 + \mathcal{J}^2 \right)\,, \end{equation} which turns out to depend only on the cosmological constant, quantized charges and angular momentum, as for other known classes of AdS black holes \cite{Cvetic:2010mn,Castro:2012av,Toldo:2012ec,Klemm:2012vm}. \section{The BPS entropy function} \label{sec:entropy} In this section, we turn to the definition of a BPS entropy function associated to our black hole solutions. Such objects have a long history, originating in the context of static BPS black holes in the ungauged theory, wherein it was observed that the attractor equations can be obtained by extremization of the central charge with respect to the scalars, while the entropy is given by its value at the extremum \cite{Ferrara:1996dd, Ferrara:1997tw}. 
Subsequently, it was shown that an extremization principle exists for any extremal black hole attractor, static or stationary, due to the scaling symmetry of the near horizon region \cite{Sen:2005wa, Astefanesei:2006dd}, reducing to the BPS entropy function when supersymmetric constraints are imposed. More recently, the corresponding BPS entropy functions for static black holes in gauged supergravity \cite{Cacciatori:2009iz, Dall'Agata:2010gj, Halmagyi:2014qza} have been matched with the topologically twisted index of the dual field theory, once it is extremized with respect to the fugacities \cite{Benini:2015eyy}. In this paper, we have constructed rotating BPS black holes in abelian gauged supergravity, generalizing the static ones described in \cite{Cacciatori:2009iz, Dall'Agata:2010gj}, for which a corresponding BPS entropy function is expected to exist. To this end, one may attempt to apply the two techniques used to derive a BPS entropy function in the static case, namely to either ``integrate'' the BPS equations to obtain a corresponding action, or to impose BPS constraints on the general entropy function obtained by reducing the Lagrangian at the attractor region \cite{Astefanesei:2006dd}. In practice, a naive attempt to obtain an action principle for the BPS attractor equations \eqref{eq:d-scalars}-\eqref{eq:omega-eqn} would lead to an entropy functional rather than an entropy function, since the dependence of the various fields on the horizon coordinate must be incorporated. However, the analysis of \cite{Astefanesei:2006dd} shows that for the case of a compact horizon, it should always be possible to reduce such a functional to a boundary term evaluated at the two poles. 
Based on these considerations, we propose a BPS entropy function for the black hole solutions of section \ref{sec:flow}, given by the following expression \begin{equation}\label{eq:entropy-fun-Sen} {\cal S} = -\frac\pi4\?\frac{\Iprod{\Gamma}{ I_4^\prime({\cal H}_0 + \mathrm{j}\ G \cos\theta) } - 4 \? \gamma\ \mathrm{j}^2\ \cos^2\theta}{\Iprod{G}{{\cal H}_0}\?\sqrt{I_4({\cal H}_0 + \mathrm{j}\ G \cos\theta)}}\? \cos\theta \, \Biggr|_{\,\theta = 0}^{\,\theta = \pi}\,, \end{equation} which is equivalent to \begin{equation}\label{eq:entropy-fun-extremization} {\cal S} = \frac{\pi}{4 \Iprod{G}{{\cal H}_0} \sqrt{{\cal W}(\mathrm{j})} } \left(\frac13 I_4(\Gamma, {\cal H}_0, {\cal H}_0, {\cal H}_0)+\mathrm{j}^2 (I_4(\Gamma, {\cal H}_0, G, G) - 8 \gamma) \right)\,, \end{equation} where we used the definition \eqref{eq:BHentropy-horizon} to shorten the notation. Here we used that the gauging $G$ and the vector of parameters ${\cal H}_0$ are such that the NUT charge \eqref{eq:NUT-ch} vanishes, while $\mathrm{j}$ is viewed as function of the ${\cal H}_0$ as $\mathrm{j}({\cal J},{\cal H}_0)$, whose explicit form is given by the expression for the angular momentum \eqref{eq:J-general}, as \begin{equation}\label{eq:gamma-def} \mathrm{j}({\cal J},{\cal H}_0) = - 2\?\left( \Iprod{I_4^\prime(G)}{I_4^\prime({\cal H}_0)} - \frac12 I_4({\cal H}_0, {\cal H}_0, G, G)\?\Iprod{G}{{\cal H}_0} \rule[.1cm]{0pt}{\baselineskip}\right)^{-1} {\cal J}\, \equiv - \frac{{\cal J}}{\gamma}. \end{equation} Note that the expressions \eqref{eq:entropy-fun-Sen} and \eqref{eq:entropy-fun-extremization} are formally equivalent. We have shown them separately as they serve two slightly different purposes. We propose \eqref{eq:entropy-fun-Sen} as the expression for Sen's entropy function in the rotating cases we consider, where one expects a covariant angular dependent expression evaluated on the poles. 
On the other hand, we propose \eqref{eq:entropy-fun-extremization} as the extremization principle for the black holes that naturally generalizes\footnote{In the $I_4$-formulation we use here, the relevant extremization principle in the static case can be derived directly from \eqref{eq:H-0} by contracting the left-hand side with $I_4'({\cal H}_0)$ and dividing by $\Iprod{G}{{\cal H}_0} \sqrt{{\cal W}}$, which produces the entropy on the right hand side. If we repeat the exact same steps with the attractor equation \eqref{eq:attr-fin}, we produce the entropy together with the two other terms in \eqref{eq:entropy-fun-extremization}. This derivation bypasses \eqref{eq:entropy-fun-Sen}.} the static result of \cite{Cacciatori:2009iz, Dall'Agata:2010gj}. It is straightforward, if cumbersome, to verify that the variation of \eqref{eq:entropy-fun-extremization} with respect to ${\cal H}_0$ and imposing the constraint \eqref{eq:flow-conds} leads to a set of equations that are solved upon using the attractor equation \eqref{eq:attr-fin-2}, while the constraint on ${\cal H}_0$ implies \eqref{eq:charge-constraints}. Evaluating \eqref{eq:entropy-fun-extremization} for this extremum leads to the entropy \eqref{eq:BHentropy-horizon}, as expected. While one must still solve \eqref{eq:attr-fin-2} to obtain the ${\cal H}_0$ explicitly in terms of the charges, such a solution exists and can be written down explicitly as explained around \eqref{eq:J-general}-\eqref{eq:attr-fin-J}. The extremization based on \eqref{eq:entropy-fun-extremization}, where the angular momentum ${\cal J}$ appears explicitly, can be transformed to a more traditional entropy function that depends on a corresponding fugacity $w$ by an inverse Legendre transform. 
Computing the derivative of \eqref{eq:entropy-fun-extremization} with respect to ${\cal J}$, one finds \begin{equation}\label{eq:w-def} w \equiv \frac{1}{\pi} \frac{\partial {\cal S}}{\partial {\cal J}} = \frac{\mathrm{j}}{\Iprod{G}{{\cal H}_0}\?\sqrt{I_4({\cal H}_0 + \mathrm{j}\? G )}} \,, \end{equation} i.e. it reproduces the off-diagonal $\phi$-$\tau$ term in the metric \eqref{eq:metr-Sen}. This is the object dual to the angular momentum ${\cal J}$ in the entropy function analysis of \cite{Astefanesei:2006dd}. One may therefore define a new BPS entropy function \begin{equation} {\sf L}({\cal H}_0, w) \equiv {\cal S} -\pi w\? {\cal J} \, \, \rule[-.2cm]{.5pt}{.8cm}_{\,{\cal J} = {\cal J}(w) } \,, \end{equation} where ${\cal J}$ is understood as a function of $w$ by inverting \eqref{eq:w-def}. Explicitly, we find \begin{equation}\label{eq:free-energy-J} {\sf L} = \frac{\pi}{4 \Iprod{G}{{\cal H}_0} \sqrt{{\cal W}(\tilde{\mathrm{j}})} } \left(\frac13 I_4(\Gamma, {\cal H}_0, {\cal H}_0, {\cal H}_0)+\tilde{\mathrm{j}}^2 ( I_4(\Gamma, {\cal H}_0, G, G) - 4 \gamma) \right)\,, \end{equation} where \begin{align} \tilde{\mathrm{j}}(w,{\cal H}_0) =&\? \mathrm{j}({\cal J}(w),{\cal H}_0) \nonumber\\ =&\? \frac{1}{2\?\sqrt{I_4(G)}\?w}\?\left( \sqrt{\frac{\pi^2}{{\Iprod{G}{{\cal H}_0}^2}} -\frac{w^2}{2}\?I_4({\cal H}_0,{\cal H}_0,G,G) + 2\? \sqrt{I_4({\cal H}_0)\?I_4(G)}\?w^2} \right. \nonumber\\ &\?\hspace{1.9cm} \left. - \sqrt{\frac{\pi^2}{{\Iprod{G}{{\cal H}_0}^2}} -\frac{w^2}{2}\?I_4({\cal H}_0,{\cal H}_0,G,G) - 2\? \sqrt{I_4({\cal H}_0)\?I_4(G)}\?w^2} \right)\,, \end{align} and we note that \eqref{eq:w-def} implies that \begin{equation} {\cal W} (\tilde{\mathrm{j}}) = \frac{\tilde{\mathrm{j}}^2}{\Iprod{G}{{\cal H}_0}^2\?w^2} \,. \end{equation} The entropy function \begin{equation} {\cal E} = {\sf L} + \pi w\?
{\cal J}\,, \end{equation} is then of the standard type, where one must extremize with respect to the scalars ${\cal H}_0$ and $w$ to obtain the attractor equations and the expression for the angular momentum. \subsection{The asymptotically AdS\texorpdfstring{$_4$}{4} example} We now turn to an explicit example, namely that of the STU model in the frame \eqref{eq:STU-root}-\eqref{I4-ch-root}, with a purely electric gauging given by \begin{equation}\label{eq:root-G} G = \{ 0,\?0,\?0,\?0,\? g_0,\? g_1,\? g_2,\? g_3 \}^{\rm T}\,, \end{equation} leading to the quantities \begin{equation}\label{eq:root-I4G} I_4(G)= 4\? g_0 g_1g_2g_3 \,, \qquad I^\prime_4(G) = 4\?\{\? g_1 g_2 g_3,\? g_0 g_2 g_3 ,\? g_0 g_1 g_3,\? g_0 g_1 g_2, 0,0,0,0\}^{\rm T}\,. \end{equation} and the cosmological constant $\Lambda = -3 \sqrt{I_4(G)}= -3 \sqrt{4\? g_0 g_1g_2g_3}$, which is real and negative when all $g_I>0$ in \eqref{eq:root-G}. In addition, we are interested in a purely magnetic charge vector $\Gamma$, and therefore we can choose the vector of constants ${\cal H}_0$ as purely magnetic, so that \begin{align}\label{eq:root-Gamma-H} \Gamma = &\? \{\? p^0,\? p^1,\? p^2,\? p^3 ,\? 0,\?0,\?0,\?0 \?\}^{\rm T}\,, \\ {\cal H}_0 = &\? \{\? h^0,\? h^1,\? h^2,\? h^3 ,\? 0,\?0,\?0,\?0 \?\}^{\rm T}\,, \end{align} and the constraint \eqref{eq:final-sph-2} reads \begin{equation} \Iprod{G}{\Gamma} = \sum_I g_I \? p^I = -1 \,. \end{equation} Note that the solution based on the above choices is related to the example discussed in section \ref{Model_ads_vac} and section \ref{sec:example-flow}, by the inverse of \eqref{eq:STU-sym-rot}. However, in this section we prefer to move to this frame, as it allows for a lift to M-theory on S$^7$ \cite{Cvetic:1999xp} and a direct connection to the dual field theory. The objects appearing in \eqref{eq:entropy-fun-extremization} in this frame are then given by \begin{align} \Iprod{G}{{\cal H}_0} = &\? \sum_I g_I \? 
h^I\,, \qquad \\ I_4({\cal H}_0 \pm \frac{\cal J}{\gamma} G)=&\, 4\,h^0 h^1 h^2 h^3 - (h^0 g_0 - h^i g_i)^2\?\frac{{\cal J}^2}{\gamma^2} + 4\,g_0 g_1 g_2 g_3 \?\frac{{\cal J}^4}{\gamma^4} \nonumber\\ &\? + 4\? (h^1 h^2 g_1 g_2 + h^1 h^3 g_1 g_3 + h^2 h^3 g_2 g_3)\?\frac{{\cal J}^2}{\gamma^2}\,, \\ \Iprod{\Gamma}{ I_4^\prime({\cal H}_0 \pm \frac{\cal J}{\gamma} G ) } = &\, 4\,h^0 h^1 h^2 h^3 \? \sum_I \frac{p^I}{h^I} + 2\? \left(\Iprod{G}{\Gamma}\? \Iprod{G}{{\cal H}_0} -2\?\sum_I g_I^2 p^I h^I \right)\?\frac{{\cal J}^2}{\gamma^2}\,. \end{align} and $\gamma$ is expressed as a product of three constants $\gamma_i$, for $i=1,2,3$, which will be useful below, \begin{align} \gamma_i = &\? \left( g_0 h^0 - g_1 h^1 - g_2 h^2 - g_3 h^3 + 2\?g_i h^i \right)\,, \\ \gamma = &\? \gamma_1\? \gamma_2\? \gamma_3\,, \end{align} where $\gamma$ is in agreement with the definition \eqref{eq:gamma-def}. This entropy function looks somewhat complicated, but simplifies in an expansion around the static limit, ${\cal J}\,, w \rightarrow 0$, for which it collapses to \begin{align}\label{eq:entropy-fun-J0} {\sf L} =&\? \pi\?\frac{\sqrt{h^0 h^1 h^2 h^3}}{\Iprod{G}{{\cal H}_0}}\? \sum_I \frac{p^I}{h^I} \\ &\? - \frac{\pi\ \gamma}{4}\?\sqrt{h^0 h^1 h^2 h^3}\? \Iprod{G}{{\cal H}_0} \? \left( 4 + \sum_{i=1}^3 \frac{1}{\gamma_i}\? \left( \frac{p^0}{h^0} - \frac{p^1}{h^1} - \frac{p^2}{h^2} - \frac{p^3}{h^3} + 2\?\frac{p^i}{h^i} \right) \right)\?\frac{w^2}{\pi^2} + {\cal O}(w^4) \,. \nonumber \end{align} Here, the term for $w=0$ is of the form given in \cite{Benini:2015eyy}, upon imposing the constraint $\Iprod{G}{{\cal H}_0}=1$. The latter can be imposed without loss of generality\footnote{The quantity $\Iprod{G}{{\cal H}_0}$ must be nonzero by regularity, and is conventionally taken to be positive.} in the static case, in view of the homogeneity of \eqref{eq:entropy-fun-J0}, simply amounting to an overall rescaling of the $h^I$. 
However, such a symmetry is not clear by inspection of \eqref{eq:entropy-fun-extremization} in the general case. \subsection{The \texorpdfstring{$I_4(G) = 0$}{I4(G) = 0} case} There is a particular simplification in the case where the gauging vector is not generic, i.e. when it does not allow for an AdS$_4$ asymptotics. In this case we find that \begin{equation} {\cal W} = I_4({\cal H}_0) - \mathrm{j}^2\ , \end{equation} and upon gauge fixing the linear combination \begin{equation} \Iprod{G}{{\cal H}_0} = 1\ , \end{equation} we can easily invert the relation for $w(\mathrm{j})$ to find \begin{equation} \tilde{\mathrm{j}} = \frac{w\ \sqrt{I_4({\cal H}_0)}}{\sqrt{1+w^2}}\ , \qquad \Rightarrow \quad {\cal W} = \frac{I_4({\cal H}_0)}{(1+w^2)}\ . \end{equation} The entropy function in this case takes a particularly simple form, \begin{equation} {\sf L} = \pi\ \sqrt{1+w^2}\ \frac{I_4(\Gamma, {\cal H}_0, {\cal H}_0, {\cal H}_0)}{12 \sqrt{I_4({\cal H}_0)}} + \pi \sqrt{I_4({\cal H}_0)} \frac{w^2}{4\ \sqrt{1+w^2}} (I_4(\Gamma, {\cal H}_0, G, G)-4 \gamma)\ . \end{equation} In the extra special case of asymptotically flat solutions, $I_4'(G) = 0$ and we find that $I_4(\Gamma, {\cal H}_0, G, G) = 4 \gamma = 4$, leading us to \begin{equation}\label{eq:Mink-EF} {\sf L}_{\text{Mink}} = \pi\ \sqrt{1+w^2}\ \frac{I_4(\Gamma, {\cal H}_0, {\cal H}_0, {\cal H}_0)}{12 \sqrt{I_4({\cal H}_0)}}\ . \end{equation} Note that in the static limit, $w=0$, the function \eqref{eq:Mink-EF} is simply equal to the central charge, thus reproducing the extremization for the static black holes in \cite{Cacciatori:2009iz,Dall'Agata:2010gj}. It is now easy to verify explicitly that upon extremization of ${\sf L}_{\text{Mink}} + \pi w {\cal J}$ with respect to $w$ and ${\cal H}_0$ one finds back the explicit asymptotically flat solution of subsection \ref{subsec:nhg-examples}. To show this, we change variables via \begin{equation} \phi^i \equiv \frac{1}{h^i}\ \sqrt{1+w^2}\ \sqrt{h^0 h^1 h^2 h^3}\ .
\end{equation} If we further use that the gauge fixing choice $\Iprod{G}{{\cal H}_0} = 1$ leads to $h^0 = -p^0$, we finally find \begin{equation} {\cal E}_{\text{Mink}} = - 2 \pi \frac{\phi^1 \phi^2 \phi^3}{1+w^2} + \pi q_i \phi^i + \pi w {\cal J}\ , \end{equation} in agreement with the answer in \cite{Gomes:2013cca} \footnote{Due to the five-dimensional language in the reference, the exact match works via Wick rotation of $w$ and ${\cal J}$.}. \section*{Acknowledgements} We would like to thank I. Bena, F. Benini, and B. Willett for stimulating discussions, N. Bobev and G. Bossard for useful comments on the draft, and S.M. Hosseini and A. Zaffaroni for discussions and for pointing out typos in previous versions of the paper. KH is supported in part by the Bulgarian NSF grant DN08/3 and the bilateral grant STC/Bulgaria-France 01/6. The work of SK is supported by the KU Leuven C1 grant ZKD1118 C16/16/005, by the Belgian Federal Science Policy Office through the Inter-University Attraction Pole P7/37, and by the COST Action MP1210 The String Theory Universe. CT acknowledges support from the NSF Grant PHY-1125915 and the Agence Nationale de la Recherche (ANR) under the grant Black-dS-String (ANR-16-CE31-0004). CT would like to thank the Simons Center for Geometry and Physics, Stony Brook University for hospitality during some steps of this paper.
1,108,101,565,875
arxiv
\section{Introduction}% \label{sec:introduction} Image steganography, aiming at delivering a modified cover image to secretly transfer hidden information inside with little awareness of the third-party supervision, is a classical computer vision and cryptography problem. Traditional image steganography algorithms go to great lengths to hide information into the cover image while little consideration is given to payload capacity, also known as the ratio between hidden and total information transferred. The~payload capacity is one significant factor to steganography methods because if more information is to be hidden in the cover, the~visual appearance of the cover is altered further and thus the risk of detection is higher (The source code is available at: \url{https://github.com/adamcavendish/StegNet-Mega-Image-Steganography-Capacity-with-Deep-Convolutional-Network}). The most commonly used image steganography for hiding large files during transmission is embedding a RAR archive (Roshal ARchive file format) after a JPEG (Joint Photographic Experts Group) file. In such way, it can store an infinite amount of extra information theoretically. However, the~carrier file must be transmitted as it is, since any third-party alteration to the carrier is going to destroy all the hidden information in it; even simply reading out the image and saving it again will corrupt the hidden information. To maximize the payload capacity while still resistible to simple alterations, pixel level steganography is majorly used, in which LSB (least significant bits) method~\cite{LSBRevisited}, BPCS~\cite{BPCS} (Bit Plane Complexity Segmentation), and their extensions are dominant. LSB-based methods can achieve a payload capacity of up to 50\%, or otherwise, a vague outline of the hidden image would be exposed (see Figure~\ref{fig:vagueoutline}). However, most of these methods are vulnerable to statistical analysis, and therefore they can be easily detected.
\begin{figure}[H] \centering \begin{tabular}{ccc} \adamIncludeFigureCS{0.2}{0.25}{Cover Image}{vague_outline/hidden_outline_visible_covr.png} \adamIncludeFigureCS{0.2}{0.25}{Hidden Image}{vague_outline/hidden_outline_visible_hide.png} \adamIncludeFigureCS{0.2}{0.25}{Embedded Image}{vague_outline/hidden_outline_visible_steg_lsb4.png} \end{tabular} \vspace{-8pt} \caption{Vague Outline Visible in 4-bit LSB Steganography Embedded-Cover-Diversity = 50\%, Hidden-Decoded-Diversity = 50\%, Payload Capacity = 12 bpp.}% \label{fig:vagueoutline} \end{figure} Some traditional steganography methods with balanced attributes are hiding information in the JPEG DCT components. For instance, A. Almohammad's work~\cite{HCJPEG} provides around 20\% of payload capacity (based on the patterns) and still remains undetected through statistical analysis. Most secure traditional image steganography methods recently have adopted several functions to evaluate the embedding localizations in the image, which~enables content-adaptive steganography. HuGO~\cite{HuGO} defines a distortion function domain by giving every pixel a changing cost or embedding impact based on its effect. It uses a weighted norm to represent the feature space. WOW (Wavelet Obtained Weights)~\cite{WOW} embeds information according to the textural complexity of the image regions. Work~\cite{UniDistortion, CASMinStatDetect} have discussed some general ways of content-adaptive steganography to avoid statistical analysis. Work~\cite{CASBatch} is focusing on content-adaptive batched steganography. These methods highly depend on the patterns of the cover image, and therefore the average payload capacity can be hard to~calculate. The major contributions of our work are as follows: \begin{enumerate*}[label=\roman*)] \item[(i)] We propose a methodology to apply neural networks for image steganography to embed image information into image information without any help of traditional steganography methods. 
\item[(ii)] Our implementation raises image steganography payload capacity to an average of 98.2\% or 23.57 bpp (bits per pixel), changing only around 0.76\% of the cover image (See Figure~\ref{fig:stegnetvslsb3}). \item[(iii)] We propose a new cost function named variance loss to suppress noise pixels generated by generator network. \item[(iv)] Our implementation is robust to statistical analysis and 4 other widely used steganography analysis methods. \end{enumerate*} The decoded rate is calculated by \csdef{CE}{\mathrm{CE}} \csdef{HD}{\mathrm{HD}} \begin{equation} \textrm{Decoded Rate} = 1 - \frac{\sum_{i=1}^{N} \sum_{j=1}^{M} \abs{H_{i,j} - D_{i,j}}}{N \times M} , \end{equation} the cover changing rate is calculated by \begin{equation} \textrm{Cover Changing Rate} = \frac{\sum_{i=1}^{N} \sum_{j=1}^{M} \abs{C_{i,j} - E_{i,j}}}{N \times M} \end{equation} and the bpp (bits per pixel) is calculated by \begin{equation} \textrm{Capacity} = \textrm{Decoded Rate} \times 8 \times 3 \quad \textrm{(bpp)} \end{equation} where \(C, H, E, D\) symbols stand for the cover image (\(C\)), the~hidden image (\(H\)), the~embedded image~(\(E\)) and the decoded image (\(D\)) in correspondence, and ``8, 3'' stands for number of bits per channel and number of channels per pixel respectively. 
\csundef{HD} \csundef{CE} \begin{figure}[H] \centering \begin{tabular}{ccc} \adamIncludeFigureCS{0.2}{0.25}{Cover Image} {effect_comparison/image_1_covr.png} \adamIncludeFigureCS{0.2}{0.25}{StegNet Embedded} {effect_comparison/image_1_steg_stegnet.png} \adamIncludeFigureCS{0.2}{0.25}{3-bit LSB Embedded} {effect_comparison/image_1_steg_lsb3.png}\\ \adamIncludeFigureCS{0.2}{0.25}{Hidden Image} {effect_comparison/image_1_hide.png} \adamIncludeFigureCS{0.2}{0.25}{StegNet Decoded} {effect_comparison/image_1_dcpt_stegnet.png} \adamIncludeFigureCS{0.2}{0.25}{3-bit LSB Decoded} {effect_comparison/image_1_dcpt_lsb3.png} \end{tabular} \vspace{-8pt} \caption{{~StegNet and 3-bit LSB Comparison Embedded-Cover-Diversity = 0.76\%, Hidden-Decoded- Diversity = 1.8\%, Payload Capacity = 23.57 bpp.}}% \label{fig:stegnetvslsb3} \end{figure} This paper is organized as follows. Section~\ref{sec:relatedwork} will describe traditional high-capacity steganography methods and the convolution neural network used by this paper. Section~\ref{sec:convsteg} will unveil the secret why the neural network can achieve the amount of capacity encoding and decoding images. The~architecture and experiments of our neural network are discussed in Sections~\ref{sec:architecture} and \ref{sec:experiments}, and finally, we'll make a conclusion and put forward some future works in Section~\ref{sec:conclusion}. \section{Related Work}% \label{sec:relatedwork} \vspace{-6pt} \subsection{Steganography Methods}% \label{ssec:stegmethods} Most steganography methods can be grouped into three basic types, which~are image domain steganography, transform domain steganography and file-format-based steganography. Image domain ones have an advantage of simplicity and better payload capacity while being more likely to be detected. Transform domain ones usually have a more complex algorithm but hide well against third-party analysis.
File-format-based ones depend very much on the file format which makes it quite fragile to alterations. \subsection{JPEG RAR Steganography}% \label{ssec:jpegrar} The JPEG RAR Steganography is a kind of file-format-based steganography, which~uses a feature in these two file format specifications. (JPEG~\cite{jpegspec} and RAR~\cite{rarspec}) After the JPEG file has scanned the segment of EOI (End Of Image) (0xd9 in hex format), all the remaining segments are ignored (skipped), and therefore any information is allowed to be appended afterward. A RAR file~\cite{rarspec} has the magic file header ``0x52 0x61 0x72 0x21 0x1a 0x07 0x00'' in hex format (\textquote{Rar!} as characters) and the parser will ignore all the information before the file header. It is possible to dump the binary of the RAR file after the JPEG file, and it'll apparently act as if it is a JPEG image file while it is actually also a RAR archive. However, the~method is very fragile to any file alterations. Third-party surveillance might truncate useless information to save transmission resource or apply some image alterations to attack potential steganography. Any alteration will crash the steganography, and all hidden information is lost. \subsection{LSB (Least Significant Bit) Method}% \label{ssec:lsbmethod} LSB (Least Significant Bit)-based methods~\cite{LSBRevisited} are the most commonly used image domain steganography methods which hide information at the pixel level. Most LSB methods aim at altering parts of the cover image to such an extent that the human visual system can barely notice. These methods are motivated by the fact that the visual part of most figures is dominated by the highest bits of each pixel, and the LSB bits (the underlined part of one pixel as shown in Figure~\ref{fig:lsbexplained}) are statistically similar to randomly generated data, and therefore, hiding information via altering LSB cannot change the visual result apparently.
\begin{figure}[H] \centering \includegraphics[width=0.5\linewidth]{LSB_explained} \caption{LSB Explanation.}% \label{fig:lsbexplained} \end{figure} The embedding operation of LSB method for the least bit single channel image is described as~follows: \begin{equation*} S_{i} = (C_{i} \land \mathrm{FE_{HEX}}) \lor ((M_{i} \land \mathrm{80_{HEX}}) >> 7) \end{equation*} where \(S_{i}\), \(C_{i}\) and \(M_{i}\) are the \emph{i}th pixel of image after steganography, \emph{i}th pixel of cover image and \emph{i}th bit of the hiding message. Since the least significant bits of the image data should look like random data, there are two major schemes in distributing the hiding data. The~first kind of methods is to put in the hiding message sequentially after encrypting or compressing to achieve the randomness. The~second kind of methods is scattering the hiding data by adopting a mutually acknowledged random seed by which generates the actual hiding sequence~\cite{RobustImageSteg}. \subsection{JPEG Steganography}% \label{ssec:hcjpeg} JPEG steganography, i.e., Chang's work~\cite{JPEGSteg} and A. Almohammad's work~\cite{HCJPEG}, is a part of transform domain steganography. JPEG format examines an image in \(8 \times 8\) blocks of pixels, converts from RGB color space into YCrCb (luminance and chrominance) color space, applies DCT (Discrete Cosine Transformation), quantizes the result, and entropy encodes the rest. After lossy compression, which~is after quantization, the~hidden information is hidden into the quantized DCT components, which~serves as an LSB embedding in the transformed domain. As a result, it is quite hard to detect using statistical analysis and comparably lower payload capacity to LSB method. \subsection{Convolutional Neural Network}% \label{ssec:convnet} Convolutional neural network~\cite{conv}, though dates back to the 1990s, is now trending these years after AlexNet~\cite{alexnet} won the championship of ImageNet competition.
It has successfully established new records in many fields like classification~\cite{imagenet2017}, object segmentation~\cite{coco2016}, etc. A lot of factors boosted the progress including the development of modern GPU hardware, the~work of ReLU (Rectified Linear Unit)~\cite{relu} and its extensions, and finally, the~abundance of training data~\cite{imagenet}. Our work also benefits a lot from these factors. The convolution operation is not solely used in neural networks, but also widely used in traditional computer vision methods. For instance, the Gaussian smoothing kernel is extensively used for image blurring and noise reduction, which, in implementation, is equal to applying a convolution between the original image and a Gaussian function. Many other contributions in traditional methods are handcrafted patterns, kernels or filter combinations, i.e.,\ the Sobel-Feldman filter~\cite{SobelFeldmanFilter} for edge detection, Log-Gabor filter~\cite{fischer07cv} for texture detection, HOG~\cite{HOG} for object detection, etc. However, designing and tuning handcrafted patterns are highly technical and might be effective for only some tasks. On the contrary, convolutional neural networks have the advantage of automatically creating patterns for specific tasks through back-propagation~\cite{RumelhartBP} on their own, and even further, high-level features can be easily learned through combinations of convolution operations~\cite{Zeiler_Fergus_2013, olah2017feature,Mahendran_Vedaldi_2014}.
It is usually made up of two neural networks, one encoding network \(h = f(x)\) and one decoding network \(d = g(h)\), restricted under \(d = x\), which finally can learn the conditional probability distributions \(p(h|x)\) and \(p(x|h)\) correspondingly. The~autoencoder architecture has shown the ability to extract salient features from images by shrinking the hidden layer (\(h\))'s dimension, which~has been applied to various fields, i.e.,\ denoising~\cite{vincent2008extracting}, dimension reduction~\cite{wang2014generalized}, image generation~\cite{VAE}, etc. \subsection{Neural Network for Steganography}% \label{ssec:nnsteg} Recently there are some works on applying neural networks for steganography. El-Emam~\cite{El-emam_2008} and Saleema~\cite{Saleema_Amarunnishad_2016} work on using neural networks to refine the embedded image generated via traditional steganography methods, i.e., LSB method. Volkhonskiy's~\cite{SGAN} and Shi's~\cite{SSGAN} work focus on generating secure cover images for traditional steganography methods to apply image steganography. Baluja~\cite{Baluja_2017} is working on the same field as StegNet. However, the~hidden image is slightly visible on residual images of the generated embedded images. Moreover, his architecture uses three networks which requires much more GPU memory and takes more time to embed. \section{Convolutional Neural Network for Image Steganography}% \label{sec:convsteg} \vspace{-6pt} \subsection{High-order Transformation}% \label{ssec:highordertrans} In image steganography, we argue that we should not only focus on where to hide information, which~most traditional methods work on, but we should also focus on how to hide it. Most traditional steganography methods usually directly embed hidden information into parts of pixels or transformed correspondences. The~transformation regularly occurs in \textit{where to hide}, either~actively applied in the steganography method or passively applied because of file format.
As a result, the~payload capacity is highly related and restricted to the area of the texture-rich part of the image detected by the \textit{handcoded} patterns. DCT-based steganography is one of the most famous transform domain steganography. We~can consider the DCT process in JPEG lossy compression process as a kind of one-level high-order transformation which works at a block level, converting each \(8 \times 8\) or \(16 \times 16\) block of pixel information into its corresponding frequency-domain representation. Even hiding in DCT transformed frequency-domain data, traditional works~\cite{JPEGSteg, HCJPEG} embed hidden information in mid-frequency band via LSB-alike methods, which~eventually cannot be eluded. While in contrast, deep convolution neural network makes multi-level high-order transformations possible for image steganography. Figure~\ref{fig:ConvReceptiveField} shows the receptive field of one high-level kernel unit in a demo of a three-layer convolutional neural network. After lower-level features are processed by kernels and propagated through activations along middle layers, the~receptive field of final higher-level kernel unit is able to absorb 5 lower-level features of the first layer and form its own higher-level feature throughout the training process. \begin{figure}[H] \centering \includegraphics[width=0.2\linewidth]{ConvReceptiveField} \caption{Receptive Field of Convolutional Neural Network.}% \label{fig:ConvReceptiveField} \end{figure} \subsection{Trading Accuracy for Capacity}% \label{ssec:tradingaccforcap} Traditional image steganography algorithms mostly embed hidden information as it is or after applying lossless transformations. After decoding, the~hidden information is extracted as it is or after the corresponding detransformations are applied. Therefore, empirically speaking, it is just as file compression methods, where lossless compression algorithms usually cannot outperform lossy compression algorithms in capacity. 
We need to think in a ``lossy'' way in order to embed almost equal amount of information into the cover. The~model needs to learn to compress the cover image and the hidden image into an embedding of high-level features and converts them into an image that appears as similar as the cover image, which~comes to the vital idea of trading accuracy for capacity. Trading accuracy for capacity means that we do not limit our model in reconstructing at a pixel-level accuracy of the hidden image, but aiming at ``recreating'' a new image with most of the features in it with a panoramic view, i.e.,\ the spider in the picture, the~pipes' position relatively correct, the~outline of the mountain, etc. In other words, the~traditional approaches work in lossless ways, which~after some preprocessing to the hidden image, the~transformed data is crammed into the holes prepared in the cover image. However, StegNet approach decoded image has no pixel-wise relationship with the hidden image at all, or strictly speaking, there is no reasonable transformation between each pair of corresponding pixels, but the decoded image as a whole can represent the original hidden image's meaning through neural network's reconstruction. In the encoding process, the~model needs to transform from a low-level massive amount of pixel-wise information into some high-level limited sets of featurewise information with an understanding of the figure, and come up with a brand new image similar to the cover apparently but with hidden features embedded. In the decoding process, on the contrary, the~model is shown only the embedded figure, from which both cover and hidden high-level features are extracted, and the hidden image is rebuilt according to network's own comprehension. As shown in Figures~\ref{fig:StegNetResidualHist} and \ref{fig:StegNetResidualFig}, StegNet is not applying LSB-like or simple merging methods to embed the hidden information into the cover. 
The~residual image is neither simulating random noise (LSB-based approach, see Figure~\ref{fig:LSB3ResidualFig}) nor combining recognizable hidden image inside. The~embedded pattern is distributed across the whole image and even magnified 5 to 10 times, the~residual image is similar to the cover image visually which can help decrease the abnormality exposed to the human visual system and finally avoid to be detected. \begin{figure}[H] \centering \begin{tabular}{cc} \adamIncludeFigure{0.5}{}{residual_comparison/histogram/image_1_diff_covr_steg_hist_stegnet_mag01_sp} \adamIncludeFigure{0.5}{}{residual_comparison/histogram/image_1_diff_hide_dcpt_hist_stegnet_mag01_sp} \end{tabular} \vspace{-16pt} \caption{Residual image histograms shows that the residual error is distributed across the images. \textbf{(a)} Residual between cover and embedded; \textbf{(b)} Residual between hidden and decoded.}% \label{fig:StegNetResidualHist} \end{figure} \vspace{-12pt} \begin{figure}[H] \centering \begin{tabular}{cccc} \adamIncludeFigureCS{0.2}{0.25}{Cover} {residual_comparison/image_1_covr.png} \adamIncludeFigureCS{0.2}{0.25}{StegNet Embedded} {residual_comparison/image_1_steg_stegnet.png} \adamIncludeFigureCS{0.2}{0.25}{StegNet Residual \(\times 05\)}{residual_comparison/magnified/image_1_diff_covr_steg_stegnet_mag05.png} \adamIncludeFigureCS{0.2}{0.25}{StegNet Residual \(\times 10\)}{residual_comparison/magnified/image_1_diff_covr_steg_stegnet_mag10.png}\\ \adamIncludeFigureCS{0.2}{0.25}{Hidden} {residual_comparison/image_1_hide.png} \adamIncludeFigureCS{0.2}{0.25}{StegNet Decoded} {residual_comparison/image_1_dcpt_stegnet.png} \adamIncludeFigureCS{0.2}{0.25}{StegNet Residual \(\times 05\)}{residual_comparison/magnified/image_1_diff_hide_dcpt_stegnet_mag05.png} \adamIncludeFigureCS{0.2}{0.25}{StegNet Residual \(\times 10\)}{residual_comparison/magnified/image_1_diff_hide_dcpt_stegnet_mag10.png} \end{tabular} \vspace{-14pt} \caption{StegNet residual images ``\(\times 05\)'' and 
``\(\times 10\)'' are the pixel-wise enhancement ratio.}% \label{fig:StegNetResidualFig} \end{figure} \vspace{-12pt} \begin{figure}[H] \centering \begin{tabular}{cccc} \adamIncludeFigureCS{0.2}{0.25}{Cover} {residual_comparison/image_1_covr.png} \adamIncludeFigureCS{0.2}{0.25}{3-bit LSB Embedded} {residual_comparison/image_1_steg_lsb3.png} \adamIncludeFigureCS{0.2}{0.25}{3-bit LSB Residual \(\times 05\)}{residual_comparison/magnified/image_1_diff_covr_steg_lsb3_mag05.png} \adamIncludeFigureCS{0.2}{0.25}{3-bit LSB Residual \(\times 10\)}{residual_comparison/magnified/image_1_diff_covr_steg_lsb3_mag10.png}\\ \adamIncludeFigureCS{0.2}{0.25}{Hidden} {residual_comparison/image_1_hide.png} \adamIncludeFigureCS{0.2}{0.25}{3-bit LSB Decoded} {residual_comparison/image_1_dcpt_lsb3.png} \adamIncludeFigureCS{0.2}{0.25}{3-bit LSB Residual \(\times 05\)}{residual_comparison/magnified/image_1_diff_hide_dcpt_lsb3_mag05.png} \adamIncludeFigureCS{0.2}{0.25}{3-bit LSB Residual \(\times 10\)}{residual_comparison/magnified/image_1_diff_hide_dcpt_lsb3_mag10.png} \end{tabular} \vspace{-12pt} \caption{ 3-bit LSB residual images ``\(\times 05\)'' and ``\(\times 10\)'' are the pixel-wise enhancement ratio.}% \label{fig:LSB3ResidualFig} \end{figure} The residual image is computed via \begin{equation} R(I_{1}, I_{2}) = \frac{\abs{I_{1} - I_{2}}}{\max \abs{I_{1} - I_{2}}} , \end{equation} and the magnification or the enhancement operation is achieved via \begin{equation} E(I, M) = \mathrm{clip}(I \cdot M, 0, 1) , \end{equation} where \(I\) takes residual images, which~are effectively normalized to \([0, 1]\) and \(M\) is the magnification ratio, for which \(5\) and \(10\) are chosen to visualize the differences in this paper.
\section{Architecture}% \label{sec:architecture} \vspace{-6pt} \subsection{Architecture Pipeline}% \label{ssec:pipeline} The whole processing pipeline is shown in Figure~\ref{fig:pipeline}, which~consists of two almost identical neural network structures responsible for encoding and decoding. The~identical structures are taken from Autoencoder~\cite{hinton1994autoencoders}, GAN~\cite{GAN}, etc., which~help the neural network model similar high-level features of images in their latent space. The~details of embedding and decoding structure are described in Figure~\ref{fig:StegNetArch}. In the embedding procedure, the~cover image and the hidden image are concatenated by channel while only the embedded image is shown to the network. Two parts of the network are both majorly made up of one lifting layer which lifts from figure channels to a uniform of 32 channels, six~\(3 \times 3\) basic building blocks raising features into high-dimensional latent space and one reducing layer which transforms features back to image space. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{pipeline/Pipeline-multicolor} \vspace{-12pt} \caption{StegNet Processing Pipeline.}% \label{fig:pipeline} \end{figure} The basic building block named \textquote{Separable Convolution with Residual Block} (abbreviated as \textquote{SCR} in the following context) has the architecture as Figure~\ref{fig:SCRBlock}. We~adopt batch-normalization~\cite{batchnorm} and exponential linear unit (ELU)~\cite{elu} for quicker convergence and better result. \subsection{Separable Convolution with Residual Block}% \label{ssec:residualsepconv} Our work adopts the state-of-the-art neural network structures, the~skip connections in Highway Network~\cite{highway}, ResNet~\cite{resnet} and ResNeXt~\cite{resnext}, and separable convolution~\cite{sepconv} together to form the basic building block \textquote{SCR}.
The idea behind separable convolution~\cite{sepconv} originated from Google's Inception models~\cite{inceptionv1, inceptionv4} (see Figure~\ref{fig:InceptionV3} for its building blocks), and the hypothesis behind is that \textquote{cross-channel correlations and spatial correlations are decoupled}. Further, in Xception architecture~\cite{sepconv}, it makes an even stronger hypothesis that \textquote{cross-channel correlations and spatial correlations can be mapped completely separately}. Together with skip-connections~\cite{resnet} the gradients are preserved in backpropagation process via skip-connections to frontier layers and as a result, ease the problem of vanishing gradients. \begin{figure}[H] \centering \begin{tabular}{cc} \adamIncludeFigure{0.3}{Embedding Structure}{structure/EncodeStructure} \adamIncludeFigure{0.3}{Decoding Structure}{structure/DecodeStructure} \end{tabular} \vspace{-11pt} \caption{StegNet Network Architecture.}% \label{fig:StegNetArch} \end{figure} \vspace{-12pt} \begin{figure}[H] \centering \includegraphics[width=0.8\linewidth]{SCRBlock/SCRBlock4} \vspace{-6pt} \caption{Separable Convolution with Residual Block.}% \label{fig:SCRBlock} \end{figure} \vspace{-12pt} \begin{figure}[H] \centering \includegraphics[width=0.7\linewidth]{inception/InceptionV3_2} \vspace{-3pt} \caption{Basic Building Block in Inception v3.}% \label{fig:InceptionV3} \end{figure} \subsection{Training}% \label{ssec:training} Learning the end-to-end mapping function from cover and hidden image to embedded image and embedded image to decoded image requires the estimation of millions of parameters in the neural network. It is achieved via minimizing the weighted loss of \(L_{1}\)-loss between the cover and the embedded image, \(L_{1}\)-loss between the hidden and the decoded image, and their corresponding variance losses (variance should be computed across images' height, width and channel). 
\(C, H, E, D\) symbols stand for the cover image (\(C\)), the~hidden image (\(H\)), the~embedded image (\(E\)) and the decoded image (\(D\)) in correspondence. (See Equations~(\ref{eq:seploss1})--(\ref{eq:loss})) \csdef{CE}{\mathrm{CE}} \csdef{ED}{\mathrm{ED}} \csdef{HD}{\mathrm{HD}} \csdef{Var}{\mathrm{Var}} \begin{align} E_{i} &= F_{\CE}(C_{i}, H_{i}; \Theta_{\CE}) & D_{i} &= F_{\ED}(E_{i}; \Theta_{\ED}) \label{eq:seploss1}\\ L_{\CE} &= \frac{1}{n} \sum_{i=1}^{n} \abs{E_{i} - C_{i}} & L_{\HD} &= \frac{1}{n} \sum_{i=1}^{n} \abs{D_{i} - H_{i}} \label{eq:seploss2} \end{align} \begin{equation} \label{eq:loss} \mathrm{Loss} = \frac{1}{4} (L_{\CE} + L_{\HD} + \Var(L_{\CE}) + \Var(L_{\HD})) \end{equation} \(L_{\CE}\) is used to minimize the difference between the embedded image and the cover image, while~\(L_{\HD}\) is for the hidden image and the decoded image. Choosing only to decode the hidden image while not both the cover and the hidden images are under the consideration that the embedded image should be a concentration of high-level features apparently similar to the cover image whose dimension is half the shape of those two images, and some trivial information has been lost. It would have pushed the neural network to balance the capacity in embedding the cover and the hidden if both images are extracted at the decoding process. Furthermore, adopting variance losses helps to give a hint to the neural network that the loss should be distributed throughout the image, but not putting at some specific position (See Figure~\ref{fig:VarianceDiff} for differences between. The~embedded image without variance loss shows some obvious noise spikes (blue points) in the background and also some around the dog nose). 
\csundef{Var} \csundef{ED} \csundef{CE} \begin{figure}[H] \centering \begin{tabular}{cc} \adamIncludeFigureCS{0.25}{0.45}{Embedded Image with Variance Loss} {var_loss_effect/stegnet_00_04_steg_bbox} \adamIncludeFigureCS{0.25}{0.45}{Embedded Image without Variance Loss}{var_loss_effect/novar_00_04_steg_bbox} \\ \adamIncludeFigureCS{0.25}{0.45}{Red Box Magnified (with Variance Loss)} {var_loss_effect/stegnet_00_04_steg_mag} \adamIncludeFigureCS{0.25}{0.45}{Red Box Magnified (without Variance Loss)}{var_loss_effect/novar_00_04_steg_mag} \end{tabular} \vspace{-8pt} \caption{Variance Loss Effect on Embedding Results.}% \label{fig:VarianceDiff} \end{figure} \section{Experiments}% \label{sec:experiments} \vspace{-6pt} \subsection{Environment}% \label{ssec:environment} Our work is trained on one NVidia GTX1080 GPU and we adopt a batch size of 64 using Adam optimizer~\cite{adam} with learning rate at \(10^{-5}\). We~use no image augmentation and restrict model's input image to \(64 \times 64\) in height and width because of memory limit. Training with resized \(64 \times 64\) ImageNet can yield pretty good results. We~use 80\% of the ImageNet dataset for training and the remaining for testing to verify the generalization ability of our model. Figure~\ref{fig:onebatch} shows the result of applying StegNet steganography method on a batch of images. \begin{figure}[H] \centering \includegraphics[width=\linewidth]{one_batch_example} \caption{One batch Steganography Example.}% \label{fig:onebatch} \end{figure} \subsection{Statistical Analysis}% \label{ssec:statanalysis} The encoded and decoded images comparison between StegNet and LSB method are presented in Figure~\ref{fig:stegnetvslsb3}. They are very similar; however, there is one critical flaw in the LSB method: it cannot withstand statistical analysis, and therefore the LSB method is usually combined with transformations of the hidden image, i.e.,\ compression, randomization, etc. 
Figure~\ref{fig:stegnetvslsb3hist} is a comparison of histogram analysis between LSB method and our work. It shows a direct view of the robustness of StegNet against statistical analysis, as the histogram of the StegNet embedded image matches the cover image's histogram much more closely. A more all-around test is conducted through StegExpose~\cite{stegexpose}, which~combines several decent algorithms to detect LSB-based steganography, i.e.,\ sample pair analysis~\cite{samplepairanalysis}, RS analysis~\cite{fridrich2004reliable}, chi-square attack~\cite{westfeld1999attacks} and primary sets~\cite{dumitrescu2002steganalysis}. The~detection threshold is its hyperparameter, which~is used to balance true positive rate and false positive rate of the StegExpose's result. The~test is performed with linear interpolation of detection threshold from 0.00 to 1.00 with 0.01 as the step interval. Figure~\ref{fig:roccurve} is the ROC curve, where true positive stands for an embedded image correctly identified that there are hidden data inside while false positive means a clean figure falsely classified as an embedded image. The~figure is plotted in red-dash-line-connected scatter data, showing that StegExpose can only work a little better than random guessing, the~line in green. In other words, the~proposed steganography method can better resist StegExpose attack. 
\begin{figure}[H] \centering \begin{tabular}{ccc} \adamIncludeFigureCS{0.2}{0.3}{Cover Image} {hist_analysis/image_1_covr.png} \adamIncludeFigureCS{0.2}{0.3}{StegNet Embedded} {hist_analysis/image_1_steg_stegnet.png} \adamIncludeFigureCS{0.2}{0.3}{3-bit LSB Embedded}{hist_analysis/image_1_steg_lsb3.png} \\ \vspace{-5pt} \adamIncludeFigureCS{0.2}{0.3}{Cover Image Histogram}{hist_analysis/image_1_covr_hist.png} \adamIncludeFigureCS{0.2}{0.3}{StegNet Histogram} {hist_analysis/image_1_steg_stegnet_hist.png} \adamIncludeFigureCS{0.2}{0.3}{3-bit LSB Histogram} {hist_analysis/image_1_steg_lsb3_hist.png} \end{tabular} \vspace{-8pt} \caption{Histogram Comparison between StegNet and Plain LSB.}% \label{fig:stegnetvslsb3hist} \end{figure} \vspace{-12pt} \begin{figure}[H] \centering \includegraphics[width=0.82\linewidth]{ROCCurve/ROC-Curve} \vspace{-4pt} \caption{ROC Curves: Detecting Steganography via StegExpose.}% \label{fig:roccurve} \end{figure} \section{Conclusions and Future Work}% \label{sec:conclusion} We have presented a novel deep learning approach for image steganography. We~show that the conventional image steganography methods mostly do not serve with good payload capacity. The~proposed approach, StegNet, creates an end-to-end mapping from the cover image, hidden~image to embedded image and from embedded image to decoded image. It has achieved superior performance than traditional methods and yet remains quite robust. As seen in Figure~\ref{fig:onebatch}, there is still some noise generated at non-texture-rich areas, i.e.,\ plain white or plain black parts. The~variance loss adopted by StegNet might not be the optimal solution to loss~distribution. In addition to the idea of ``trading accuracy for capacity'', the~embedded image does not need to be even visually similar to the cover image. 
The~only requirement to the embedded image is to pass the third-party supervision and the hidden image should be successfully decoded after the transmission is complete, and therefore the embedded image can look similar to anything that is inside the cover image dataset while can look nothing related to anything that is inside the hidden image dataset. Some of the state of the art generative models in neural networks can help achieve it, i.e.,\ Variational Autoencoders~\cite{VAE, AAE}, Generative Adversarial Networks~\cite{GAN, WGAN, BEGAN}, etc. Some work is needed for non-equal sized images steganography since ``1:1'' image steganography is huge for traditional judgment; however, the~ability of neural networks still remains to be discovered. Whether it is possible to generate approaches for even better capacity, or with a better visual quality for even safer from detections. Some other work is needed for non-image hidden information steganography, i.e.,\ text information, binary data. In addition to changing the hidden information type, the~cover information type may also vary from text information to even videos. Furthermore, some~work is needed for applying StegNet on lossy-compressed image file formats or third-party spatial translations, i.e.,\ cropping, resizing, stretching, etc. \vspace{6pt} \authorcontributions{% Conceptualization, Y.Y. and X.L.; Data Curation: Y.Y.; Formal Analysis, Y.Y. and X.L.; Funding Acquisition, P.W. and X.L.; Investigation, Y.Y., X.L. and P.W.; Methodology, Y.Y. and X.L.; Project~Administration, P.W. and X.L.; Resources, Y.Y.; Software, Y.Y.; Supervision, P.W.; Validation, Y.Y.; Visualization, Y.Y.; Writing—Original Draft Preparation, Y.Y.; Writing—Review \& Editing, X.L. and Y.Y.} \funding{% This work was supported by the Shanghai Innovation Action Plan Project under grant number 16511101200. } \conflictsofinterest{% The authors declare no conflict of interest. } \reftitle{References}
1,108,101,565,876
arxiv
\section{\uppercase{#1}}} \def\correct#1#2{{#2}} \def\Classification#1{\\[.2cm] Classification numbers:~{#1}.} \newcommand{\kms}{km s$^{-1}$} \newcommand{\degr}{$^\circ$} \newcommand{\del}[1]{{{\sout{\,#1}}}} \newcommand{\add}[1]{{{\textbf{\,#1}}}} \begin{document} \Year{2020} \Page{1}\Endpage{12} \titles{Observation of high Doppler velocity wings in the nascent wind of R Doradus} \authors{ \authord{D.T. Hoai, P.T. Nhung, P. Tuan-Anh, P. Darriulat, P.N. Diep, N.T. Phuong and T.T. Thai} {Vietnam National Space Center (VNSC), Vietnam Academy of Science and Technology (VAST), 18 Hoang Quoc Viet, Cau Giay, Ha Noi, Vietnam\\ } \email{[email protected]} \received{\today} \accepted{DD MM YYYY} } \maketitle \markboth{D. T. Hoai}{High Doppler velocity wings in the nascent wind of R Doradus} \begin{abstract} We study the morpho-kinematics in the nascent wind of AGB star R Doradus in the light of high Doppler velocity wings observed in the spectral lines of several species. We probe distances from the star between $\sim$10 and $\sim$100 au using ALMA observations of the emission of five different molecular lines. High Doppler velocity enhancements of the line emission are observed in the vicinity of the line of sight crossing the star, reminiscent of those recently interpreted as gas streams in the nascent wind of a similar AGB star, EP Aqr. They are present in both blue-shifted and red-shifted hemispheres but are not exactly back-to-back. They are accelerated at a typical rate of 0.7 km s$^{-1}$ au$^{-1}$ up to some 20 km s$^{-1}$. Important differences are observed between the emissions of different molecules. We exclude an effect of improper continuum subtraction. However, in contrast to EP Aqr, the line of sight plays no particular role in the R Dor morpho-kinematics, shedding doubt on the validity of a gas stream interpretation. We discuss possible interpretations in terms of stellar pulsations or of rotation of the gas in the environment of the star. 
We conclude that, in the current state of knowledge, no fully convincing picture of the physics governing the production of such high velocities, typically twice as large as the terminal velocity, can be reliably drawn. New high resolution analyses of observations of the nascent wind of oxygen-rich AGB stars are needed to clarify the issue. \Keywords{stars: AGB and post-AGB, circumstellar matter, stars: individual: R Dor, radio lines: stars} \Classification{97.60-s} \end{abstract} \section{INTRODUCTION}\label{sec1} The occasional presence of large Doppler velocity wings in some line emission spectra of oxygen-rich evolved stars has been known for some time \cite{Cernicharo1997}. They are particularly visible in SiO line profiles and may typically reach twice the terminal wind velocity. Single dish millimetre observations analysed by Winters et al. (2003)\cite{Winters2003} and more recently by de Vicente et al. (2016)\cite{deVicente2016} have suggested that the emission occurs close to the star, where SiO grains have not yet fully formed, and is somehow related to star pulsations. In the past few years, the availability of high angular resolution ALMA observations of the nascent winds of two similar AGB stars, EP Aqr and R Dor, has shed new light on this issue. The presence of high Doppler velocity components in the $^{28}$SiO($\nu=0, J=5-4$) line emission of the nascent wind of EP Aqr was first noted by Homan et al. (2018a)\cite{Homan2018a} in Section 4.4 of their article. The morpho-kinematics of the circumstellar envelope of EP Aqr is known to display axi-symmetry about an axis close to the line of sight \cite{Hoai2019}. The study of SO$_2$ line emission very close to the star gives evidence for rotation about the same axis \cite{Homan2018a, TuanAnh2019}. Inhomogeneity has been revealed in the equatorial plane, close to the plane of the sky, in the form of a spiral of intensity \cite{Homan2018a} and of concentric rings of radial velocity \cite{Hoai2019}. 
The spiral of intensity was tentatively interpreted by Homan et al. (2018a)\cite{Homan2018a} as evidence for the presence of a companion. In such a context it was therefore natural for Homan et al. (2018a)\cite{Homan2018a} to consider the possibility that the complex dynamics in the wind-companion interaction zone not only accelerate a large portion of the outflow material along the equatorial plane, but also along the polar axis, causing some of this material to have increased velocities along the line of sight. Tuan-Anh et al. (2019)\cite{TuanAnh2019} have then performed a detailed study of the morpho-kinematics of the high velocity wings of the $^{28}$SiO($\nu=0, J=5-4$) line emission and have shown that they are also present, but much weaker, in the $^{12}$CO(2-1) line emission. Their absence from the SO$_2$ line emission, at very short distances from the star, disfavours too sudden an acceleration and, in particular, an interpretation in terms of star pulsation. Tuan-Anh et al. (2019)\cite{TuanAnh2019} propose instead a picture of two narrow polar streams of gas, referred to as jets, being launched from less than 25 au away from the star, building up between $\sim$20 au and $\sim$100 au to a velocity of $\sim$20 km s$^{-1}$\ and fading away at larger distances. Both Homan et al. (2018a)\cite{Homan2018a} and Tuan-Anh et al. (2019)\cite{TuanAnh2019} insist on the complexity of the morpho-kinematics of the nascent wind of EP Aqr when observed with the high angular resolution offered by ALMA and on the difficulty to draw a convincing and reliable picture of the physics at stake. Decin et al. (2018)\cite{Decin2018} were first to discuss the presence of high velocity wings in some line emission spectra of the nascent wind of R Dor, far above the canonical terminal wind velocities although hints of their existence had been mentioned earlier (e.g. Justtanont et al. 2012\cite{Justtanont2012}). 
They quote a maximal Doppler velocity of 23 km s$^{-1}$\ reached in the line emission of SiO($\nu=0, J=8-7$) (note that the velocity scale in their Figure 8 is a factor 2 too large, we thank Pr Leen Decin for clarification on this point). They show in their Figure 10 typical values of the end point velocity of different line spectra reaching some $\sim$15 km s$^{-1}$. On the basis of a model proposed by Nowotny et al. (2010)\cite{Nowotny2010} they argue that star pulsations cannot significantly contribute to the generation of such high velocities. They conclude that the origin of the large velocities is a genuine physical mechanism not linked to thermal motions of the gas or pulsation behaviour of the atmospheric layers and that their impact on the mass-loss rate cannot be underestimated. They privilege a scenario developed by Homan et al. (2018b)\cite{Homan2018b} suggesting the presence of a rotating disc, seen nearly edge-on, in the close neighbourhood of the star. It covers radial distances between 6 and 25 au and the Doppler velocity is maximal on the inner rim where it reaches 12 km s$^{-1}$. The relation between this disc and the high Doppler velocity wings is the suspected presence of an evaporating companion planet providing the necessary angular momentum to the disc rotation. Evidence for the possible existence of such a companion rests on the observation of high Doppler velocity emission in the blue-shifted hemisphere near the middle of the south-eastern quadrant. This enhanced emission is referred to as the ``blue blob'' by both Decin et al. (2018)\cite{Decin2018} and Homan et al. (2018b)\cite{Homan2018b}. They discuss critically the properties of the ``blue blob'' and carefully conclude that follow-up high-resolution observations are needed to test their claims, and to deeper investigate the true nature of both the disk-like signal and the blue blob. Finally, Vlemmings et al. 
(2018)\cite{Vlemmings2018} analyse ALMA observations made two years later with a four times better angular resolution than Decin et al. (2018)\cite{Decin2018}, $\sim$35 mas instead of $\sim$150 mas. From continuum emission they resolve the stellar radio photosphere as a circle of 31 mas radius. The analysis covers the compact emission of two lines: SiO($\nu=3, J=5-4$) and SO$_2$($J_{Ka,Kc}=16_{3,13}-16_{2,14}$) and the absorption of a third, $^{29}$SiO($\nu=1, J=5-4$). The line emissions are shown to be properly described by solid body rotation of expanding shells having average radii of 36 mas (2.2 au) and 66 mas (4.0 au) for SiO($\nu=3, J=5-4$) and SO$_2$($J_{Ka,Kc}=16_{3,13}-16_{2,14}$) respectively. The authors claim that their result contradicts the interpretation made by Homan et al. of a nearly edge-on rotating disc. Their model implies a positive radial velocity gradient, at variance with the disc model of Homan et al. (2018b)\cite{Homan2018b} that implies a negative radial velocity gradient. However, following Homan et al., they suggest that the most likely cause of the observed rotation is the presence of a companion. They show a position-velocity diagram illustrating the region of the ``blue blob'' discovered earlier by Decin et al. (2018)\cite{Decin2018}. This diagram is using $^{29}$SiO($\nu=0, J=5-4$) observations about which no detail is given. The Doppler velocity reaches some 15 km s$^{-1}$. The authors discuss it in the framework of their solid body rotation model and recognize that the origin of the fast rotating feature out to $>10R_*$ remains unclear and that this feature could be unrelated to the rotation and represent a seemingly one-sided ejection of material. 
Apart from the discussion of this feature, the authors do not mention the possible existence of high Doppler velocity wings but simply quote line widths of $\sim5-10$ km s$^{-1}$\ for the SO$_2$ line, $\sim$10 km s$^{-1}$\ for the SiO $\nu=3$ line, and $\sim15-20$ km s$^{-1}$\ for the $^{29}$SiO $\nu=1$ line. A fortiori, they do not discuss possible causes for their existence, such as the effect of stellar pulsations. The lack of a convincing physical picture of the mechanism governing the production of high Doppler velocity wings in the nascent winds of EP Aqr and R Dor, together with the absence of a detailed and dedicated analysis of their morpho-kinematics in the case of R Dor, have motivated the present work. EP Aqr and R Dor are both semi-regular variables of the SRb type and belong to similar spectral classes, M8IIIvar and M8IIIe. They have similar initial masses, between 1 and 2 M$_\odot$, similar mass loss rates, $\sim$1.6 10$^{-7}$ M$_\odot$ yr$^{-1}$ \cite{Hoai2019, Maercker2016} and similar temperatures, within 100 K from 3150 K \cite{Dumm1998}. They are both close to the Sun, at distances of $\sim$119 pc \cite{vanLeeuwen2007,Gaia2018} and $\sim$59 pc \cite{Knapp2003}, respectively. Both display no technetium in their spectrum \cite{Lebzelter1999} and have the same $^{12}$CO/$^{13}$CO abundance ratio of $\sim$10 \cite{TuanAnh2019, Ramstedt2014}. They differ by their pulsation period, 55 days for EP Aqr \cite{Lebzelter2002} and a dual period of 175 and 332 days for R Dor \cite{Bedding1998} but their infrared emissions above black body between 1 and 40 $\mu$m wavelength are similar \cite{Heras2005}, the main difference being a relative enhancement of silicates and depression of aluminium oxide in the EP Aqr dust. The morpho-kinematics of the circumstellar envelope of EP Aqr has been measured up to some 1000 au from the star \cite{Homan2018a, Nhung2019a, Hoai2019, TuanAnh2019}. 
It is dominated above 200 au by an axi-symmetric radial wind having reached a terminal velocity of $\sim 2+9\sin^2\alpha$ km s$^{-1}$, $\alpha$ being the stellar latitude and the polar axis making an angle of $\sim$10$^\circ$\ with the line of sight. The circumstellar envelope of R Dor has been probed by ALMA with a resolution of $\sim$4 au up to some 60 au from the star \cite{Decin2018, Homan2018b, Vlemmings2018} and the dust has been observed at the VLT with a resolution of 1.2 au \cite{Khouri2016}. In addition, below $\sim$15 au, the analyses of Danilovich et al. (2016)\cite{Danilovich2016}, De Beck \& Olofsson (2018)\cite{DeBeck2018} and Van de Sande et al. (2018)\cite{VandeSande2018} have contributed a considerable amount of detailed information of relevance to the physico-chemistry and dynamics of both dust and gas. At larger distances from the star, an analysis of ALMA observations of SO($J_K=6_5-5_4$) emission \cite{Nhung2019b}, probing distances between 20 and 100 au, gives evidence for the wind to host a radial outflow covering large solid angles and displaying strong inhomogeneity both in direction and radially: the former takes the form of multiple cores and the latter displays a radial dependence suggesting an episode of enhanced mass loss having occurred a century or so ago. In what follows, we explore a possible interpretation of the high velocity wings present in the nascent wind of R Dor in terms similar to those observed in EP Aqr. We study the large Doppler velocity features displayed by line emissions detected between $\sim$15 and $\sim$50 au from the star \cite{Decin2018}, which suggest similarities with the EP Aqr dynamics. These include excitations of five different molecules: CO, SiO, SO, SO$_2$ and HCN. \section{OBSERVATIONS AND DATA REDUCTION}\label{sec2} The data are retrieved from ALMA archives. The SO data (time on source of 2.7 hours) are from project 2017.1.00824.S observed in December 2017 in band 6 with an average of 45 antennas. 
They have been used by Nhung et al. (2019b)\cite{Nhung2019b} to study the slow wind using the calibrations and deconvolution provided by the standard ALMA pipeline. We have checked the quality of the data reduction and evidence for the Gaussian distribution of the noise and for the proper description of the continuum map is given there, together with channel maps. These data have not been used by other authors and the study of Nhung et al. (2019b)\cite{Nhung2019b} does not address the issue of large Doppler velocity components. All other data (time on source of $\sim$25 minutes) are from project 2013.1.00166.S observed in summer 2015 in band 7 with an average of 39 antennas (Table \ref{tab1}). They have been used by Decin et al. (2018)\cite{Decin2018} who show channel maps but do not perform a detailed analysis of the large Doppler velocity components. These observations, reduced using the results of the standard ALMA pipeline, include datasets associated with significantly different $uv$ coverage, implying different maximal recoverable scales. While this implies important differences in the flux-density measured in the region of the slow wind, we have checked that very similar results are instead obtained in the region of large Doppler velocities explored here at small projected distance from the star. In the present work we also use SiO line data from the same project reduced by us without subtracting the continuum. They were calibrated from the raw data available in the archive and deconvolved using the same procedure as for the continuum subtracted data. \begin{table*} \centering \caption{Line emissions considered in the present work. 
All are in the vibrational ground state.} \label{tab1} \vspace{0.5cm} \begin{tabular}{cccccc} \hline Line&CO(3-2)&SiO(8-7)& \makecell{SO\\($J_K=6_5$-$5_4$)}&\makecell{SO$_2$\\($13_{4,10}$-$13_{3,11}$)}&HCN(4-3)\\ \hline Frequency (GHz)&345.796&347.331&251.826& 357.165&354.505\\ \hline Beam FWHM (mas$^2$)&180$\times$140&180$\times$130&154$\times$147&160$\times$130&157$\times$145\\ \hline \makecell{Noise \\(mJy\,beam$^{-1}$\,channel$^{-1}$)}& 5.5&4.8&1.1&6.3&8.5\\ \hline \makecell{Channel spacing\\ (km s$^{-1}$)}&0.42& 0.42&0.29&0.41&0.41\\ \hline \makecell{Peak intensity\\(Jy beam$^{-1}$)}&0.74&1.31&0.13&0.17&0.39\\ \hline $E_u/k$ (K)& 33.2&75.0&50.7&123&42.5\\ \hline \end{tabular} \end{table*} \section{LARGE DOPPLER VELOCITY COMPONENTS}\label{sec3} We use coordinates rotated to have the ``blue-blob'' detected by Decin et al. (2018)\cite{Decin2018} at a position angle of approximately 180$^\circ$, meaning that the $x$ axis points 35$^\circ$\ north of east and the $y$ axis 35$^\circ$\ west of north; the $z$ axis points away from us, parallel to the line of sight, and the origin of coordinates is taken at the centre of continuum emission. Doppler velocity ($V_z$) spectra are referred to a local standard of rest velocity of 7.0 km s$^{-1}$. \subsection{Interpretation in terms of gas streams}\label{sec3.1} Projections of the data-cubes on the ($x, V_z$) and ($y, V_z$) planes, to which we refer as P-V maps, are shown in Figure \ref{fig1}. In contrast with standard P-V diagrams, these are not restricted to narrow slits but are summed over the data-cube, namely integrated over $y$ and $x$ respectively. In all cases the larger values of $|V_z|$ are confined near $x= 0$, very much as was observed in EP Aqr \cite{TuanAnh2019}. We define large Doppler velocity components as having $|V_z|>7.5$ km s$^{-1}$\ in order to separate them from the slower wind. Figure \ref{fig2} displays the maps of their integrated intensity. 
Together with Figure \ref{fig1}, they show an accelerating stream-like morphology, which was already apparent from the progression of the ``blue-blob'' toward the star at a rate of $\sim$0.7 km s$^{-1}$ au$^{-1}$ in the SO$_2$ channel maps displayed in Figure B1 of Decin et al. (2018)\cite{Decin2018}. They are particularly visible in the CO, SiO and SO maps, both in the blue-shifted and red-shifted hemispheres, but more clearly in the former than in the latter. The interpretation of the high Doppler velocity components as streams rather than blobs is justified from their continuity with the slow wind: they only appear as blobs when considering a slice confined to an interval of Doppler velocity. Figure \ref{fig1} shows that they clearly stand out from the region of phase-space covered by the slow wind, making the distinction between high Doppler velocity wings and slow wind meaningful. This is further illustrated in Figure \ref{fig3}, which displays channel maps of the large Doppler velocity components for SO emission; other lines show similar patterns. We note the presence of an enhancement of emissivity near the line of sight in the lower $|V_z|$ intervals of the blue-shifted hemisphere, suggesting the presence of another component closer to the region of the slow wind. While the global picture is dominated by the former, which extends down to $-$20 km s$^{-1}$\ with intensity comparable to the red-shifted component, the presence of this enhancement cannot be ignored and needs to be studied in relation with the complex morpho-kinematics of the slow wind \cite{Nhung2019b}. However, this is beyond the scope of the present work that focuses on the dominant large Doppler velocity components. Globally, as illustrated in Figure \ref{fig4} for the case of SO emission, the $V_z$ spectra are dominated by the slow wind, the high velocity wings having much lower intensities on both the red-shifted and blue-shifted sides. 
This allows for a reliable evaluation of the end points of the Doppler velocity spectra of the slow wind, which we measure at $\sim$7.0, $\sim$8.3, $\sim$6.2, $\sim$6.0 and $\sim$6.8 km s$^{-1}$\ for CO, SiO, SO, SO$_2$ and HCN respectively on the red-shifted side. Very similar values are obtained on the blue-shifted side. As the maximal recoverable scales pertinent to each line are similar, these differences probably reveal different radial dependence of the molecular relative abundance and/or emissivity being probed along the line of sight. In particular, evidence for stream-like morphology is barely significant in the case of HCN as had first been noted by Decin et al. (2018)\cite{Decin2018}. The fact that the CO, SiO and SO$_2$ observations were made on a same day with a same antenna pattern while HCN was observed the day before with a different antenna pattern but a similar maximal recoverable scale is unlikely to explain the difference. We note that HCN is not expected to form in O-rich environments, its relatively strong emission in the slow wind may be explained by pulsation-induced shock-chemistry and the absence of detection at large Doppler velocities may simply be the result of insufficient sensitivity. The fact that the two candidate streams have the same reach in $|V_z|$ suggests that they are essentially symmetric with respect to the star. However, the red-shifted stream projects on the star within a beam size while the blue-shifted stream spans a significant $y$ interval, implying that such symmetry is not perfect. This may be because the streams are unrelated and that the same reach in $|V_z|$ is accidental. In this case, the streams may make very different angles $i_1$ and $i_2$ with respect to the line of sight as long as their velocities $V_1$ and $V_2$ obey $V_1$cos$i_1$=$V_2$cos$i_2$. 
But if the same reach in $|V_z|$ is not accidental the two streams are expected to be at small angle to each other and therefore at similar angles $i_1\sim i_2\sim i$ from the line of sight. The effect of de-projection is essentially to change the $z$ scale to the extent that, to first order, $z$ may be approximated by a linear function of $V_z$. In particular, in principle, the large Doppler velocity components may be confined to the very close environment of the star. In order to get some idea of a possible geometry, we use an example illustrated in the left panel of Figure \ref{fig5}, where we assume arbitrarily that a stream velocity of 20 km s$^{-1}$\ is reached over a distance of 60 au along the line of sight. Approximating the stream projections on the P-V maps of Figure \ref{fig1} as shown by black arrows, we find that the red-shifted stream reaches this distance at $\sim -0.1$ arcsec in $x$ and $\sim$0.1 arcsec in $y$ while the blue-shifted stream reaches it at $\sim$0.1 arcsec in $x$ and $\sim-0.35$ arcsec in $y$. This means an angle of $\sim$8$^\circ$\ between the red-shifted stream and the line of sight, an angle of $\sim$20$^\circ$\ between the blue-shifted stream and the line of sight, and an angle of $\sim$14$^\circ$\ between the two streams. Of course, these numbers scale with the arbitrary length of 60 au used in the example and are simply meant to illustrate a possible stream geometry. \begin{figure*} \centering \includegraphics[width=0.9\textwidth]{fig1-pvmap-replot.pdf} \caption{P-V maps in the $V_z$ vs $x$ and vs $y$ planes. The colour scale is in units of Jy arcsec$^{-1}$. Yellow lines show the cuts applied in the definition of the large Doppler velocity components. The SO map extends up to 2 arcsec. 
The black arrows cover from the origin to $(x,y)$=(0.1,$-$0.35) arcsec and $V_z=-20$ km s$^{-1}$\ in the blue-shifted hemisphere and to $(x,y)$=( $-$0.1,0.1) arcsec and $V_z$=20 km s$^{-1}$\ in the red-shifted hemisphere.} \label{fig1} \end{figure*} \begin{figure*} \centering \includegraphics[width=0.95\textwidth,trim=0.cm .5cm 0.cm 0.5cm,clip]{fig2-highv-replot.pdf} \caption{Intensity maps of the high $|V_z|$ components. Contours show the red-shifted stream, the colour maps show the blue-shifted stream. The colour scale is in units of Jy beam$^{-1}$ km s$^{-1}$. The contour levels are at 10\%, 20\%, 30\%, 50\%, 70\% and 90\% of the peak intensity of the blue-shifted stream. The beams are shown in the lower left corners. The white crosses mark the position of the continuum peak.} \label{fig2} \end{figure*} \begin{figure*} \centering \includegraphics[height=9cm,trim=1.2cm 0.cm 0.cm 1.cm,clip]{channelmap-blue.pdf} \includegraphics[height=9cm,trim=1.2cm 0.cm 0.cm 1.cm,clip]{channelmap-red.pdf} \caption{Channel maps of the large Doppler velocity components of the SO line in the blue-shifted (upper panels) and red-shifted (lower panels) hemispheres. For each hemisphere we use 1 km s$^{-1}$\ bins in $|V_z|$, the velocity is indicated in the upper left corner of each panel. The common colour scale is in units of mJy beam$^{-1}$. The white crosses mark the position of the continuum peak.} \label{fig3} \end{figure*} \begin{figure} \centering \includegraphics[width=0.35\textwidth,trim=1cm 1cm 2.cm 2cm,clip]{so-spec.pdf} \caption{Doppler velocity spectrum of SO emission summed in the sky plane over a 1 arcsec radius circle centred on the star. 
The red lines are linear fits to the edge of the profile used to define the separation of the large Doppler velocity components from the slow wind.} \label{fig4} \end{figure} \begin{figure*} \centering \includegraphics[width=0.24\textwidth,trim=0.cm -1.cm 0.5cm 0.5cm,clip]{fig3a-stream.pdf} \includegraphics[width=0.6\textwidth,trim=0.cm 1.5cm 0.cm 1.5cm,clip]{fig3b-stream.pdf} \caption{Left panel: illustration of a possible geometry assuming that the streams reach a velocity of 20 km s$^{-1}$\ over a distance of 60 au along the line of sight (see text). Right panels: dependence of the integrated intensity on $x$ (upper panels) and $y$ (lower panels) measured with respect to the stream axes defined by black arrows in Figure \ref{fig1}. Lines are labelled on top of the upper panels. The integration is made over $|V_z|>$ 7.0, 8.3, 6.2 and 6.0 km s$^{-1}$\ for CO, SiO, SO and SO$_2$ respectively. For convenience, different arbitrary scales are used for different lines. Blue and red profiles are for blue-shifted and red-shifted hemispheres respectively. The curves show Gaussian fits.} \label{fig5} \end{figure*} The $x$ and $y$ profiles of the gas streams (excluding HCN) are illustrated in the right panels of Figure \ref{fig5}. They are referred to the stream axes indicated as black arrows in Figure \ref{fig1} and have Doppler velocities in excess of the slow wind end-point velocities listed above. On average, they are well centred to within $\pm$30 mas (1.8 au); Gaussian fits give standard deviations with respect to the mean of 90 mas in $x$ and 120 mas in $y$, meaning, after beam de-convolution, 70 mas (4.2 au) in $x$ and 90 mas (5.4 au) in $y$. The opening angle of the streams depends on their longitudinal extension; using as an example a mean distance of 30 au along the line of sight, this would correspond to an opening angle (standard deviation) of $\pm$9$^\circ$. 
While beyond the scope of the present article, we note the presence of significant depletions in well-defined regions of the data-cubes, in particular between Doppler velocities of $-1$ and 3 km s$^{-1}$. This is reminiscent of the blue-western depletion observed in EP Aqr by Tuan-Anh et al. (2019)\cite{TuanAnh2019}, who argue that it may be related to the nascent streams. In the present case, the complexity of the observed morpho-kinematics \cite{Nhung2019b} prevents us from reliably asserting the existence of such a relation. \subsection{Eliminating a possible effect related to continuum subtraction}\label{sec3.2} The confinement of the red-shifted stream in the vicinity of the line of sight crossing the star in its centre comes as a surprise: at variance with EP Aqr, where axi-symmetry about this line of sight is well established, R Dor displays no obvious axi-symmetry and this line of sight does not seem to play any particular role in the complex morpho-kinematics of the circumstellar envelope \cite{Homan2018b, Vlemmings2018, Nhung2019b}. In particular, the rotation axis observed by Homan et al. (2018b)\cite{Homan2018b} makes an angle of only $20\pm20$$^\circ$\ with the plane of the sky. This remark may suggest that the gas stream appearance of the high velocity wings is not real but is mimicked by some effect that has been overlooked. If such were the case, it would also cast doubt on the validity of the gas stream interpretation in the case of EP Aqr. It is therefore essential to review critically such possible effects. The next section discusses possible physical interpretations. Here, we address instead a possible effect of inadequate continuum subtraction. 
\begin{figure} \centering \includegraphics[height=4.5cm,trim=0.5cm 1.cm 2.cm 2.cm,clip]{spec-blue.pdf} \includegraphics[height=4.5cm,trim=2.9cm 1.cm 2.cm 2.cm,clip]{spec-red.pdf} \caption{Doppler velocity distributions obtained for $|V_z|>8$ km s$^{-1}$\ using two datasets of SiO line emission having significantly different \textit{uv} coverage. The continuum, which has not been subtracted, is seen at the level of $0.6$ Jy. The dataset having the larger maximal recoverable scale is shown for $R<1.5$ arcsec (red) and for $R<0.3$ arcsec (blue). The dataset having the smaller maximal recoverable scale is shown for $R<1.5$ arcsec (black). } \label{fig6} \end{figure} As continuum emission is confined near the star and covers uniformly the observed range of Doppler velocities, inadequate continuum subtraction is a candidate for producing artefacts mimicking high velocity wings emitted along the line of sight near the origin of coordinates. The data used in the present work have been obtained after subtraction in the $uv$ plane of the contribution of continuum emission. An imperfection in the procedure of Fourier-transforming from the $uv$ plane to the sky plane or of producing the clean maps might have generated the undesired artefacts and have remained unnoticed. In order to check on this, we repeated the analysis of the SiO observations without performing any continuum subtraction, namely Fourier-transforming directly the measured visibilities. Not subtracting the continuum is a simplification and obviously does not affect significantly the main results in terms of beam size, maximum recoverable scale, etc. The result is illustrated in Figure \ref{fig6} using two datasets having different antenna configurations. The Doppler velocity distributions obtained for $|V_z|>8$ km s$^{-1}$\ display uniform continuum emission beyond $|V_z|>20$ km s$^{-1}$\ at a level of $\sim 0.60\pm0.03$ Jy on both red-shifted and blue-shifted sides. 
Here the uncertainty accounts for both noise levels and differences between different data sets. In comparison, the continuum level measured by Decin et al. (2018)\cite{Decin2018} is 0.65 Jy with an uncertainty that does not exceed 0.01 Jy: our result is therefore $\sim 0.05\pm0.03$ Jy smaller. While of little relevance to the argument of the present article, this small difference is significant and probably reveals systematic differences in the data samples used and in their reduction. This result gives confidence in the reality of the high Doppler velocity components, seen to rise above continuum emission below $|V_z|\sim20$ km s$^{-1}$. It displays a remarkable symmetry between blue-shifted and red-shifted wings, at variance with an earlier interpretation of the ``blue blob'' as causing one-sided ejection of material \cite{Vlemmings2018}. It provides evidence for the emission to be essentially contained within an aperture radius of 0.3 arcsec (18 au). The mean projected acceleration, averaged over the ($V_z$ vs $y$) P-V maps of Figure \ref{fig1}, is 0.68$\pm$0.10 km s$^{-1}$ au$^{-1}$. However, to the extent that the large Doppler velocities are interpreted in terms of radial expansion, namely that blue- and red-shifted components are emitted from opposite sides of the star, the nearly perfect blue-red symmetry would suggest that effects of absorption and radiative transfer are small and can be ignored. \subsection{Interpretations in terms of pulsations or rotations}\label{sec3.3} A gas stream interpretation implies that the streams are accelerated to nearly 20 km s$^{-1}$\ over a long enough distance to justify such a description. Quantitatively, we may get an idea of how long if we accept that the streams are approximately symmetric with respect to the star. The projected acceleration being estimated to reach $\sim$0.7 km s$^{-1}$ au$^{-1}$ on the blue-shifted side, the Doppler velocity of 20 km s$^{-1}$\ is reached near 30 au projected distance from the star. 
If the blue-shifted stream were in the plane of the sky, it would reach a space velocity of 20 km s$^{-1}$\ in $\sim$ 30 au and so would the red-shifted stream. But in projected distance the red-shifted stream stays within some 10 au from the star. Then, it would make an angle of $\cos^{-1}(10/30)=70$$^\circ$\ with the plane of the sky (which contains the blue-shifted stream) contradicting the hypothesis that the two streams are nearly symmetric. If the blue-shifted stream makes an angle $i_B$ and the red-shifted stream an angle $i_R$ with the line of sight, the common stream length is $30/\sin i_B=10/\sin i_R$ au, the right hand side of the equation being an upper limit, namely $\sin i_R<\nicefrac{1}{3}\,\sin i_B$. For $i_B=30$$^\circ$\, $i_R<10$$^\circ$\ corresponding to a stream length of 60 au. This very crude estimate gives the scale of acceptable stream lengths in the hypothesis of approximately symmetric streams; it corresponds to a space acceleration of $\sim$1.5 km s$^{-1}$\ au$^{-1}$. If the acceleration were more sudden, other interpretations, such as stellar pulsations or rotation, would become possible. In the case of rotation, one would typically expect a line profile covering twice the maximal rotation velocity. Homan et al. (2018b)\cite{Homan2018b} quote a rotation velocity of only 12 km s$^{-1}$\ at a distance of 6 au from the star. Keplerian rotation would imply that the maximal observed velocity of 20 km s$^{-1}$\ would be reached at only $\sim$2.2 au from the star centre, very close to the stellar surface. The absence of detection in the $^{28}$SiO($\nu=1, J=8-7$) data analysed by Homan et al. (2018b)\cite{Homan2018b} would then have to be blamed on insufficient sensitivity if a Keplerian rotation scenario was retained. Vlemmings et al. 
(2018)\cite{Vlemmings2018} quote a solid body rotation of 1.0$\pm$0.1 km s$^{-1}$\ at the stellar surface ($\sim$1.9 au) meaning that a rotation velocity of 20 km s$^{-1}$\ would be reached at a distance of $\sim$38 au from the star, nearly twice as far as the canonical limit that has been set. Unfortunately, the authors do not comment on the high Doppler velocity wings in general. They only illustrate the blue-shifted stream, which they claim to be one-sided, with a P-V diagram of $^{29}$SiO($\nu=0, J=5-4$) data that is consistent with the gas stream interpretation. An interpretation in terms of a pulsating spherical shell is equally challenging. It has been explicitly excluded by Decin et al. (2018)\cite{Decin2018} on the basis of a model \cite{Nowotny2010}. For an expanding spherical shell to reproduce the narrow high velocity features observed in Figure 1, its radius must not exceed some 15 to 20 au. In principle, the very high angular resolution observations analysed by Vlemmings et al. (2018)\cite{Vlemmings2018} should allow for a measurement of the maximal Doppler velocity reached within an aperture at au scale. Unfortunately the authors do not address the issue: no measurement of the maximal velocity reached in a very small aperture is quoted. Both interpretations, rotation and pulsation, predict red-blue symmetry in contrast with what is observed. The possible presence of a companion, interfering with the blue-shifted high velocity wing, could then be invoked. Decin et al. (2018)\cite{Decin2018}, followed by Homan et al. (2018b)\cite{Homan2018b} and Vlemmings et al. (2018)\cite{Vlemmings2018} have noted that such presence would provide a welcome explanation of the large angular momentum implied by the observed rotation. \section{CONCLUSION} Evidence has been obtained for a possible description of the nascent wind of AGB star R Dor in terms of a pair of high velocity gas streams emitted nearly back-to-back near the line of sight. 
They are reminiscent of those observed in similar conditions in the nascent wind of EP Aqr, an AGB star having properties very similar to R Dor. However, at variance with EP Aqr, the R Dor streams make a large angle with the rotation axes favoured by Vlemmings et al. (2018)\cite{Vlemmings2018} for the star and by Homan et al. (2018b)\cite{Homan2018b} for the rotating disc surrounding it. This may suggest that they are artefacts of improper data reduction and/or continuum subtraction but an analysis of continuum-unsubtracted data confirms the validity of the gas stream interpretation. Moreover, interpretations in terms of rotation or pulsations have been shown to meet important difficulties that prevent retaining such interpretations with reasonable confidence. A possibility that has not been seriously explored would be that the large Doppler velocities, rather than revealing large radial wind velocities, would instead reveal large line widths caused by turbulence, which would then be dominantly detected on the Earth side of the star atmosphere and would provide a simple explanation for the nearly perfect blue-red symmetry displayed in Figure \ref{fig6}. In principle, important relevant information should be contained in the very high angular resolution ALMA observations analysed by Vlemmings et al. (2018)\cite{Vlemmings2018} but, as far as we know, the issue has not been addressed by the authors in published material. The presence of large velocity components, reaching three times the terminal wind velocity, less than 100 au from each of two similar AGB stars, cannot be ignored when attempting a description of the mechanism governing the launch of their nascent wind and the breaking of the spherical symmetry present in the Red Giant phase. However, today, no fully convincing interpretation of the physics governing their existence can be reliably proposed. 
Future high resolution studies of other AGB stars displaying significantly higher Doppler velocities than the terminal wind will help clarify this important issue. \section*{ACKNOWLEDGEMENT} We are grateful to Dr Ward Homan for sharing with us information on the analysis of R Doradus observations and to Professor Albert Zijlstra for support and advice. We thank the referees Professors Jan Martin Winters and Pierre Lesaffre for useful comments that helped improve the quality of the manuscript. This paper makes use of the following ALMA data: ADS/JAO.ALMA\#2017.1.00824.S and ADS/JAO.ALMA\#2013.1.00166.S. ALMA is a partnership of ESO (representing its member states), NSF (USA) and NINS (Japan), together with NRC (Canada), MOST and ASIAA (Taiwan), and KASI (Republic of Korea), in cooperation with the Republic of Chile. The Joint ALMA Observatory is operated by ESO, AUI/NRAO and NAOJ. The data are retrieved from the JVO portal (http://jvo.nao.ac.jp/portal) operated by the NAOJ. We are deeply indebted to the ALMA partnership, whose open access policy means invaluable support and encouragement for Vietnamese astrophysics. Financial support from the World Laboratory, VNSC and NAFOSTED is gratefully acknowledged. This research is funded by the Vietnam National Foundation for Science and Technology Development (NAFOSTED) under grant number 103.99-2018.325. \bibliographystyle{ciprefstyle-unsrt}
1,108,101,565,877
arxiv
\section{Simulation} \usepackage{authblk} \usepackage{booktabs} \usepackage{nicefrac} \usepackage{enumitem} \usepackage{bbding} \usepackage{soul} \usepackage[numbers]{natbib} \usepackage{nicefrac} \usepackage{placeins} \usepackage{xcolor} \usepackage{siunitx} \usepackage{tabularx,booktabs} \usepackage{xr-hyper} \def, {, } \def--{--} \renewcommand{\tabcolsep}{0.1cm} \DeclareMathOperator*{\argmax}{argmax} \DeclareMathOperator*{\plim}{plim} \DeclareMathOperator*{\argmin}{argmin} \DeclareMathOperator*{\cov}{cov} \DeclareMathOperator*{\mean}{mean} \DeclareMathOperator*{\modulo}{mod} \DeclareMathOperator*{\var}{var} \DeclareMathOperator*{\tr}{tr} \DeclareMathOperator*{\card}{card} \DeclareMathOperator*{\diag}{diag} \DeclareMathOperator*{\md}{md} \DeclareMathOperator*{\iid}{iid} \DeclareMathOperator*{\RRMSE}{RRMSE} \DeclareMathOperator*{\med}{med} \DeclareMathOperator*{\mad}{mad} \DeclareMathOperator*{\rank}{rank} \DeclareMathOperator*{\IF}{IF} \DeclareMathOperator{\AV}{AVar} \DeclareSymbolFont{lettersA}{U}{txmia}{m}{it} \DeclareMathSymbol{{\rm I\!R}}{\mathord}{lettersA}{"92} \DeclareMathSymbol{\field}{\mathord}{lettersA}{"83} \hyphenation{op-tical net-works semi-conduc-tor} \DeclarePairedDelimiter\ceil{\lceil}{\rceil} \DeclarePairedDelimiter\floor{\lfloor}{\rfloor} \def\boxit#1{\vbox{\hrule\hbox{\vrule\kern3pt \vbox{\kern3pt#1\kern3pt}\kern3pt\vrule}\hrule}} \def\mkcomment#1{\vskip0mm\boxit{\vskip 0mm{\color{orange}\bf#1} {\color{orange}\bf -- MK\vskip 1mm}}\vskip 0mm} \def\sgcomment#1{\vskip0mm\boxit{\vskip 0mm{\color{blue}\bf#1} {\color{blue}\bf -- SG\vskip 1mm}}\vskip 0mm} \def\jjcomment#1{\vskip0mm\boxit{\vskip 0mm{\color{red}\bf#1} {\color{red}\bf -- JJ\vskip 1mm}}\vskip 0mm} \definecolor{pinegreen}{rgb}{0.0, 0.47, 0.44} \def\samcomment#1{\vskip0mm\boxit{\vskip 0mm{\color{pinegreen}\bf#1} {\color{blue}\bf -- OS\vskip 1mm}}\vskip 0mm} \def\hxcomment#1{\vskip0mm\boxit{\vskip 0mm{\color{green}\bf#1} {\color{blue}\bf -- HX\vskip 1mm}}\vskip 0mm} 
\def\rmcomment#1{\vskip0mm\boxit{\vskip 0mm{\color{purple}\bf#1} {\color{blue}\bf -- RM\vskip 1mm}}\vskip 0mm} \def\mkhcomment#1{\vskip0mm\boxit{\vskip 0mm{\color{violet}\bf#1} {\color{violet}\bf -- MKh\vskip 1mm}}\vskip 0mm} \def\yzcomment#1{\vskip0mm\boxit{\vskip 0mm{\color{blue}\bf#1} {\color{blue}\bf -- YZ\vskip 1mm}}\vskip 0mm} \newtheoremstyle{mytheoremstyle} {0.3cm} {0cm} {\itshape} {} {\scshape} {: } {0em} {} \theoremstyle{mytheoremstyle} \newtheorem{Theorem}{Theorem} \newtheorem{Lemma}{Lemma} \newtheorem{Proposition}{Proposition} \newtheorem{Corollary}{Corollary} \newtheorem{Definition}{Definition} \newtheorem{Conjecture}{Conjecture} \newtheorem{Proof}{Proof} \renewenvironment{proof}{{\noindent \sc Proof:}}{\qed} \newtheoremstyle{myExampleRemarkstyle} {0.3cm} {0cm} {\itshape} {} {\scshape} {: } {0em} {} \theoremstyle{myExampleRemarkstyle} \newtheorem{Example}{Example} \newtheorem{Remark}{Remark} \newtheorem{Assumption}{Assumption} \renewcommand{\theRemark}{\Alph{Remark}} \newtheoremstyle{simuStyle} {0.3cm} {0cm} {} {} {\bfseries} {.} {0em} {} \theoremstyle{simuStyle} \newtheorem{Simulation}{Simulation}[section] \newtheoremstyle{stratStyle} {0.3cm} {0cm} {} {} {\scshape} {: } {0em} {} \theoremstyle{stratStyle} \newtheorem{Strategy}{Strategy} \DeclareSymbolFont{lettersA}{U}{txmia}{m}{it} \DeclareMathSymbol{{\rm I\!R}}{\mathord}{lettersA}{"92} \DeclareMathSymbol{\field}{\mathord}{lettersA}{"83} \def\stackrel{iid}{\sim}{\stackrel{iid}{\sim}} \renewcommand{\theRemark}{\Alph{Remark}} \def{\rm I\!R}{{\rm I\!R}} \def^{\rm T}{^{\rm T}} \DeclareMathOperator*{\Int}{Int} \def\mathbf{0}{{\bf 0}} \def{\bm{\theta}}{{\bm{\theta}}} \def\hat{\bm{\theta}}{\hat{\bm{\theta}}} \def{\bm{\beta}}{{\bm{\beta}}} \def\tilde{\bm{\theta}}{\tilde{\bm{\theta}}} \def{\bm{\mu}}{{\bm{\mu}}} \def{\bm{\pi}}{{\bm{\pi}}} \def\hat{{\bm{\pi}}}{\hat{{\bm{\pi}}}} \def{\bm{\epsilon}}{{\bm{\epsilon}}} \def{\bm{\Omega}}{{\bm{\Omega}}} \def\boldsymbol{H}{\boldsymbol{H}} \def{\bf X}{{\bf X}} \def{\bf 
x}{{\bf x}} \def{\bf I}{{\bf I}} \def{\bf Y}{{\bf Y}} \def{\bf y}{{\bf y}} \def{\bf A}{{\bf A}} \def{\bf B}{{\bf B}} \def{\bf V}{{\bf V}} \def\lambda_{\max}{\lambda_{\max}} \def\lambda_{\min}{\lambda_{\min}} \DeclareMathOperator*{\f}{f} \DeclareMathOperator*{\corr}{corr} \DeclareMathOperator*{\argzero}{argzero} \def{\mathcal{N}}{{\mathcal{N}}} \def\hat{\bm{\pi}}{\hat{\bm{\pi}}} \def\bm{\theta}{\bm{\theta}} \def\bt_0{\bm{\theta}_0} \def\bm{\Theta}{\bm{\Theta}} \def\bm{\Delta}{\bm{\Delta}} \def\hat{\bt}_{(j,n,H)}{\hat{\bm{\theta}}_{(j,n,H)}} \def\mathbf{c}(n){\mathbf{c}(n)} \def\bm{\omega}{\bm{\omega}} \def\bw_0{\bm{\omega}_0} \def\mathbf{A}(n){\mathbf{A}(n)} \defo_p{o_p} \def\mathcal{O}{\mathcal{O}} \def\O_p{\mathcal{O}_p} \def\mathbf{0}{\mathbf{0}} \def\mathbf{I}{\mathbf{I}} \defD_{\bt}{D_{\bm{\theta}}} \def\tau_n\mathbf{u}^T\bm{\Sigma}^{-\nicefrac{1}{2}}{\tau_n\mathbf{u}^T\bm{\Sigma}^{-\nicefrac{1}{2}}} \def\mathcal{N}\left(0,1\right){\mathcal{N}\left(0,1\right)} \def\mathbb{N}{\mathbb{N}} \def\N^\ast{\mathbb{N}^\ast} \def\bm{\beta}{\bm{\beta}} \def\hat{\bb}{\hat{\bm{\beta}}} \def\hbb^{\text{ridge}}_{\lambda}{\hat{\bb}^{\text{ridge}}_{\lambda}} \def\hbb^{\text{OLS}}{\hat{\bb}^{\text{OLS}}} \def\mathbf{y}{\mathbf{y}} \def\mathbf{X}{\mathbf{X}} \newcommand{\hp}[2]{\hat{\bm{\pi}}\left({#1},n,{#2}\right)} \newcommand{\hpH}[1]{\frac{1}{H}\sum_{h=1}^H\hat{\bm{\pi}}\left({#1},n,\bm{\omega}_{h+jH}\right)} \renewcommand{\a}[1]{\mathbf{a}\left({#1}\right)} \renewcommand{\r}[1]{\mathbf{r}\left({#1},n\right)} \renewcommand{\r}[1]{\mathbf{r}\left({#1},n\right)} \renewcommand{\v}[2]{\mathbf{v}\left({#1},n,{#2}\right)} \newcommand{\vH}[1]{\frac{1}{H}\sum_{h=1}^H\mathbf{v}\left({#1},n,\bm{\omega}_{h+jH}\right)} \newcommand{\DvH}[1]{\frac{1}{H}\sum_{h=1}^HD_{\bt}\mathbf{v}\left({#1},n,\bm{\omega}_{h+jH}\right)} \newcommand{\g}[1]{\mathbf{g}\left({#1},n,H\right)} \newcommand{\h}[1]{\mathbf{h}\left({#1},n,H\right)} \renewcommand{\H}[1]{\mathbf{H}\left({#1},n\right)} 
\newcolumntype{Y}{>{\centering\arraybackslash}X} \begin{document} \title{Wavelet-Based Moment-Matching Techniques\\ for Inertial Sensor Calibration} \author{St\'ephane~Guerrier$^{*,\dag}$, Juan~Jurado$^*$, Mehran~Khaghani$^*$, Gaetan~Bakalli, Mucyo~Karemera, Roberto~Molinari, Samuel~Orso, John~Raquet, Christine~M.~Schubert~Kabban, Jan~Skaloud, Haotian~Xu \& Yuming~Zhang \thanks{\textit{$^*$The first three authors contributed equally and are alphabetically ordered. $^{\dag}$indicates corresponding author.}} \thanks{\textbf{S. Guerrier} is an Assistant Professor, Faculty of Science \& Geneva School of Economics and Management, University of Geneva, 1205, Switzerland. (E-mail: [email protected]).}% \thanks{\textbf{J. Jurado} is the Director of Education, U.S. Air Force Test Pilot School, Edwards AFB, CA, 93523, USA. (E-mail: [email protected]).} \thanks{\textbf{M. Khaghani} is a Postdoctoral Scholar, Geneva School of Economics and Management, University of Geneva, 1205, Switzerland. (E-mail: [email protected]).} \thanks{\textbf{G. Bakalli} is a PhD candidate, Geneva School of Economics and Management, University of Geneva, 1205, Switzerland. (e-mail: [email protected]).} \thanks{\textbf{M. Karemera} is a Postdoctoral Scholar, Geneva School of Economics and Management, University of Geneva, 1205, Switzerland. (E-mail: [email protected])} \thanks{\textbf{R. Molinari} is a Postdoctoral Scholar, Department of Statistics, Pennsylvania State University, PA, 16801, USA. (E-mail: [email protected])} \thanks{\textbf{S. Orso} is a Postdoctoral Scholar, Geneva School of Economics and Management, University of Geneva, 1205, Switzerland. (E-mail: [email protected])} \thanks{\textbf{J. Raquet} is the Director, IS4S-Dayton, Integrated Solutions for Systems, Inc., Beavercreek, OH, 45324, USA. (E-mail: [email protected]).} \thanks{\textbf{C. M. Schubert Kabban} is an Associate Professor, Department of Mathematics and Statistics, Air Force Institute of Technology, OH, 45324, USA. 
(E-mail: [email protected]).} \thanks{\textbf{J. Skaloud} is Lecturer, Geodetic Engineering Laboratory, \'Ecole Polytechnique F\'ed\'erale de Lausanne, 1015, Switzerland (Email: [email protected]).} \thanks{\textbf{H. Xu} is a PhD candidate, Geneva School of Economics and Management, University of Geneva, 1205, Switzerland. (E-mail: [email protected]).} \thanks{\textbf{Y. Zhang} is a PhD candidate, Geneva School of Economics and Management, University of Geneva, 1205, Switzerland. (E-mail: [email protected]).} \thanks{DISTRIBUTION STATEMENT A. Approved for public release; Distribution is unlimited 412TW-PA-19483}} \markboth{}% {Shell \MakeLowercase{\textit{et al.}}: Bare Demo of IEEEtran.cls for Journals} \maketitle \begin{abstract} The task of inertial sensor calibration has required the development of various techniques to take into account the sources of measurement error coming from such devices. The calibration of the stochastic errors of these sensors has been the focus of increasing amount of research in which the method of reference has been the so-called ``Allan variance slope method'' which, in addition to not having appropriate statistical properties, requires a subjective input which makes it prone to mistakes. To overcome this, recent research has started proposing ``automatic'' approaches where the parameters of the probabilistic models underlying the error signals are estimated by matching functions of the Allan variance or Wavelet Variance with their model-implied counterparts. However, given the increased use of such techniques, there has been no study or clear direction for practitioners on which approach is optimal for the purpose of sensor calibration. This paper formally defines the class of estimators based on this technique and puts forward theoretical and applied results that, comparing with estimators in this class, suggest the use of the Generalized Method of Wavelet Moments as an optimal choice. 
\end{abstract} \begin{IEEEkeywords} Allan Variance, Wavelet Variance, Inertial Measurement Unit, Generalized Method of Wavelet Moments, Stochastic Error, Slope Method, Autonomous Regression Method for Allan Variance \end{IEEEkeywords} \section{Introduction}\label{sec:intro} The identification of a probabilistic time series model and the estimation of its relative parameters for the error signal issued from various sensors, such as inertial sensors, is a key challenge in many fields of engineering that has led to a great deal of research being produced. Aside from the size of the calibration data which can entail computational burdens for the mentioned estimation tasks, the stochastic errors of these signals are often complex in nature since they can be characterized by composite (latent) stochastic processes where different underlying models contribute to the observed error signal. Although different approaches exist to perform estimation for the parameters of these processes, the currently adopted standard method for modeling the stochastic error of inertial sensors is the ``Allan Variance Slope Method'' (AVSM)~\cite{IEEE660628} that relies on the Allan Variance (AV) which is widely accepted as being a quantity of reference for the calibration of the stochastic errors issued from (low-cost) inertial sensors. Indeed, the AVSM relies on the fact that certain stochastic processes contributing to the overall signal (such as white noises and random walks) are identifiable based on the slope of the plot. Based on this property, the AVSM requires practitioners to (i) make a log-log plot of the empirical AV of an error signal, (ii) detect the regions of the plot which best represent an assumed model, (iii) estimate the parameters of the latter by estimating the slope of the AV within the selected region (based on which model parameters can be found). 
This approach is currently widely practiced in industry and academia making it a method of reference for inertial sensor calibration. Despite its popularity however, the AVSM procedure is lengthy and prone to (human) errors as well as having been proven to be statistically inconsistent~\cite{guerrier2016theoretical} thereby implying that, being among others subjective in nature, the resulting parameter estimates can be severely biased and do not improve as the length of the observed signal increases. For the above reasons (i.e. statistical inconsistency and subjective nature of the AVSM), the literature has proposed different alternatives that either make this procedure autonomous or use the AV in a way to deliver consistent estimations (or both). Among these proposals, we can find those that make use of the linearity of logarithmic transforms of the AV to apply regression methods to estimate the parameters of the stochastic models assumed for the observed error signals. A recent example is given in~\cite{Jurado19} where a regularized regression approach is applied to the logarithm of the AV with base 10. Another approach is based on a linear transformation of the AV, more specifically the Haar Wavelet Variance (WV), where a generalized least square approach inverses the mapping between the model-implied WV and the empirical WV~\cite{guerrier2013wavelet}. These moment-matching techniques belong to the class of the generalized method of moments estimators (introduced in \cite{hansen1982large}) where the considered ``moments'' are either the AV, the WV or \textit{functions} of one of these quantities. Given the presence of different moment-matching approaches based on the WV (AV) in order to automatize the sensor calibration process, it is important to understand how these approaches compare both from a theoretical as well as a practical point of view. 
Indeed, it would be appropriate to study these methods so that practitioners have some criteria that would allow them to choose the approach that best suits their requirements. For the above reasons, this paper intends to study the properties of this class of moment-matching approaches and put forward a proposed optimal moment-matching technique for inertial sensor calibration. Based on this goal, the paper is organized as follows. In Sec. \ref{sec:notation}, we summarize the notational convention used throughout the paper. In Sec. \ref{sec:gmwm} we discuss the class of moment-matching estimators based on (functions of) the WV and formally compare them. Sec. \ref{sec:simulations} compares calibration parameter estimation results using some existing moment-matching approaches based on the calibration of an accelerometer and a gyroscope from an STIM-300 Inertial Measurement Unit (IMU). Finally, in Sec. \ref{sec:conclusion}, we summarize our findings and conclusions. \section{Notational Convention} \label{sec:notation} \textit{Conventions} \vspace{0.4cm} \begin{center} \begin{tabular}{ l l } $(x_t)$ & refers to a sequence of values indexed \vspace{0.1cm}\\ & by integer $t$ \\ $x_t$ & refers to the $t$th value of a sequence \vspace{0.1cm}\\ $Y_t$ & refers to a random variable indexed \\ & by integer $t$ \vspace{0.1cm}\\ $y_t$ & refers to a realization of $Y_t$ indexed \\ & by integer $t$ \vspace{0.1cm}\\ ${\rm I\!R}_{+}$ & refers to the set of positive real numbers \vspace{0.1cm}\\ ${\rm I\!R}_{-}$ & refers to the set of negative real numbers \vspace{0.1cm}\\ $\mathcal{C}^1(\mathcal{A}, \mathcal{B})$ & refers to the set of functions from the set $\mathcal{A}$ \\ & to the set $\mathcal{B}$ whose first derivatives are\\ & continuous \vspace{0.1cm} \\ $||\mathbf{x}||_\mathbf{A}^2$ & denotes the squared Mahalanobis distance,\\ & i.e. 
$||\mathbf{x}||_\mathbf{A}^2 := \mathbf{x}^T \mathbf{A} \mathbf{x}$ where $\mathbf{x} \in {\rm I\!R}^q$ \\ & and $\mathbf{A} \in {\rm I\!R}^{q \times q}$ \vspace{0.1cm} \\ $|| \mathbf{x} ||_2$ & denotes the {\color{black}$l_2$-norm} of vector $\mathbf{x} \in {\rm I\!R}^q$, \\ & i.e. $|| \mathbf{x} ||_2 := (\sum_{i=1}^q \mathbf{x}_i^2)^{1/2}$ \vspace{0.1cm}\\ {\color{black}$\lVert\mathbf{A}\rVert_S$} & {\color{black}denotes the matrix spectral norm.}\\ \end{tabular} \end{center} \vspace{1.8cm} \textit{Important Notations} \vspace{0.2cm} \begin{center} \begin{tabular}{ l l } $\bm{\Theta}$ & the parameter space \vspace{0.1cm}\\ $\bm{\theta}$ & $(p \times 1)$ \textit{generic} parameter vector such that \\ & $\bm{\theta} \in \bm{\Theta} \subset {\rm I\!R}^p$ \vspace{0.1cm} \\ $\bm{\theta}_0$ & $(p \times 1)$ \textit{true} parameter vector such that \\ & $\bm{\theta}_0 \in \bm{\Theta} \subset {\rm I\!R}^p$ \vspace{0.1cm} \\ $F_{\bm{\theta}}$ & data generating model parameterized by $\bm{\theta}$\vspace{0.1cm} \\ $\mathcal{J}$ & $\mathcal{J} := \left\{x \in \mathbb{N} \; | \; p \leq x < \log_2(T) \right\}$ \vspace{0.1cm}\\ $J$ & an element in the set $\mathcal{J}$, i.e. 
an integer denoting \\ & the number of scales such that it is at least \\ & the same as the number of parameters but smaller \\ & than $\log_2{(T)}$ \vspace{0.1cm}\\ $\bm{\nu}$ & $(J \times 1)$ Wavelet variance or Allan variance vector \vspace{0.1cm}\\ $\bm{\nu}(\bm{\theta})$ & $(J \times 1)$ Wavelet variance or Allan variance vector \\ & implied by $\bm{\theta}$ assuming that $F_{\bm{\theta}}$ corresponds to \\ & the true data generating process \vspace{0.1cm}\\ $\mathbf{f}(\cdot)$ & a known vector-valued function such that \\ & $\mathbf{f}: \, {\rm I\!R}_+^J \mapsto \mathcal{G} \subset {\rm I\!R}^J$ \vspace{0.1cm}\\ $\bm{\Omega}$ & a positive definite matrix in ${\rm I\!R}^{J \times J}$ \\ $\hat{\bm{\Omega}}$ & an estimate of the matrix $\bm{\Omega}$ \vspace{0.1cm}\\ $|\cdot|$ & denotes the absolute value \vspace{0.1cm}\\ $\mathbf{A} \boxtimes \mathbf{B}$ & we have that $\mathbf{A} \boxtimes \mathbf{B} := \mathbf{A} \mathbf{B}\mathbf{A}^T$ where $\mathbf{A} \in {\rm I\!R}^{k \times d}$ \\ & and $\mathbf{B} \in {\rm I\!R}^{d \times d}$ \vspace{0.1cm}\\ $T_j$ & number of wavelet coefficients at scale $j \in \mathbb{N} \setminus \{0\}$,\\ & $T_j := T - 2^j + 1$ \\ \end{tabular} \end{center} \section{Generalized Method of Wavelet Functional Moments} \label{sec:gmwm} In order to formalize the framework of reference for this paper, we firstly consider the time series $\left(X_t\right)_{t = 1, \ldots, T}$ which is supposedly generated by a composite stochastic process $F_{\bm{\theta}}$ delivered by the sum of independent sub-processes. We let $F_{\bm{\theta}_0}$ denote the true data-generating process, which is assumed known up to the value of $\bm{\theta}_0$. The vector $\bm{\theta}_0$ is therefore the \textit{true} parameter value which corresponds to a possible value in $\bm{\Theta} \subset {\rm I\!R}^p$. We let $\bm{\theta}$ denote a \textit{generic} parameter vector, which should therefore not be confused with the true parameter $\bm{\theta}_0$. 
In order to discuss the estimation of $\bm{\theta}_0$, let us consider the AV or WV which can be computed on the time series $(X_t)$ for different (dyadic) scales of decomposition $J$. For the purpose of this work we will however consider $J \in \mathcal{J}$ scales such that there are \textit{at least} the same number of scales as of parameters. With this in mind, we introduce a class of estimators of $\bm{\theta}_0$ that we define as follows \begin{equation} \hat{\bm{\theta}} := \underset{\bm{\theta} \in \bm{\Theta} }{\argmin} \; \| \mathbf{f}(\hat{\bm{\nu}}) - \mathbf{f}(\bm{\nu}(\bm{\theta}))\|_{\bm{\Omega}}^2, \label{eq:f:estimator} \end{equation} where $\hat{\bm{\nu}} \in {\rm I\!R}_+^J$ and $\bm{\nu}(\bm{\theta}) \in {\rm I\!R}_+^J$ denote respectively a suitable estimator of the AV or WV computed on $(X_t)$ and the model-based counterpart (i.e. the AV or WV implied by the assumed model $F_{\bm{\theta}}$). The vector-valued function $\mathbf{f}(\cdot)$ is such that $\mathbf{f}: \, {\rm I\!R}_+^J \mapsto \mathcal{G} \subset {\rm I\!R}^J$ and is assumed known. Moreover, $\bm{\Omega} \in {\rm I\!R}^{J \times J}$ is a positive definite matrix which, if estimated, shall be denoted as $\widehat{\bm{\Omega}}$ (instead of $\bm{\Omega}$) in order to emphasize the stochastic nature of the matrix. Since the AV is a special case of the (Haar) WV (see \cite{flandrin1992wavelet,percival1994long, percival2015wavelet} for details), we choose to call the class of estimators in Eq. (\ref{eq:f:estimator}) as ``Generalized Method of Wavelet Functional Moments'' estimators (GMWFM). The latter is quite general and includes, among others, the Generalized Method of Wavelet Moments (GMWM) proposed in \cite{guerrier2013wavelet} or the Autonomous Regression Method for Allan Variance (ARMAV) of \cite{Jurado19}. 
Indeed, the GMWM corresponds to the choice $\mathbf{f}(\mathbf{x}) = \mathbf{x}$, while the ARMAV is based on $\mathbf{f}_i(\mathbf{x}_i) = \log_{10}(\mathbf{x}_i), \; i = 1,\ldots, J$, where $\mathbf{f}_i(\cdot)$ and $\mathbf{x}_i$ denote the $i$-th element of $\mathbf{f}(\cdot)$ and $\mathbf{x}$, respectively. In addition, the GMWM and the ARMAV are based on different but relatively similar choices of the matrix $\bm{\Omega}$. In this paper, we investigate the requirements on the function $\mathbf{f}(\mathbf{x})$ to ensure that the estimator $\hat{\bm{\theta}}$ is consistent and asymptotically normally distributed. Moreover, we discuss whether an optimal choice for $\mathbf{f}(\mathbf{x})$ exists. For this purpose, we need to define a set of assumptions that will be used in order to investigate these properties. Therefore, let us study the first assumption regarding injectivity of the function $\mathbf{g}(\bm{\theta}) := \mathbf{f}(\bm{\nu}(\bm{\theta}))$ which can be found below. \setcounter{Assumption}{0} \renewcommand{\theHAssumption}{otherAssumption\theAssumption} \renewcommand\theAssumption{\Alph{Assumption}} \begin{Assumption}[Injectivity] \label{assum:injectiviy} The functions $\mathbf{f}(\cdot)$ and $\bm{\nu}(\cdot)$ are such that $\mathbf{f}(\cdot)$ is injective in ${\rm I\!R}_+^J$ and $\bm{\nu}(\cdot)$ is injective in $\bm{\Theta}$. \end{Assumption}\vspace{0.25cm} If this assumption holds, then a direct consequence is that $\mathbf{g}(\bm{\theta})$ is injective in $\bm{\Theta}$. More precisely, the first part of Assumption \ref{assum:injectiviy} is rather mild since the function $\mathbf{f}(\cdot)$ can be chosen in such a way as to respect this condition. However, the second part of the assumption can be challenging to prove. For example, \cite{guerrier2016identifiability} considered the injectivity of the function $\bm{\nu}(\cdot)$ and provide a series of results allowing to verify this property for various classes of latent time series models. 
The latter demonstrates that the second part of Assumption \ref{assum:injectiviy} would hold for the class of models considered in \cite{guerrier2013wavelet}, with a few exceptions. For example, if the time series contains a drift with parameter $\omega$ it is necessary to assume that the sign of $\omega$ is known (since $\bm{\nu}(\bm{\theta})$ only depends on $\omega^2$). A general strategy to prove whether Assumption \ref{assum:injectiviy} holds for a specific model can be found in \cite{komunjer2012global} (which is also used in \cite{guerrier2016identifiability}) while in the lemma further on we prove the second requirement of Assumption \ref{assum:injectiviy} (i.e. $\bm{\nu}(\cdot)$ is injective in $\bm{\Theta}$) for the general model considered in \cite{Jurado19}. The latter model is a composite model made by the sum of a (1) quantization noise with parameter $Q^2 \in {\rm I\!R}_+$, (2) white noise with parameter $\sigma^2 \in {\rm I\!R}_+$, (3) bias instability with parameter $B \in {\rm I\!R}_+$, (4) random walk with parameter $\gamma^2 \in {\rm I\!R}_+$ and (5) drift with parameter $\omega \in {\rm I\!R}_+$. \begin{Lemma} \label{lem:injectivity} Let \begin{equation*} \bm{\theta} := \left[Q^2 \;\;\; \sigma^2 \;\;\; B \;\;\; \gamma^2 \;\;\; \omega \right] \in \bm{\Theta} \subset {\rm I\!R}_+^5, \end{equation*} and let $c$ be a positive constant. Then, the function \begin{equation*} \bm{\nu}_j(\bm{\theta}) := c\left(\frac{3Q^2}{2^{2j}} + \frac{\sigma^2}{2^j} + \frac{2\log(2)}{\pi} B^2 + \frac{\gamma^2 2^j}{3} + \omega^2 2^{2j -1}\right), \label{eq:wv:theo} \end{equation*} is injective in $\bm{\Theta}$. 
\end{Lemma}\vspace{0.25cm} \begin{Remark} The positive constant $c$ is simply related to the choice of the AV or (Haar) WV: in the case of the former we have that $c = 1$ while for the Haar WV we have $c = \nicefrac{1}{2}$.\\ \end{Remark} \noindent \textsc{Proof:} First we notice that it is sufficient to show that \begin{equation*} \bm{\nu}^*(\bm{\theta}) = \bm{\nu}^*(\bm{\theta}^*), \end{equation*} if and only if $\bm{\theta} = \bm{\theta}^*$, where $\bm{\nu}^*(\bm{\theta})$ denotes the first 5 elements of the vector $\bm{\nu}(\bm{\theta})$. Moreover, the function $\bm{\nu}^*(\bm{\theta})$ can be reparametrized as a function of $\bm{\beta}$ defined as \begin{equation*} \bm{\beta} := \left[Q^2 \;\;\; \sigma^2 \;\;\; B^2 \;\;\; \gamma^2 \;\;\; \omega^2 \right], \end{equation*} where the only difference with $\bm{\theta}$ is that the elements $B$ and $\omega$ are squared. Since the latter elements are positive (the sign of $\omega$ is known and is assumed positive for this proof without loss of generality), the square function is also injective and this implies that if the WV is injective for their squares, by composition of injective functions it is also injective for the original values. Therefore, it is sufficient to show that \begin{equation*} \bm{\nu}^*(\bm{\beta}) = \bm{\nu}^*(\bm{\beta}^*), \end{equation*} if and only if $\bm{\beta} = \bm{\beta}^*$. 
We start by computing the Jacobian matrix $\mathbf{J}(\bm{\beta})$ which is defined as \begin{equation*} \begin{aligned} \mathbf{J}(\bm{\beta}) &:= \frac{\partial}{\partial \bm{\beta}^T} \; \bm{\nu}^*(\bm{\beta})\\ &= c \begin{bmatrix} \nicefrac{3}{2} & \nicefrac{1}{2} & \nicefrac{2 \log(2)}{\pi} & \nicefrac{2}{3} & 2 \\ \nicefrac{3}{16} & \nicefrac{1}{4} & \nicefrac{2 \log(2)}{\pi} & \nicefrac{4}{3} & 8 \\ \nicefrac{3}{64} & \nicefrac{1}{8} & \nicefrac{2 \log(2)}{\pi} & \nicefrac{8}{3} & 32 \\ \nicefrac{3}{256} & \nicefrac{1}{16} & \nicefrac{2 \log(2)}{\pi} & \nicefrac{16}{3} & 128 \\ \nicefrac{3}{1024} & \nicefrac{1}{32} & \nicefrac{2 \log(2)}{\pi} & \nicefrac{32}{3} & 512 \\ \end{bmatrix}. \end{aligned} \end{equation*} Since $\mathbf{J}(\bm{\beta})$ does not depend on $\bm{\beta}$ we let $\mathbf{J} := \mathbf{J}(\bm{\beta})$ which, based on the mean-value theorem, allows us to write \begin{equation*} \begin{aligned} \bm{\nu}^*(\bm{\beta}) - \bm{\nu}^*(\bm{\beta}^*) &= \bm{\nu}^*(\bm{\beta}) - \left[\bm{\nu}^*(\bm{\beta}) + \mathbf{J} \cdot ( \bm{\beta}^* - \bm{\beta})\right]\\ &= \mathbf{J} \cdot (\bm{\beta} - \bm{\beta}^*). \end{aligned} \end{equation*} Since we have that \begin{equation*} \det \left(\mathbf{J}\right) = c^5\frac{84357 \log(2)}{1024 \pi} > 0, \end{equation*} the only solution of the equation \begin{equation*} \mathbf{J} \cdot ( \bm{\beta} - \bm{\beta}^*) = \bm{0}, \end{equation*} is $\bm{\beta}^* = \bm{\beta}$, which concludes the proof. \hfill $\blacksquare$ \vspace{0.25cm} Having discussed Assumption \ref{assum:injectiviy} which appears to be reasonable to assume in general (given the different cases in which it is verified), we now consider the other set of assumptions that are needed to prove consistency of the estimator $\hat{\bm{\theta}}$. \begin{Assumption}[Compactness] \label{assum:compact} The set $\bm{\Theta}$ is compact. 
\end{Assumption} \begin{Assumption}[Consistency] \label{assum:consistent} For all $j \in \left\{1, \ldots, J\right\}$, we have \begin{align*} | \hat{\bm{\nu}}_j - {\bm{\nu}}_j(\bm{\theta}_0)| = o_{\rm p}(1). \end{align*} Moreover, if $\bm{\Omega}$ is estimated by $\widehat{\bm{\Omega}}$ then we have \begin{align*} ||\widehat{\bm{\Omega}} - \bm{\Omega}||_{S} = o_{\rm p}(1). \end{align*} \end{Assumption} \begin{Assumption}[Continuity] \label{assum:contuity:f} The function $\mathbf{g}(\bm{\theta}) :=\mathbf{f}(\bm{\nu}(\bm{\theta}))$ is continuous in $\bm{\Theta}$. \end{Assumption}\vspace{0.25cm} Assumption \ref{assum:compact} is a common regularity condition which is typically assumed for most estimation problems or is replaced by other types of constraints. Its main purpose is to ensure that certain quantities that we will consider in the proofs will be bounded in order to ensure convergence. Assumption \ref{assum:consistent} is rather mild and lower-level conditions equivalent to this assumption can, for example, be found in \cite{percival1995estimation} for the WV (as well as in \cite{guerrier2016fast} under weaker conditions) or by combining these results with the work of \cite{percival1994long} who showed the equivalence between the AV and WV. Finally, Assumption \ref{assum:contuity:f} requires the function $\mathbf{f}(\bm{\nu}(\bm{\theta}))$ to be continuous in $\bm{\Theta}$ which is the case when both $\mathbf{f}(\cdot)$ and $\bm{\nu}(\cdot)$ are continuous within their respective composition domains. Since the function $\bm{\nu}(\bm{\theta})$ is continuous in $\bm{\Theta}$ for nearly all models of interest (such as those considered in \cite{guerrier2016identifiability} or the model discussed in \cite{Jurado19}), it is sufficient for $\mathbf{f}(\cdot)$ to be continuous in ${\rm I\!R}_+^J$ to satisfy this assumption. Based on these assumptions, we can state the following consistency result. 
\begin{Theorem} \label{thm:consistent} Under Assumptions \ref{assum:injectiviy} to \ref{assum:contuity:f}, we have that \begin{equation*} || \hat{\bm{\theta}} - \bm{\theta}_0 ||_2 = o_{\rm p}(1). \end{equation*} \end{Theorem}\vspace{0.25cm} \noindent \textsc{Proof:} Let \begin{equation*} Q(\bm{\theta}) := \| \mathbf{g}(\bm{\theta}_0) - \mathbf{g}(\bm{\theta})\|_{\bm{\Omega}}^2, \end{equation*} where $\mathbf{g}(\bm{\theta})$ is defined in Assumption \ref{assum:contuity:f}. Then, we have \begin{equation*} Q(\bm{\theta}) \leq || \bm{\Omega} ||_{S} \| \; \mathbf{g}(\bm{\theta}_0) - \mathbf{g}(\bm{\theta})\|_2^2. \end{equation*} Therefore, Assumption \ref{assum:injectiviy} implies that $Q(\bm{\theta})$ has a unique minimum in $\bm{\theta} = \bm{\theta}_0$. Next, Assumption \ref{assum:contuity:f} directly implies the continuity of the function $Q(\bm{\theta})$ in $\bm{\Theta}$. Moreover, from the continuous mapping theorem together with Assumptions \ref{assum:consistent} and \ref{assum:contuity:f}, we have $\lvert\hat{\mathbf{g}}_j-\mathbf{g}_j(\bm{\theta}_0)\rvert= o_{\rm p}(1)$ for all $j \in \left\{1, \ldots, J\right\}$. Then, following the same strategy as in \cite{guerrier2016fast} (Proposition 3.1.), we obtain \begin{equation*} \sup_{\bm{\theta} \in \bm{\Theta}}\; | \widehat{Q}(\bm{\theta}) - Q(\bm{\theta})| = o_{\rm p}(1), \end{equation*} where \begin{equation*} \widehat{Q}(\bm{\theta}) := \| \mathbf{f}(\hat{\bm{\nu}}) - \mathbf{f}(\bm{\nu}(\bm{\theta}))\|_{\widehat{\bm{\Omega}}}^2\, . 
\end{equation*} Therefore, Theorem 2.1 of \cite{newey1994large} can be applied to obtain the consistency of $\hat{\bm{\theta}}$ thereby concluding the proof.\hfill $\blacksquare$ \vspace{0.25cm} Theorem \ref{thm:consistent} implies that any GMWFM estimator is consistent under the same conditions needed to ensure the consistency of the GMWM estimator provided that the function $\mathbf{f}(\cdot)$ is both injective (see Assumption \ref{assum:injectiviy}) and continuous (see Assumption \ref{assum:contuity:f}). Therefore, the requirements on the function $\mathbf{f}(\cdot)$ are rather mild but we shall see that this function has a more relevant impact on the asymptotic distribution of the estimator. Before introducing this result, as for the result on consistency, we first state and discuss relevant assumptions. \begin{Assumption}[Interior and Convex] \label{assum:int} The vector $\bm{\theta}_0$ is such that $\bm{\theta}_0 \in \Int(\bm{\Theta})$ and $\bm{\Theta}$ is convex. \end{Assumption} \vspace{0.4cm} \begin{Assumption}[Function Differentiability] \label{assum:cov} The function $\mathbf{f}(\cdot)$ is such that $\mathbf{f} \in \mathcal{C}^1({\rm I\!R}^J_+, \mathcal{G})$ allowing us to define \begin{equation*} \mathbf{F}(\bm{\theta}_0) := \left. \frac{\partial}{\partial \mathbf{x}^T} \; \mathbf{f}(\mathbf{x}) \right|_{\mathbf{x} = \bm{\nu}(\bm{\theta}_0)}\, . \end{equation*} Moreover, defining the matrices \begin{equation*} \bm{\Omega}^*\left[\bm{\theta}_0, \mathbf{F}\right] := \mathbf{F}(\bm{\theta}_0)^T \boxtimes \bm{\Omega}, \end{equation*} and \begin{equation*} \mathbf{A}(\bm{\theta}_0) := \left. \frac{\partial}{\partial \bm{\theta}^T} \; \bm{\nu}(\bm{\theta}) \right|_{\bm{\theta} = \bm{\theta}_0}\,, \end{equation*} then, the matrix \begin{equation*} \mathbf{H}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right] := \mathbf{A}(\bm{\theta}_0)^T \boxtimes \bm{\Omega}^*\left[\bm{\theta}_0, \mathbf{F}\right], \end{equation*} exists and is non-singular. 
\end{Assumption} \begin{Assumption} \label{assum:nu:norm} The estimator $\hat{\bm{\nu}}$ has the following asymptotic distribution \begin{equation*} \sqrt{T_J}\left(\hat{\bm{\nu}}-\bm{\nu}(\bm{\theta}_0)\right) \xrightarrow[T\rightarrow\infty]{\mathcal{D}} \mathcal{N}\left(\mathbf{0},\mathbf{V}(\bm{\theta}_0)\right), \end{equation*} where $\mathbf{V}(\bm{\theta}_0) := \cov(\hat{\bm{\nu}})$ is a positive-definite symmetric matrix. \end{Assumption}\vspace{0.25cm} The topological requirements of Assumption \ref{assum:int} are quite mild although stronger than necessary. Indeed, the fact that $\bm{\theta}_0$ is required to be an interior point of the convex space $\bm{\Theta}$ is convenient (but not strictly necessary) to ensure that expansions (such as Taylor expansions) can be made between $\bm{\theta}_0$ and an arbitrary point in $\bm{\Theta}$. Assumption \ref{assum:cov} contains different requirements but what it basically requires is that the function $\mathbf{f}(\cdot)$ is differentiable in such a way that it can be used to make Taylor expansions for the purposes of demonstrating the asymptotic normality of the estimator $\hat{\bm{\theta}}$. Based on these expansions we obtain expressions that deliver the matrix $\mathbf{H}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right]$ which needs to be positive-definite in order for the estimator to have an asymptotic variance (and hence define an asymptotic distribution). Finally, Assumption \ref{assum:nu:norm} is required for any estimator which makes use of moments (such as the AV or WV) to deliver asymptotic normality of the estimator itself. This assumption is verified under few additional conditions compared to those required for Assumption \ref{assum:consistent}, as highlighted again in \cite{percival1995estimation}, \cite{serroukh2000statistical} and, under weaker conditions, in \cite{guerrier2016fast}. Using these assumptions, we obtain the following result. 
\begin{Theorem} \label{thm:asym:norm} Under Assumptions \ref{assum:injectiviy} to \ref{assum:nu:norm}, the estimator $\hat{\bm{\theta}}$ has the following asymptotic distribution \begin{equation*} \sqrt{T_J}\left(\hat{\bm{\theta}}-\bm{\theta}_0\right) \xrightarrow[T\rightarrow\infty]{\mathcal{D}} \mathcal{N}\left(\mathbf{0}, \bm{\Sigma}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right]\right), \end{equation*} where \begin{equation*} \bm{\Sigma}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right] := \mathbf{B}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right] \boxtimes \mathbf{V}(\bm{\theta}_0), \end{equation*} and \begin{equation*} \mathbf{B}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right] := \mathbf{H}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right]^{-1} \mathbf{A}(\bm{\theta}_0)^T \bm{\Omega}^*\left[\bm{\theta}_0, \mathbf{F}\right]. \end{equation*} \end{Theorem}\vspace{0.25cm} \noindent \textsc{Proof: } Let $\bm{\Theta}(T) := \left\{\mathbf{x} \in {\rm I\!R}^p \,| \,\, ||\mathbf{x} - \bm{\theta}_0 ||_2 \leq d(T) \right\}$, where $d(T) = o(1)$. Moreover, we also define $\bm{\Theta}^*(T) := \bm{\Theta} \cap \bm{\Theta}(T)$. Since $\hat{\bm{\theta}}$ is consistent by Theorem~\ref{thm:consistent} (based on Assumptions~\ref{assum:injectiviy} to \ref{assum:contuity:f}), there exists a function $d(T)$ such that \begin{equation} \begin{aligned} \hat{\bm{\theta}} :=& \underset{\bm{\theta} \in \bm{\Theta} }{\argmin} \; \| \mathbf{f}(\hat{\bm{\nu}}) - \mathbf{f}(\bm{\nu}(\bm{\theta}))\|_{\bm{\Omega}}^2\\ =& \underset{\bm{\theta} \in \bm{\Theta}^*(T)}{\argmin} \; \| \mathbf{f}(\hat{\bm{\nu}}) - \mathbf{f}(\bm{\nu}(\bm{\theta}))\|_{\bm{\Omega}}^2 + o_{\rm p}(1). 
\end{aligned} \label{eq:proof:as:norm} \end{equation} Within the set $\bm{\Theta}^*(T)$, which shrinks towards $\bm{\theta}_0$ as the sample size $T$ increases, we can expand $\mathbf{f}\left(\hat{\bm{\nu}}\right)$ and $\mathbf{f}\left(\bm{\nu}(\bm{\theta})\right)$ around $\bm{\nu}(\bm{\theta}_0)$ using a Taylor expansion to obtain: \begin{equation*} \begin{aligned} \mathbf{f}\left(\hat{\bm{\nu}}\right) &= \mathbf{f}\left(\bm{\nu}(\bm{\theta}_0)\right) + \mathbf{F}\left(\bm{\theta}_0\right) \left(\hat{\bm{\nu}} - \bm{\nu}(\bm{\theta}_0) \right) + o_{\rm p}(1)\\[0.2cm] \mathbf{f}\left(\bm{\nu}(\bm{\theta})\right) &= \mathbf{f}\left(\bm{\nu}(\bm{\theta}_0)\right) + \mathbf{F}\left(\bm{\theta}_0\right) \left(\bm{\nu}(\bm{\theta}) - \bm{\nu}(\bm{\theta}_0) \right) + o(1). \end{aligned} \end{equation*} Therefore, by combining this result with (\ref{eq:proof:as:norm}), we obtain \begin{equation} \begin{aligned} \hat{\bm{\theta}} &= \underset{\bm{\theta} \in \bm{\Theta}^*(T)}{\argmin} \; \| \mathbf{F}\left(\bm{\theta}_0\right) \left(\hat{\bm{\nu}} - \bm{\nu}(\bm{\theta})\right) \|_{\bm{\Omega}}^2 + o_{\rm p}(1) \\ &= \underset{\bm{\theta} \in \bm{\Theta}^*(T)}{\argmin} \; \| \hat{\bm{\nu}} - \bm{\nu}(\bm{\theta}) \|_{\bm{\Omega}^*}^2 + o_{\rm p}(1), \end{aligned} \label{eq:asymp:equivalence} \end{equation} where, similarly to the definition of Assumption \ref{assum:cov}, \begin{equation*} \bm{\Omega}^* := \bm{\Omega}^*\left[\bm{\theta}_0, \mathbf{F}\right] = \mathbf{F}\left(\bm{\theta}_0\right)^T \boxtimes \bm{\Omega}. \end{equation*} Next, we consider the following approximation of $\hat{\bm{\theta}}$, \begin{equation*} \tilde{\bm{\theta}} := \underset{\bm{\theta} \in \bm{\Theta}}{\argmin} \; \| \hat{\bm{\nu}} - \bm{\nu}(\bm{\theta}) \|_{\bm{\Omega}^*}^2. 
\end{equation*} Proposition 4.2 of \cite{guerrier2016fast} implies, under the current assumption framework, that \begin{equation*} \sqrt{T_J}\left(\tilde{\bm{\theta}}-\bm{\theta}_0\right) \xrightarrow[T\rightarrow\infty]{\mathcal{D}} \mathcal{N}\left(\mathbf{0},\bm{\Sigma}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right]\right). \end{equation*} Since $\hat{\bm{\theta}} = \tilde{\bm{\theta}} + o_{\rm p}(1)$, a direct application of Slutsky's theorem allows us to conclude that the above result remains true for $\hat{\bm{\theta}}$ and we obtain \begin{equation*} \sqrt{T_J}\left(\hat{\bm{\theta}}-\bm{\theta}_0\right) \xrightarrow[T\rightarrow\infty]{\mathcal{D}} \mathcal{N}\left(\mathbf{0},\bm{\Sigma}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right]\right), \end{equation*} which concludes the proof.\hfill $\blacksquare$ \vspace{0.25cm} An implication of this result (made evident in particular from Eq. (\ref{eq:asymp:equivalence}) in the proof) is the fact that, no matter which choice is made for the function $\mathbf{f}(\cdot)$ and the matrix $\bm{\Omega}$ (provided that they satisfy the previously mentioned assumptions), we can define a matrix $\bm{\Omega}^*$ (that depends upon $\mathbf{f}(\cdot)$ and $\bm{\Omega}$) such that we can express the estimator as \begin{equation} \label{eq:argmin_eq} \hat{\bm{\theta}} := \underset{\bm{\theta} \in \bm{\Theta}}{\argmin} \; \| \hat{\bm{\nu}} - \bm{\nu}(\bm{\theta}) \|_{\bm{\Omega}^*}^2 . \end{equation} Therefore, as long as the matrix $\bm{\Omega}^* := \bm{\Omega}^*\left[\bm{\theta}_0, \mathbf{F}\right]$ is positive definite, the estimator $\hat{\bm{\theta}}$ is asymptotically normally distributed under the above assumptions and the only aspect that is affected by the change of $\bm{\Omega}^*$ is the efficiency of the resulting estimator. 
Consequently, the choice of a specific function $\mathbf{f}(\cdot)$ (which respects the required properties) only contributes to modifying the weighting matrix $\bm{\Omega}^*$ thereby delivering approximately the same results for any such function $\mathbf{f}(\cdot)$. The weighting matrix $\bm{\Omega}^*$ is therefore crucial to the efficiency of the estimator $\hat{\bm{\theta}}$. As shown in the corollary below, the optimal choice (in terms of asymptotic efficiency) of $\bm{\Omega}^*$ is the inverse of $\mathbf{F}(\bm{\theta}_0) \boxtimes \mathbf{V}(\bm{\theta}_0)$. Although the true $\mathbf{V}(\bm{\theta}_0)$ is unknown in practice, it can be consistently estimated by the estimator proposed in \cite{andrews1991heteroskedasticity} or with the approach discussed in \cite{guerrier2016fast}. Moreover, the corollary of Theorem \ref{thm:asym:norm} presented below shows how asymptotically optimal estimators can be constructed for the GMWFM. \begin{Corollary} \label{coro:optim} Under Assumptions \ref{assum:injectiviy} to \ref{assum:nu:norm} (i.e. the same conditions of Theorem~\ref{thm:asym:norm}), the estimator $\hat{\bm{\theta}}$ based on the function $\mathbf{f}(\cdot)$ and the matrix $\bm{\Omega}^\circ := [\mathbf{F}(\bm{\theta}_0) \boxtimes \mathbf{V}(\bm{\theta}_0)]^{-1}$ is asymptotically efficient in the class of GMWFM estimators. \end{Corollary}\vspace{0.25cm} \noindent \textsc{Proof: } Under our assumptions, it is easy to verify that the asymptotic covariance matrix of $\hat{\bm{\theta}}$ is given by \begin{equation*} \bm{\Sigma}[\bm{\theta}_0,\bm{\nu},\bm{\Omega}^\circ,\mathbf{F}] = \left[\mathbf{A}(\bm{\theta}_0)^T\boxtimes\mathbf{V}(\bm{\theta}_0)^{-1}\right]^{-1}. \end{equation*} We proceed by demonstrating that the difference between the asymptotic covariance matrix in Theorem~\ref{thm:asym:norm} and the above covariance matrix leads to a positive semi-definite matrix. 
Following Section 5.2 in \cite{newey1994large}, it is easy to show that \begin{align*} \bm{\Sigma}[\bm{\theta}_0,\bm{\nu},\bm{\Omega},\mathbf{F}] &- \bm{\Sigma}[\bm{\theta}_0,\bm{\nu},\bm{\Omega}^\circ,\mathbf{F}]\\ &=\mathbf{H}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right]^{-1}\boxtimes\mathbb{E}\left[\mathbf{W}\mathbf{W}^T\right], \end{align*} where \begin{align*} \mathbf{W} := &\mathbf{A}(\bm{\theta}_0)^T \bm{\Omega}^*\left[\bm{\theta}_0, \mathbf{F}\right]\mathbf{Z}\\ - &\mathbf{H}\left[\bm{\theta}_0, \bm{\nu}, \bm{\Omega}, \mathbf{F}\right] \bm{\Sigma}[\bm{\theta}_0,\bm{\nu},\bm{\Omega}^\circ,\mathbf{F}] \mathbf{A}(\bm{\theta}_0)^T\mathbf{F}(\bm{\theta}_0)^T \bm{\Omega}^\circ \mathbf{Z}, \end{align*} and $\mathbf{Z}$ is a random vector such that $$\mathbb{E}\left[\mathbf{Z}\mathbf{Z}^T\right] = \mathbf{F}(\bm{\theta}_0) \mathbf{V}(\bm{\theta}_0) \mathbf{F}(\bm{\theta}_0)^T = \left(\bm{\Omega}^{\circ}\right)^{-1}.$$ The result follows since $\mathbb{E}\left[\mathbf{W}\mathbf{W}^T\right]$ is positive semi-definite, which concludes the proof. \hfill $\blacksquare$ \vspace{0.25cm} \iffalse \begin{Corollary} By choosing $\bm{\Omega}=\mathbf{V}(\bm\theta_0)^{-1}$:\\ (i) GMWM is asymptotically efficient.\\ (ii) ARMAV is not asymptotically efficient. 
\end{Corollary} \noindent \textsc{Proof: } (i) GMWM has $\mathbf{F}(\bm{\theta}_0)=\mathbf{I}_J$.\\ (ii) ARMAV has $\mathbf{F}(\bm{\theta}_0)=\nicefrac{1}{\log(10)}\mathbf{D}(\bm{\theta}_0)$ where $\mathbf{D}(\bm{\theta}_0)$ is a diagonal matrix with elements $(\nicefrac{1}{\bm{\nu}_1(\bm{\theta}_0)},\ldots,\nicefrac{1}{\bm{\nu}_J(\bm{\theta}_0)})$ in the diagonal.\hfill $\blacksquare$ \fi Corollary \ref{coro:optim} shows that, under suitable conditions, any estimator belonging to the class of GMWFM estimators can be asymptotically optimal provided that it is based on the function $\mathbf{f}(\cdot)$ and the matrix $\bm{\Omega}^\circ := [\mathbf{F}(\bm{\theta}_0)\mathbf{V}(\bm{\theta}_0) \mathbf{F}(\bm{\theta}_0)^T]^{-1}$. This implies that there exist an infinite number of possible efficient estimators (based on different functions $\mathbf{f}(\cdot)$ and matrix $\bm{\Omega}^\circ$) leading to the same optimal asymptotic covariance matrix $[\mathbf{A}(\bm{\theta}_0)^T\boxtimes\mathbf{V}(\bm{\theta}_0)^{-1}]^{-1}$. In the case where $\mathbf{f}(\mathbf{x}) = \mathbf{x}$, the matrix $\bm{\Omega}^\circ$ has the simplest expression given by $\bm{\Omega}^\circ = \mathbf{V}(\bm{\theta}_0)^{-1}$ since $\mathbf{F}(\bm{\theta}_0) = \mathbf{I}$ thereby also suggesting that its (consistent) estimation is more straightforward in practice. The choice of the function $\mathbf{f}(\mathbf{x}) = \mathbf{x}$ presents several other advantages compared to possible alternative choices. For example, this function allows the estimator to be solved analytically for various commonly used models. This is of particular importance for inertial sensor calibration as most models considered in this field allow for such a closed form solution. 
Indeed, suppose that there exists a matrix $\mathbf{X}$ that does not depend on $\bm{\theta}$ such that $\bm{\nu}(\bm{\theta})$ can be expressed as $\bm{\nu}(\bm{\theta}) = \mathbf{X}\, \mathbf{h}(\bm{\theta})$ for all $\bm{\theta} \in \bm{\Theta}$, where $\mathbf{h}(\cdot)$ is an injective vector-valued function such that $\mathbf{h}: \, \bm{\Theta} \mapsto \mathcal{H} \subset {\rm I\!R}^p$. This is, for example, the case for the model considered in Lemma~\ref{lem:injectivity}. Indeed, denoting $a:=\nicefrac{2\log(2)}{\pi}$, the function $\bm{\nu}(\bm{\theta})$ can be expressed as follows \begin{equation} \bm{\nu}(\bm{\theta}) = c \underbrace{\begin{bmatrix} \frac{3}{2^2} & \frac{1}{2^1} & a & \frac{2^1}{3} & 2^1\\ \frac{3}{2^4} & \frac{1}{2^2} & a & \frac{2^2}{3} & 2^3\\ \vdots & \vdots & \vdots & \vdots & \vdots\\ \frac{3}{2^{2J}} & \frac{1}{2^{J}} & a & \frac{2^J}{3} & 2^{2J-1} \end{bmatrix}}_{\mathbf{X}} \underbrace{\begin{bmatrix} Q^2\\ \sigma^2\\ B^2\\ \gamma^2\\ \omega^2 \end{bmatrix}}_{\mathbf{h}(\bm{\theta})} . \label{eq:example:linear:nu} \end{equation} Hence, taking the parameter vector defined in Lemma~\ref{lem:injectivity}, the vector-valued function $\mathbf{h}(\bm{\theta})$ is the identity for all elements except for $B$ and $\omega$ for which it is the square function. Since all parameters (are assumed to) belong to ${\rm I\!R}_+$, we have that the function $\mathbf{h}(\bm{\theta})$ is injective which, in general, allows us to write \begin{equation*} \hat{\bm{\theta}} := \underset{\bm{\theta} \in \bm{\Theta} }{\argmin} \; \| \hat{\bm{\nu}} - \bm{\nu}(\bm{\theta})\|_{\bm{\Omega}}^2 = \mathbf{h}^{-1}(\hat{\bm{\vartheta}}), \end{equation*} where \begin{equation*} \hat{\bm{\vartheta}} := \underset{\bm{\vartheta} \in \mathcal{H}}{\argmin} \; \| \hat{\bm{\nu}} - \mathbf{X} \, \bm{\vartheta}\|_{\bm{\Omega}}^2, \end{equation*} and where we let $\bm{\vartheta} := \mathbf{h}(\bm{\theta})$. 
Moreover, since the function $\lVert\hat{\bm{\nu}} - \mathbf{X} \, \bm{\vartheta}\rVert_{\bm{\Omega}}^2$ is differentiable in $\bm{\vartheta}$, we have \begin{equation*} \hat{\bm{\vartheta}} := \underset{\bm{\vartheta} \in \mathcal{H}}{\argmin} \; \| \hat{\bm{\nu}} - \mathbf{X} \, \bm{\vartheta}\|_{\bm{\Omega}}^2 = \underset{\bm{\vartheta} \in \mathcal{H} }{\argzero} \; \mathbf{X}^T \bm{\Omega} \left(\hat{\bm{\nu}} - \mathbf{X} \bm{\vartheta}\right). \end{equation*} Therefore, we have that \begin{equation*} \mathbf{X}^T \bm{\Omega} \mathbf{X} \hat{\bm{\vartheta}} = \mathbf{X}^T \bm{\Omega} \hat{\bm{\nu}}, \end{equation*} which corresponds to the standard (weighted) least-squares equations. Under Assumption \ref{assum:cov}, the matrix $\mathbf{X}^T \bm{\Omega} \mathbf{X}$ is non-singular and we thus obtain \begin{equation} \hat{\bm{\vartheta}} = \left(\mathbf{X}^T \bm{\Omega} \mathbf{X} \right)^{-1}\mathbf{X}^T \bm{\Omega} \hat{\bm{\nu}}, \label{eq:gmwm:simple:trans1} \end{equation} which provides a simple plug-in estimator for $\bm{\theta}_0$ defined as \begin{equation} \hat{\bm{\theta}} = \mathbf{h}^{-1} \left[ \left(\mathbf{X}^T \bm{\Omega} \mathbf{X} \right)^{-1}\mathbf{X}^T \bm{\Omega} \hat{\bm{\nu}} \right]. \label{eq:gmwm:simple:trans2} \end{equation} The above closed-form solution is therefore a first advantage of choosing $\mathbf{f}(\mathbf{x}) = \mathbf{x}$. Moreover, there are a few practical advantages stemming from this setting, the first of which is the fact that, given a closed form solution for this class of models, no optimization is required to compute the estimates thereby delivering computationally fast solutions. In addition, even if the model of interest contains a subset of this class of models, this closed form solution can be used as an approximate method to quickly obtain ``good'' starting values that can increase the computational efficiency of the optimization procedure required to solve Eq. \eqref{eq:f:estimator}. 
Finally, the above form allows to obtain the exact form of the asymptotic variance of $\hat{\bm{\vartheta}}$ up to the value of $\mathbf{V}(\bm{\theta}_0)$ (i.e. the asymptotic variance of $\hat{\bm{\nu}}$) which, using the delta method, would allow us to obtain the exact variance of $\hat{\bm{\theta}}$ for this class of models. \begin{figure*}[!h] \centering \includegraphics[width=1\textwidth]{boxplot.pdf} \caption{Empirical distribution of the estimations of the GMWM, ARMAV and AVSM approaches for the parameters of the stochastic error of an accelerometer and a gyroscope for signal lengths of $T=\num{2.5e6}$ (top row) and $T=\num{6.3e6}$ (bottom row) respectively.} \label{fig:boxplot} \end{figure*} Aside from the computational advantage of using the function $\mathbf{f}(\mathbf{x}) = \mathbf{x}$ in Eq. (\ref{eq:f:estimator}) for the class of models for which $\bm{\nu}(\bm{\theta})$ can be expressed as $\mathbf{X}\mathbf{h}(\bm{\theta})$ (or as a ``good'' starting value for other models), there is another potential advantage of using the identity function for the purposes of estimation which relates to their bias. Indeed, the standard estimators of AV or WV are unbiased (see for example~\cite{percival1995estimation}), meaning that $\mathbb{E}\left[\hat{\bm{\nu}}\right] = \bm{\nu}\left(\bm{\theta}_0\right)$ and implying that $\mathbb{E}[ \hat{\bm{\vartheta}} ] = \mathbf{h}(\bm{\theta}_0)$ based on Eq.~\eqref{eq:gmwm:simple:trans1}. A first implication of these properties is that, aside from consistency, it is possible to show that the estimates of most of the parameters in models whose theoretical WV can be expressed as $\mathbf{X}\mathbf{h}(\bm{\theta})$ are unbiased (i.e. when $\mathbf{h}_i(\cdot)$ is the identity or a linear function). However, if the theoretical WV cannot be expressed in the latter form, formal proofs to determine the finite sample behaviour of the estimators defined in Eq. (\ref{eq:f:estimator}) may be hard to derive. 
Nevertheless, an intuitive argument would support the employment of the function $\mathbf{f}(\mathbf{x}) = \mathbf{x}$ since it directly makes use of unbiased estimators of the WV to match their theoretical counterpart (which is a desirable property in order to achieve unbiasedness with respect to the parameter of interest $\bm{\theta}_0$). To better highlight the concepts behind the above reasoning, we compare the following two asymptotically equivalent estimators: \begin{enumerate} \item $\hat{\bm{\theta}}$ based on the choice of function $\mathbf{f}(\mathbf{x}) = \mathbf{x}$ and a (non-random) weight matrix $\bm{\Omega}_1$, \item $\tilde{\bm{\theta}}$ based on another choice of function, such that $\mathbb{E}\left[\mathbf{f} \left(\hat{\bm{\nu}}\right)\right] \neq \mathbf{f}(\mathbb{E}\left[ \hat{\bm{\nu}}\right])$ and a (non-random) weight matrix $\bm{\Omega}_2$. \end{enumerate} We then consider the expected value of the objective function of the first estimator $\hat{\bm{\theta}}$: \begin{equation*} \mathbb{E}\left[\| \hat{\bm{\nu}} - \bm{\nu}(\bm{\theta})\|_{\bm{\Omega}_1}^2\right] = \| \mathbb{E}\left[ \hat{\bm{\nu}} - \bm{\nu}(\bm{\theta})\right]\|_{\bm{\Omega}_1}^2 + \tr \left( \bm{\Omega}_1 \var \left(\hat{\bm{\nu}} \right) \right). \end{equation*} Since the second term of the above equation does not depend on $\bm{\theta}$, we let $b_1 := \tr \left( \bm{\Omega}_1 \var \left(\hat{\bm{\nu}} \right) \right)$ and we can write \begin{equation*} \begin{aligned} \mathbb{E}\left[\| \hat{\bm{\nu}} - \bm{\nu}(\bm{\theta})\|_{\bm{\Omega}_1}^2\right] &= \| \bm{\nu}(\bm{\theta}_0) - \bm{\nu}(\bm{\theta})\|_{\bm{\Omega}_1}^2 + b_1, \end{aligned} \end{equation*} since $\mathbb{E}\left[ \hat{\bm{\nu}} \right] = \bm{\nu}(\bm{\theta}_0)$. Therefore, this function is unbiased in the sense that it is minimized at the true value $\bm{\theta}_0$. 
Following the argument in \cite{han2006gmm}, it is therefore expected that the bias of this estimator will be large if bias in the objective function of the corresponding estimator is large. Next, we consider the objective function of the second estimator and, recalling that $\mathbf{g}(\bm{\theta}) :=\mathbf{f}(\bm{\nu}(\bm{\theta}))$ as defined in Assumption \ref{assum:contuity:f}, we define $\Delta(\bm{\theta}_0) := \mathbb{E}[\mathbf{f}(\hat{\bm{\nu}})] - \mathbf{g}(\bm{\theta}_0)$ and $b_2 := \tr \left( \bm{\Omega}_2 \var \left(\mathbf{f}(\hat{\bm{\nu}}) \right) \right)$. Using these definitions, we obtain \begin{equation*} \begin{aligned} &\mathbb{E}\left[\| \mathbf{f}(\hat{\bm{\nu}}) - \mathbf{g}(\bm{\theta})\|_{\bm{\Omega}_2}^2\right] = \| \mathbb{E}\left[\mathbf{f}(\hat{\bm{\nu}}) - \mathbf{g}(\bm{\theta}) \right]\|_{\bm{\Omega}_2}^2 + b_2\\ &\quad =\| \mathbf{g}(\bm{\theta}_0) + \Delta(\bm{\theta}_0) - \mathbf{g}(\bm{\theta}) \|_{\bm{\Omega}_2}^2 + b_2. \end{aligned} \end{equation*} Moreover, by applying the mean value theorem it is possible to assess the order of $\Delta(\bm{\theta}_0)$: \begin{equation*} \begin{aligned} \Delta(\bm{\theta}_0) &= \mathbb{E}[\mathbf{f}(\hat{\bm{\nu}})] - \mathbf{g}(\bm{\theta}_0)\\ &=\mathbb{E}[\mathbf{g}(\bm{\theta}_0) + \mathbf{F}\left(\bm{\nu}(\bm{\theta}^*)\right) \left(\hat{\bm{\nu}} - \bm{\nu}(\bm{\theta}_0) \right)] - \mathbf{g}(\bm{\theta}_0)\\ &= T_J^{-\nicefrac{1}{2}} \; \mathbb{E}[\sqrt{T_J} \mathbf{F}\left(\bm{\nu}(\bm{\theta}^*)\right) \left(\hat{\bm{\nu}} - \bm{\nu}(\bm{\theta}_0) \right)] \\ &= \mathcal{O}(T_J^{-\nicefrac{1}{2}}), \end{aligned} \end{equation*} where $\bm{\theta}^* \in \bm{\Theta}$, $\bm{\nu}(\bm{\theta}^*)$ is on the line connecting $\bm{\nu}(\bm{\theta}_0)$ and $\hat{\bm{\nu}}$, and the term $\sqrt{T_J} \mathbf{F}\left(\bm{\nu}(\bm{\theta}^*)\right) \left(\hat{\bm{\nu}} - \bm{\nu}(\bm{\theta}_0)\right)$ is $\mathcal{O}_{\rm p}(1)$ by the continuous mapping theorem and Assumption 
\ref{assum:consistent}. Therefore, we have \begin{equation*} \begin{aligned} &\underset{\bm{\theta} \in \bm{\Theta} }{\argmin} \; \mathbb{E}\left[\| \mathbf{f}(\hat{\bm{\nu}}) - \mathbf{g}(\bm{\theta})\|_{\bm{\Omega}_2}^2\right]\\ &\quad = \underset{\bm{\theta} \in \bm{\Theta} }{\argmin} \; \| \mathbf{g}(\bm{\theta}_0) + \Delta(\bm{\theta}_0) - \mathbf{g}(\bm{\theta}) \|_{\bm{\Omega}_2}^2, \end{aligned} \end{equation*} implying that bias of the objective function is of order $\mathcal{O}(T_J^{-1})$ and, consequently, the bias of $\tilde{\bm{\theta}}$ is also of order $\mathcal{O}(T_J^{-1/2})$. As a result, we expect estimators based on the choice $\mathbf{f}(\mathbf{x}) = \mathbf{x}$ to have relatively small biases compared to other choices of $\mathbf{f}(\mathbf{x})$. An important example is when $\mathbf{f}(\mathbf{x})$ is a convex/concave function. More specifically, using Jensen's inequality we have the following \begin{equation} \label{eq:jensen_ineq} \begin{cases} \mathbb{E}\left[\mathbf{f} \left(\hat{\bm{\nu}}\right)\right] > \mathbf{f}(\mathbb{E}\left[ \hat{\bm{\nu}}\right]) & \text{if $\mathbf{f}$ is strictly convex},\\[0.1cm] \mathbb{E}\left[\mathbf{f} \left(\hat{\bm{\nu}}\right)\right] < \mathbf{f}(\mathbb{E}\left[ \hat{\bm{\nu}}\right]) & \text{if $\mathbf{f}$ is strictly concave}.\\ \end{cases} \end{equation} \iffalse Considering the above inequalities, we can rewrite the objective function in Eq. \eqref{eq:f:estimator} as a system of $J$ equations as follows: \begin{equation} \mathbf{f} \left(\hat{\bm{\nu}}\right) = \mathbf{f} \left(\bm{\nu}\left(\bm{\theta}^*\right)\right) + \bm{\varepsilon}, \label{eq:system} \end{equation} where $\bm{\theta}^*$ is the solution for $\bm{\theta}$ to this system and the vector $\bm{\varepsilon}$ is such that $\bm{\varepsilon} \sim \mathcal{N}\left(\mathbf{0}, \bm{\Sigma}\right)$, where $\bm{\Sigma}$ is a variance matrix for the residuals $\bm{\varepsilon}$. {\color{pinegreen}\st{By the definition in Eq. 
}\eqref{eq:system} \st{we have that the left hand side of the inequalities discussed earlier can be re-expressed as}} {\color{pinegreen}\st{while (given the unbiased estimator $\hat{\bm{\nu}}$) the right hand side of the inequalities can be rewritten as}} {\color{pinegreen}\st{therefore delivering} Taking the expectation of Eq.~\eqref{eq:system} yields} \begin{equation*} \begin{cases} \mathbf{f}\left(\bm{\nu}\left(\bm{\theta}^*\right)\right) > \mathbf{f}(\bm{\nu}(\bm{\theta}_0)) & \text{if $\mathbf{f}(\cdot)$ is convex},\\[0.1cm] \mathbf{f}\left(\bm{\nu}\left(\bm{\theta}^*\right)\right) < \mathbf{f}(\bm{\nu}(\bm{\theta}_0)) & \text{if $\mathbf{f}(\cdot)$ is concave},\\ \end{cases} \end{equation*} {\color{pinegreen}where $\hat{\bm{\nu}}$ is supposed unbiased.} These inequalities suggest that, if the function $\mathbf{f}(\cdot)$ is either convex or concave, the solution $\bm{\theta}^*$ is not equal to the true parameter vector $\bm{\theta}_0$ thereby implying that, according to the assumed model $F_{\bm{\theta}}$, estimators that make use of non-linear (concave or convex) functions for $\mathbf{f}(\cdot)$ can be biased in finite samples with respect to the parameters of interest. \hxcomment{The inequalities yield by Eq.~\eqref{eq:system} should not be strictly unequal. Equality holds when $\mathbf{f}()$ is linear} \fi Having delivered different theoretical results and arguments comparing the use of different functions $\mathbf{f}(\cdot)$ for the purpose of parameter estimation as defined in Eq. \eqref{eq:f:estimator}, the next section performs some simulation studies where, using different models for the stochastic error of the sensors, we compare the performance of different GMWFM estimators. 
\section{Simulation Results} \label{sec:simulations} \begin{figure}[!h] \centering \includegraphics[width=0.49\textwidth]{summary.pdf} \caption{Comparison of estimated root mean squared error between GMWM, ARMAV and AVSM approaches for the parameters of the stochastic error of an accelerometer and a gyroscope for signal lengths of $T=\num{2.5e6}$ (top row) and $T=\num{6.3e6}$ (bottom row) respectively.} \label{fig:summary} \end{figure} In this section, we compare the estimation performance (in terms of bias and variance) across two GMWFM estimators, namely the GMWM and ARMAV, as well as the standard AVSM as a reference. To carry out this comparison we make use of the parameter estimates (based on the GMWM) of the stochastic processes identified from the real calibration data coming from the X-axis accelerometer and X-axis gyroscope of a STIM-300 IMU \cite{stim}. Hence, the parameter estimates on this real calibration data were considered as being the true parameter values for simulation purposes and their values (along with the respective models) are presented in Table \ref{tab:trueParams}. For each sensor (with respective stochastic models), two Monte Carlo simulation settings were considered based on two different sample sizes (long and short) and with sampling frequency fixed at $250$ Hz. In the ``long'' signal setting, the sample size was set to $T=\num{6.3e6}$, corresponding to $7$ hours of calibration data, while in the ``short'' signal setting the sample size was set to $T=\num{2.5e6}$, corresponding to $2.5$ hours of calibration data. Each estimation was repeated 3000 times and the empirical distributions of the three estimators are presented in Fig. \ref{fig:boxplot}. 
\begin{table}[hbt] \centering \caption{True parameter values for Monte Carlo simulations} \label{tab:trueParams} \begin{tabularx}{0.5\textwidth}{*{5}{Y}} \toprule & \multicolumn{2}{c}{\bf Gyro} & \multicolumn{2}{c}{\bf Accel}\\ \cmidrule(lr){2-3} \cmidrule(l){4-5} $\bm{\theta}$ & \bf Value & \bf Units & \bf Value & \bf Units\\ \cmidrule(r){1-1} \cmidrule(lr){2-2} \cmidrule(lr){3-3} \cmidrule(lr){4-4} \cmidrule(l){5-5} QN ($Q$) & - & - & \num{1.79e-6} & m/s \\ WN ($\sigma$) & \num{1.57e-1} & deg/$\sqrt{\text{hr}}$ & \num{4.70e-2} & m/s/$\sqrt{\text{hr}}$ \\ RW ($\gamma$) & \num{1.34e0} & deg/hr/$\sqrt{\text{hr}}$ & \num{4.35e1} & m/s/hr/$\sqrt{\text{hr}}$ \\ DR ($\omega$) & - & - & \num{4.14e1} & m/s/hr/hr\\ \bottomrule \end{tabularx} \end{table} As shown in Fig. \ref{fig:boxplot}, it would appear that the GMWM approach delivers the best overall performances in terms of bias and dispersion, whereas the ARMAV and AVSM approaches have alternating performances according to the parameter of interest. When comparing the different lengths of the signals (i.e. long and short settings), we can observe that the bias and variance of all GMWFM estimators appear to marginally improve as confirmed in Fig. \ref{fig:summary} that summarizes the overall estimation performance of each approach in terms of Root Mean Square Error (RMSE). From Fig.~\ref{fig:boxplot}-\ref{fig:summary}, the GMWM displays the best performance overall (i.e. lower RMSE) across all parameters regardless of sample length. In contrast, AVSM tends to exhibit larger RMSE, especially when estimating the parameter of the quantization noise ($Q$). In the accelerometer simulations, the ARMAV remains close to the performance of the GMWM while, in the gyroscope simulations, the ARMAV shows similar performance to the AVSM when estimating the white noise parameter ($\sigma$), while all three approaches perform similarly when estimating random walk ($\gamma$). 
The simulation results in this section therefore seem to confirm the conclusions made in the previous sections based on the developed theoretical results. Indeed, it would appear that the choice of the function $\mathbf{f}(\mathbf{x}) = \mathbf{x}$ would be the optimal choice when considering a GMWFM estimator for the purpose of (automatic) sensor calibration. \section{Conclusions} \label{sec:conclusion} This paper discussed the properties and performance of a general class of estimators denoted as GMWFM. Being based on moment-matching techniques through the use of different functions of the WV, these estimators put forward different approaches to perform (automatic) sensor stochastic calibration. Given the variety of proposed functions that build this class of estimators, this paper analysed and proved the properties of such estimators thereby suggesting that the optimal estimator in this class is the one based on the identity function which corresponds to the GMWM. These conclusions are supported by the simulation study which consequently suggests that the GMWM should be the preferred estimator among the GMWFM estimators for the purposes of stochastic calibration of inertial sensors. \FloatBarrier \bibliographystyle{unsrt}
1,108,101,565,878
arxiv
\section{Introduction} In the last few years the area of quantum computation has gained much momentum (for a review see, e.g., ref. \cite{divincenzo}). The power of quantum computers is mainly due to the possibility of working with a superposition of $|0>$ and $|1>$ qubits with coefficients being complex numbers $\alpha $ and $\beta$, i.e. with states $|\psi>= \alpha |0>+\beta|1>$, providing an enormous number of parallel computations by the generation of a superposed state of a large number of terms. Quantum computers can do unitary transformations and (final) measurements inducing an instantaneous state reduction to $|0>$ or $|1>$ with the probability $|\alpha|^2$ or $|\beta|^2$, respectively [1]. Two of the most important achievements so far have been the discoveries of the quantum algorithms for factoring integers \cite{shor} and for the search of unstructured databases \cite{grover}, which achieve, respectively, an exponential and a square root speed up compared to their classical analogues. Another interesting algorithm exploiting the above mentioned ones in conjunction is that counting the cardinality $t$ of a given set of elements present in a flat superposition of states \cite{brassard}. In a recent work \cite{carlini}, we showed how an extended use of this counting algorithm can be further exploited to construct unitary and fully reversible operators emulating at the quantum level a set of classical probabilistic algorithms. Such classical probabilistic algorithms are characterized by the use of random numbers during the computation, and they give the correct answer with a certain probability of success, which can be usually made exponentially close to one by repetition. The quantum randomized algorithms described in ref. 
\cite{carlini} also naturally select the 'correct' states with a probability and an accuracy which can be made exponentially close to one in the end of the computation, and since the final measuring process is only an option which may not be used, they can be included as partial subroutines for further computations in larger and more complex quantum networks. As explicit examples, we showed how one can design polynomial time algorithms for studying some problems in number theory, such as the test of the primality of an integer, of the 'prime number theorem' and of a certain conjecture about the asymptotic number of representations of an even integer as a sum of two primes. In this paper we will use the methods of ref. \cite{carlini} to build a polynomial time quantum algorithm which checks whether a composite number $k$ is of Carmichael type. We start in section II by recalling the main definitions and properties of Carmichael numbers. In section III we describe the quantum algorithm for the test of Carmichael numbers. Section IV is devoted to the description of a quantum algorithm which counts the number of Carmichaels smaller than a given integer. Finally, we conclude in section V with some discussion on the results obtained. \section{Carmichael numbers} Carmichael numbers are quite famous among specialists in number theory, as they are quite rare and very hard to test. They are defined as composite numbers $k$ such that [7, 8] \begin{equation} a^{k-1}\equiv 1 ~~\bmod ~ k \label{uno} \end{equation} for every base $1<a<k$, $a$ and $k$ being relative coprimes, or $GCD(a, k)=1$. For later convenience, we also introduce the function $G_k(a)\equiv \Theta[GCD(a, k)]$, where $\Theta[1]=1$ and $\Theta =0$ otherwise. In particular, it can be shown that an integer $k$ is a Carmichael number if and only if $k$ is composite and the maximum of the orders of $a$ mod $k$, for every $1\leq a<k$ coprime to $k$, divides $k-1$. 
It then follows that every Carmichael number is odd and the product of three or more distinct prime numbers (the smallest Carmichael number is $561=3\times 11\times 17$). Recently, it has also been proven that there are infinitely many Carmichael numbers \cite{alford}. On a classical computer, it is hard to test whether a composite number $k$ is Carmichael, as it requires $O[k/\log\log k]$ evaluations of $a^{k-1} ~\bmod ~k$. In principle, there is a quite straightforward method to check whether a composite number $k$ is of the Carmichael type, provided a complete factorization of $k$ itself is known. The algorithm would use the fact that the number of bases $1<a<k$ coprime to $k$ and which satisfy eq. (\ref{uno}), i.e. for which $k$ is a pseudoprime, can be written as $F(k)=\prod_{p_i}~GCD (p_i-1, k-1)$, where the $p_i$'s are the prime factors of $k$, i.e. $k=\prod p_i^l$ [10, 11]. If $k$ is Carmichael, using Lagrange theorem one can easily show that $F(k)$ must be equal to the Euler function $\phi(k)$, which represents the number of integers smaller than $k$ and coprime with $k$. Since, given $k=\prod p_i^l$, the Euler function is also known and equal to $\phi(k)= k\prod_{p_i}(1-1/p_i)$ \cite{ribenboim}, the algorithm would only require the complete factorization of $k$ and the evaluation of $F(k)$ and $\phi(k)$. Unfortunately, since the simple use of Shor's quantum algorithm by itself does not look as an efficient tool for the full factorization of a composite integer (as it would require intermediate tests of primality, see, e.g., our comments in ref. \cite{carlini}), this method does not look much promising at present. Instead, in this paper we will describe a quantum algorithm which directly tests whether a composite number is of Carmichael type without the need of knowing a priori a complete factorization of $k$, but by counting how many bases $a$ satisfy condition (\ref{uno}). 
The power of the algorithm relies on a particular property of the function $F(k)$, i.e. that for an arbitrary composite integer $k$, $F$ divides $\phi$, or $F(k)=\phi(k)/m$, with $m=1, 2, ...$ (see, e.g., ref. \cite{erdos}). In particular, if $k$ is Carmichael we have $m=1$, while if $k$ is not Carmichael we have $m\geq 2$. In other words, if $k$ is Carmichael, then there are no bases $a$ which do not satisfy condition (\ref{uno}), while if $k$ is not Carmichael, then at least half of the bases $a$ do not satisfy this condition. It is mainly the existence of such a gap which allows us to design an efficient quantum probabilistic algorithm for the certification of Carmichael numbers. \section{Is $k$ Carmichael ?} The main idea underlying our quantum computation is the repeated use of the counting algorithm COUNT originally introduced by Brassard et al. \cite{brassard}. The algorithm COUNT makes an essential use of Grover's unitary operation $G$ for extracting some elements from a flat superposition of quantum states, and Shor's Fourier operation $F$ for extracting the periodicity of a quantum state. Grover's unitary transformation is given by $G=-WS_0WS_1$, where the Walsh-Hadamard transform $W$ is defined as \begin{equation} W|a>\equiv {1\over \sqrt{k}}\sum_{b=0}^{k-1}(-1)^{a\cdot b}|b> \label{w} \end{equation} (with $a\cdot b$ being the qubitwise product of $a$ and $b$), $S_0\equiv I-2|0><0|$ and $S_1\equiv I-2\sum_{w}|w><w|$, where $|w>$ are the searched states. Shor's operation is, instead, given by the Fourier transform\footnote{Note that $W|0>=F|0>=\sum_{a=0}^{k-1}|a>/\sqrt{k}$.} \begin{equation} F|a>\equiv {1\over \sqrt{k}}\sum_{b=0}^{k-1}e^{2i\pi ab/k}|b>. \label{f} \end{equation} The COUNT algorithm can be summarized by the following sequence of operations: \vspace{1cm} {\bf COUNT}: ~~1) $(W|0>)(W|0>)=\sum_{m=0}^{P-1}|m>\sum_{a=0}^{k-1}|a>$ ~~2) $\rightarrow (F\otimes I)[\sum_{m=0}^{P-1}|m>G^m(\sum_{a=0}^{k-1}|a>)]$ ~~3) $\rightarrow \mbox{measure} ~~|m>$. 
\vspace{1cm} Since the amplitude of the set of states $|w>$ after $m$ iterations of $G$ is a periodic function of $m$, the estimate of such a period by Fourier analysis and the measurement of the ancilla qubit $|m>$ will give information on the size $t$ of this set, on which the period itself depends. The parameter $P$ determines both the precision of the estimate $t$ and the computational complexity of the COUNT algorithm (which requires $P$ iterations of $G$). Our quantum algorithm uses COUNT for estimating the number $t_k\equiv \phi(k)-F(k)$ of bases for which a given composite $k$ is not a pseudoprime (i.e. the number of bases coprime to $k$ which do not satisfy condition \rf{uno}), and $R$ ancilla qubits $|m_i>$ which will be finally measured. At first, we have to select the composite number $k$, which can be done, e.g., by use of the quantum analogue of Rabin's randomized primality test \cite{rabin} as described in ref. \cite{carlini}, and which will take only $poly(\log k)$ steps.\footnote{The quantum algorithm for primality test of a given integer $k$ counts the number ${\tilde t}_k$ of bases $1\leq a<k$ which are witnesses to the compositeness of $k$, i.e. such that $W_k(a)=0$, which happens when at least one of the two conditions, $(i) ~a^{k-1} \bmod k \neq 1$ or $(ii) ~\exists ~i\in [1, m] ~/~ \gcd(a^{(k-1)/2^i}, k)\neq 1$, with $k-1\equiv 2^mn$, is satisfied (while $W_k(a)=1$ if neither (i) nor (ii) are satisfied). 
The algorithm exploits the gap between the number ${\tilde t}_k$ of witnesses $a$ with $W_k(a)=0$, which, for a composite number $k\equiv k_{co}$ is given by ${\tilde t}_k\geq 3(k-1)/4$ [10, 13], while for a prime number $k\equiv k_{pr}$ is given by ${\tilde t}_k=0$.} We can then proceed with the main core of the quantum Carmichael test algorithm, by starting with the state \begin{equation} |\psi_0>\equiv |0>_1....|0>_R|0>|0>, \label{4} \end{equation} act on each of the first $R+1$ qubits with a Walsh-Hadamard transform $W$, producing, respectively, the flat superpositions $\sum_{m_i=0}^{P-1}|m_i>/ \sqrt{P}$, for $i=1, ... R$, and $\sum_{a=0}^{k-1}|a>/\sqrt{k}$, then perform a $CTRL-NOT$ operation on the last qubit (i.e., flipping the value of this qubit) subject to the condition that the state $|a>$ is coprime with $k$, \footnote{This can be done, e.g., using a separate routine which runs on a classical computer and exploits the Euclid algorithm. Otherwise, a unitary transformation representing the $|a>$-controlled Euclid decomposition $E(a)$ can also be easily obtained by use of $l\simeq O[\log k]$ extra ancilla qubits and by building the state $|r_1\equiv k \bmod a>|r_2\equiv a\bmod r_1>|r_3\equiv r_1\bmod r_2>... |r_{l+1}\equiv r_{l-1}\bmod r_l>|r_l\bmod r_{l+1}> |E(a)\equiv\Theta[r_{l+1}]>$, where the last operation $\Theta$ is performed upon the condition that the previous ancilla qubit ($r_l \bmod r_{l+1}$) assumes the value $|0>$. The computational complexity of this quantum subroutine is polynomial in $\log k$.} and finally act on the $|a>$ qubits with an $|m_1>....|m_R>$-'controlled' Grover operation $G^m$ selecting the bases $|a>$ for which $k$ is not a pseudoprime from those for which it is a pseudoprime. We thus obtain the state \begin{eqnarray} |\psi_1>&\equiv &{\sum_{m_1=0}^{P-1}|m_1>\over \sqrt{P}}.... 
{\sum_{m_R=0}^{P-1}|m_R>\over \sqrt{P}} \nonumber \\ &\times & {G^{\sum_{i=1}^R m_i}\over \sqrt{k}} [\sum_{G_k=1}|a>|1>+\sum_{G_k=0}|a>|0>], \label{6} \end{eqnarray} where for $G$ we use $S_1\equiv I-2\sum_{Z_k(a)=0}|a><a|$, with the function $Z_k(a)$ defined as $Z_k(a)=0$ when condition \rf{uno} is not satisfied, and $Z_k(a)=1$ if condition \rf{uno} is satisfied. \footnote{A unitary transformation which represents the function $Z_k(a)$ can be easily performed by adding an extra ancilla qubit and building the $|a>$-controlled state $|Z_k(a)\equiv\Theta[a^{k-1}\bmod k]>$. The operator $S_1\sum_a|a>=-\sum_a(-1)^{Z_k(a)}|a>$ can then be easily realized by tensoring the states $|a>$ with the ancilla qubit $|e>\equiv [|0>-|1>]/\sqrt{2}$ and acting with $U_{Z_k(a)}:|a>|e>\rightarrow |a>|e + Z_k(a)\bmod 2>$. All the operations leading to the evaluation of $Z_k(a)$, except the last for the phase change, have to be undone again, as usual, before acting with $S_1$ and $G$.} In the following we will also assume that $P\simeq O[poly (\log k)]$, so that the steps required to compute the repeated Grover operations $G^{m_1+....+m_R}$ is polynomial in $\log k$. We then define the quantities \begin{equation} \sin\theta_k\equiv \sqrt{t_k\over k} \label{7} \end{equation} and \begin{eqnarray} k_{m_1....m_R}&\equiv & \sin [2(m_1+....+m_R)+1]\theta_k \nonumber \\ l_{m_1....m_R}&\equiv & \cos [2(m_1+....+m_R)+1]\theta_k, \label{7a} \end{eqnarray} where $t_k$ is the number of bases $a$ for which $Z_k(a)=0$, and the states \begin{eqnarray} |B_1^k>&\equiv & {1\over \sqrt{t_k}}\sum_{Z_k(a)=0}|a> \nonumber\\ |B_2^k>&\equiv & {1\over \sqrt{\phi(k)-t_k}}\sum_{Z_k(a)=1}|a>. \label{8} \end{eqnarray} Next we apply Shor's Fourier transform on each of the first $R$ ancilla qubits in order to extract the periodicity $\theta_k$ which is hidden in the amplitudes $k_{m_1....m_R}$ and $l_{m_1....m_R}$, i.e. 
we transform $|\psi_1>$ into \begin{eqnarray} |\psi_2>&\equiv &{\sum_{m_1, l_1=0}^{P-1}e^{2i\pi l_1m_1/P}|l_1>\over P}.... \nonumber \\ &\times &{\sum_{m_R, l_R=0}^{P-1}e^{2i\pi l_Rm_R/P}|l_R>\over P} \nonumber \\ &\times &[(k_{m_1....m_R}|B_1^k>+ l_{m_1....m_R}|B_2^k>)|1> \nonumber \\ &+&|Rest>|0>], \label{10} \end{eqnarray} where the state $|Rest>$ is the result of the operation $G^m$ acting on the bases $|a>$ which are not coprime with $k$. Finally, we perform a measurement of the last qubit. If we get $|0>$, we start again the whole algorithm from eq. \rf{4}. If, instead, we obtain $|1>$, we can proceed since eq. \rf{10} is reduced to the state (which contains only bases for which $G_k(a)=1$) \begin{eqnarray} |\psi_3>&\equiv &{1\over 2}\sum_{l_1,...l_R=0}^{P-1}|l_1>....|l_R> e^{-i\pi (l_1+....+l_R)P} \nonumber \\ &\times & \biggl [e^{i\pi f_k^{(R)}}\prod_{i=1}^{R} s_{l_i+}^{(P)}(-i|B_1^k>+|B_2^k>) \nonumber \\ &+&e^{-i\pi f_k^{(R)}}\prod_{i=1}^{R} s_{l_i-}^{(P)}(i|B_1^k>+|B_2^k>)\biggr ], \label{11} \end{eqnarray} where we have introduced the following quantities, \begin{eqnarray} f_k&\equiv & {P\theta_k\over \pi}~~~~ ;~~~~0\leq f_k\leq {P\over 2} \nonumber \\ f_k^{(R)}&\equiv & f_k\left [R+{(1-R)\over P}\right ] \label{12} \end{eqnarray} and \begin{equation} s_{l_i\pm}^{(P)}\equiv {\sin\pi (l_i\pm f_k)\over P\sin [\pi(l_i\pm f_k)/P]}. \label{13} \end{equation} It is easy to see that the probability of measuring the last qubit in eq. \rf{10} in the state $|1>$ is given by $P_{|1>}=\phi(k)/k$, which means that (using the asymptotic behaviour $\phi(k)\simeq k/\log\log k$) we require an average number $T_{av}\simeq (P_{|1>})^{-1}\simeq O[\log\log k]$ of steps to obtain eq. \rf{11}. Now, with eq. \rf{11} at hand, we can count the bases coprime with $k$ for which $k$ is not a pseudoprime. 
There are then two possibilities: either $k\equiv k_C$ is Carmichael, in which case $t_{k_C}=0$ and therefore $\theta_{k_C}=f_{k_C}=0$; or $k\equiv k_{NC}$ is not Carmichael, for which $t_{k_{NC}}\geq {k_{NC}}/2$ and $\theta_{k_{NC}}\geq \pi/4$, implying that $P/4\leq f_{k_{NC}}\leq P/2$. Looking at eq. (\ref{11}), we can see that, in the case when $k$ is Carmichael, $G$ effectively acts as an identity operator, so that $|\psi_3>$ simplifies to \begin{equation} |\psi_3>\rightarrow |0>_1....|0>_R|B_2^k>~~~~;~~~~ \mbox{when $k=k_C$}. \label{14} \end{equation} On the other hand, when $k$ is not Carmichael, almost all of the ancilla qubits in $|\psi_3>$ will be in a state different from $|0>_1....|0>_R$. In fact, the probability of finally measuring $|0>_1....|0>_R$ when $k$ is not Carmichael is given by \begin{eqnarray} P(|0>_1....|0>_R)\biggr |_{k_{NC}}&=& \left ({\sin \pi f_k\over P\sin {\pi f_k\over P}}\right )^{2R}\biggr |_{k_{NC}} \nonumber \\ &\equiv &(\alpha_k)^{2R}\biggr |_{k_{NC}}\leq \left ({\sqrt{2}\over P}\right )^{2R}, \label{15} \end{eqnarray} since we have that $f_{k_{NC}}\geq P/4$. The quantum algorithm is probabilistic since, if in the final measurement process of the $R$ ancilla qubits we obtain a state with {\it at least one} of the qubits different from $|0>$, we can declare with {\it certainty} that the number {\it $k$ is not Carmichael}; on the other hand, if {\it all} the ancilla qubits are in the state $|0>$, we can claim with an {\it error probability smaller than} $O[P^{-2R}]$ that the number {\it $k$ is Carmichael}. One important feature of the quantum algorithm is that clearly, if the intermediate and final measurement steps are omitted, it is unitary and fully reversible, and as such it can be used as a subroutine unitary transform inside a larger and more complicated algorithm (see next section). 
Another crucial feature is the existence of a {\it gap} between the cardinalities (essentially $F(k)$) of the domain of the test function $Z_k(a)$ when $k$ is Carmichael and when it is not. Finally, the computational complexity of the quantum algorithm can be written as $S_P \simeq O[kRPS_G/\phi(k)]$, with the number of steps required for $G$ given (using $P\simeq O[poly(\log k)]$) by $S_G\simeq O[poly(\log k)]$, so that we get $S_P\simeq O[R ~poly(\log k)(\log\log k)]$, which is polynomial in $\log k$. \section{Counting Carmichael numbers} One further and interesting problem in which the quantum algorithm of the previous section can be explicitly used is for the test of a conjecture by Pomerance et al. \cite{pomerance} concerning the asymptotic distribution $t_N$ of Carmichael numbers smaller than a given integer $N$, which, $\forall$ fixed $\epsilon >0$ and $\forall N>N_0(\epsilon)$, should be lower bounded by\footnote{ The existence of the upper bound $t_N|_{th}\leq O[N l(N)^{-(1-\epsilon)}]$ is proven in ref. \cite{pomerance} (see also ref. \cite{erdos2}).} \begin{eqnarray} t_N\biggl |_{th}&\geq &O\left [{N\over l(N)^{2+\epsilon}}\right ] \nonumber \\ l(N)&\equiv &N^{\log e(\log\log\log N)/(\log\log N)}. \label{a1} \end{eqnarray} The quantum algorithm (which is also discussed in more details in ref. \cite{carlini}) consists of a sub-loop which checks whether a given composite $k$ is Carmichael, by counting the bases for which it is not a pseudoprime, and a main loop which counts the number of Carmichaels smaller than $N$. In particular, we have: \vspace{1cm} {\bf MAIN-LOOP}: ~~{\it Count $\sharp \{k | k=k_C< N \}$ using {\bf COUNT} with $G\rightarrow {\tilde G}$ and $S_1\rightarrow {\tilde S}_1\equiv 1-2\sum_{k_C}|k_C><k_C|$ (parameter $Q$)} {\bf SUB-LOOP}: ~~{\it Parallel compositeness and Carmichael certification tests $\forall ~k_{co}<N$ (parameter $P$) and (approximate) construction of ${\tilde S}_1$}. 
\vspace{1cm} The construction of the operator ${\tilde S}_1$ in the SUB-LOOP of the algorithm first needs the selection of composites $k_{co}<N$. This is done, again, using the quantum randomized primality test described in ref. \cite{carlini}. In particular, one starts with the state \begin{eqnarray} |{\bar \psi}_0>&\equiv &{1\over \sqrt{N}}\sum_{k=1}^{N}|k-1>|0>_1|0>_2|0>_3 \nonumber \\ &\times &|0>_4|0>_G|0>_c, \label{22} \end{eqnarray} acting on the ancilla qubit $|0>_1$ with $F$ (producing the flat superposition $\sum_{m=0}^{P-1}|m>_1/\sqrt{P}$), on the ancilla qubit $|0>_2$ with a $|k-1>$-'controlled' $F$ (producing the flat superposition $\sum_{a=0}^{k-1}|a>_2/\sqrt{k}$) and an $|m>_1$-'controlled' ${\hat G}^m$ (with Grover's $\hat G$ selecting bases with $W_k(a)=0$), again with an $F$ on the $|m>_1$ ancilla qubits, then evaluating the function $[1-\Theta[m+1]]$ on the $|0>_c$ ancilla qubit, and finally undoing all the previous operations except the last one, obtaining \begin{eqnarray} |{\bar \psi}_1>&\equiv &{1\over \sqrt{N}} \biggl [\biggl (\sum_{k_{pr}}|k>|0>_{1, 2} \nonumber \\ &+&\sum_{k_{co}}|k>|C^k>_{1, 2}\biggr )|0>_c \nonumber \\ &+&\sum_{k_{co}}|k>(|0>_{1, 2}-|C^k>_{1, 2})|1>_c\biggr ]|0>_{3, 4, G}, \label{psi1} \end{eqnarray} where $|C^k>_{1, 2}$ is a correction term which has been defined in ref. \cite{carlini} and is s.t. ${~}_{1, 2}<C^k|C^k>_{1, 2}={~}_{1, 2}<C^k|0>_{1, 2}=\beta_k^2$, with $\beta_k\equiv (\sin \pi g_k)/(P\sin \pi g_k/P)$, $g_k\equiv P(\arcsin\sqrt{{\tilde t}_k/k})/ \pi$, and $\beta_{k_{pr}}=1$ ($\beta_{k_{co}}\leq 2/[\sqrt{3}P]$). Then, we proceed with the selection of Carmichael numbers among the composites $k_{co}<N$. 
To do so, one has to act on the $|0>_3$ qubit with $F$ (producing the flat superposition $\sum_{m=0}^{P-1}|m>_3/\sqrt{P}$), on $|0>_4$ with a $|k-1>$-'controlled' $F$ (producing the superposition of base states $\sum_{a=0}^{k-1}|a>_4/\sqrt{k}$), on $|0>_G$ with an $|a>_4$-'controlled' Euclid $E(a)$ operation (selecting the $a$ coprime with $k$), with an $|m>_3$-'controlled' Grover transform $G^m$ on the $|a>_4$ qubits (selecting the bases for which $k$ is not a pseudoprime), followed by a Fourier transform $F$ and a phase change $S_0$ on the ancilla qubit $|m>_3$ conditioned upon the last ancilla qubit in $|{\bar \psi}_1>$ being in the state $|1>_c$, then undo again the previous operations (except $S_0$, $E(a)$ and the first $F$ on $|m>_3$) and finally also undo $[1-\Theta(m+1)]$ on the $|\cdot >_c$ qubit. In this way, defining ${\tilde S}_1$ as the sequence of all these unitary transformations, one obtains the state (see FIG. 1) \footnote{For more details on the quite straightforward but lengthy algebra leading to eq. \rf{30} see ref. 
\cite{carlini}.} \begin{eqnarray} {\tilde S}_1|{\bar \psi}_0>&\equiv &[(|\Psi>+|E>)|1>_G \nonumber \\ &+&|REST>|0>_G]|0>_c, \label{30} \end{eqnarray} where \begin{eqnarray} |\Psi>&\equiv & {1\over \sqrt{N}} \sum_{k=1}^{N}(-1)^{F_k}|k-1>|0>_{1, 2, 3}{\sum_{G_{k-1}=1}|a>_4\over \sqrt{k}} \nonumber \\ |E>&\equiv & {1\over \sqrt{N}}\biggl [2\sum_{k_{co, C}}|k>|C^k>_{1, 2}|0>_3 {\sum_{G_k=1}|a>_4\over \sqrt{k}} \nonumber \\ &+& \sum_{k_{co, NC}}\sin\Phi_k|k>(|C^k>_{1, 2} \nonumber \\ &-&|0 >_{1, 2})|e^k>_{3, 4}\biggr ], \label{31} \end{eqnarray} $F_{k+1}\equiv 1$ for $k=k_C$ and $F_{k+1}\equiv 0$ for $k=k_{NC}$, \begin{equation} \sin\Phi_k\equiv\sqrt{\phi(k)\over k}, \label{24b} \end{equation} $|REST>$ defines the contribution (which, together with the state $|e^k>_{3, 4}$ - with norm ${~}_{3, 4}<e^k|e^k>_{3, 4}= 4\alpha_k^2$ - we do not write here for the sake of simplicity) from the bases with $G_k(a)=0$, and the last qubit selects the contribution from the bases with $G_k(a)=1$ ($|1>_G$) or with $G_k(a)=0$ ($|0>_G$). \begin{figure}[htbp] \centerline{\epsfxsize=8.6cm \epsfbox{Carm8.eps}} \caption{The quantum network for the construction of the state ${\tilde S}_1 |{\bar \psi}_0>$. Selection of composites is done in $|{\bar \psi}_1>$, selection of Carmichaels is done in ${\tilde S}_1|{\bar \psi}_0>$. The operator $C$ is defined as $C\equiv 1- \Theta_{m+1}$.} \label{fig} \end{figure} In particular, one can show that the norm of the correction term $|E>$ in eq. \rf{31} is upper bounded by \begin{eqnarray} <E|E>&=&{4\over N}\biggl [\sum_{k_{co, C}}{\phi(k)\over k}\beta_k^2+ \sum_{k_{co, NC}}{\phi(k)\over k}(1-\beta_k^2)\alpha_k^2\biggr ] \nonumber \\ &\leq & {4\pi^2\over 3P^2}. \label{32} \end{eqnarray} Moreover, it can be shown that the overall contribution to the state \rf{31} coming from the bases $a$ for which $G_k(a)=1$ and the last ancilla qubit is in the state $|1>_G$, i.e. 
$|\Phi>\equiv |\Psi>+|E>$, has a norm $<\Phi|\Phi>=[\sum_{k=1}^{N}\phi(k)/k]/N\simeq \pi^2/6$. Next, Grover's transform $\tilde G$ entering the MAIN-LOOP of the algorithm, i.e. that counting the total number $t_N$ of $k_C<N$, can be written as \begin{equation} \tilde G\equiv U_2~{\tilde S}_1~~~~;~~~~U_2\equiv -W^{(k)}S_0^{(k)}W^{(k)}, \label{35} \end{equation} where now the operations $W^{(k)}$ and $S_0^{(k)}$ are acting on the states $|k>$. Then, starting from $|{\bar \psi}_0>$ given by formula (\ref{22}) and tensoring it with another flat superposition of ancilla states, i.e. \begin{equation} |{\bar \psi}_2>\equiv {1\over \sqrt{Q}}\sum_{m=0}^{Q-1}|m>_5|{\bar \psi}_0>, \label{43} \end{equation} acting on $|{\bar \psi}_0>$ with the $|m>_5$-'controlled' ${\tilde G}^m$ and with $F$ on $|m>_5$, and exploiting the linearity of the unitary transformation ${\tilde S}_1$ when acting on $|\Phi>|1>_G$ and on $|REST>|0>_G$, after some elementary algebra we get (see ref. \cite{carlini} for more details)\footnote{We omit $|0>_c$ in eq. 
\rf{44} for simplicity.} \begin{eqnarray} |{\bar \psi}_3>&\equiv &\biggl [{1\over 2}\sum_{n=0}^{Q-1}e^{i\pi n(1-1/Q)} |n>_5[e^{-i\pi f_Q}s^{(Q)}_{n-} \nonumber \\ &\times &(i|G>+|B>)+e^{i\pi f_Q}s^{(Q)}_{n+}(-i|G>+|B>)] \nonumber \\ &+&{1\over Q}\sum_{m, n=0}^{Q-1}e^{2i\pi mn/Q}|n>_5|E_m>\biggr ]|1>_G \nonumber \\ &+& {1\over Q}\sum_{m, n=0}^{Q-1}e^{2i\pi mn/Q}|n>_5 \nonumber \\ &\times &{\tilde G}^m|REST>|0>_G, \label{44} \end{eqnarray} where we have defined, similarly to section III, \begin{eqnarray} \sin\theta_N&\equiv & \sqrt{t_N\over N} \nonumber \\ f_Q&\equiv &{Q\theta_N\over \pi }, \label{45b} \end{eqnarray} the 'good' and 'bad' states, respectively, as \begin{eqnarray} |G>&\equiv &{\sum_{k_C}|k>|0>_{1, 2}\over \sqrt{t_N}} \nonumber \\ |B>&\equiv &{\sum_{k_{NC}}|k>|0>_{1, 2}\over \sqrt{N-t_N}}, \label{good} \end{eqnarray} the 'error' term as \begin{equation} |E_n>\equiv\sec\theta_N\left [\sum_{j=1}^n ~l_{n-j}{\tilde G}^{j-1}\right ] U_2|E>, \label{42} \end{equation} with $l_j\equiv \cos(2j+1)\theta_N$, and $s^{(Q)}_{n\pm}$ as in eq. (\ref{13}). Finally, we measure the last ancilla qubit $|\cdot >_G$. If we get $|0>_G$, we start again building the state $|{\bar\psi}_0>$ as in eq. \rf{22}. Otherwise, if we get $|1>_G$ (i.e., the part of $|{\bar\psi}_3>$ coming from the bases with $G_k(a)=1$), we can go on to the last step of the algorithm and further measure the first ancilla qubit $|\cdot >_5$ in $|{\bar\psi}_3>$. \footnote{Since the probability of measuring the last qubit in eq. \rf{44} in the state $|1>_G$ is given, this time, by ${\tilde P}_{|1>_G}=<\Phi|\Phi>$, this means that we require only an average number $T_{av}\simeq ({\tilde P}_{|1>_G})^{-1} \simeq O[1]$ of repetitions of the algorithm from eq. \rf{22} to eq. 
\rf{44}.} Using the expected estimate that $\theta_N\sim O[1/l(N)^{1+\epsilon/2}]$, and by choosing \begin{equation} Q\simeq O[l(N)^{\beta}]~~~~;~~~~\beta>1+\epsilon/2, \label{q} \end{equation} we get the ansatz $1< f_Q<Q/2-1$, for which it can be shown \cite{brassard} that the probability $\tilde W$ to obtain any of the states $|f_->_5$, $|f_+>_5$, $|Q-f_->_5$ or $|Q-f_+>_5$ \footnote{Where $f_-\equiv [f_Q]+\delta f$ and $f_+\equiv f_- +1$, with $0<\delta f<1$.} in the final measurement is given by \footnote{Formula \rf{46} is calculated (see ref. \cite{carlini}) from the estimate of ${\tilde W}_{E_n}$ (the contribution coming from terms in eq. \rf{44} involving $|E_n>$), using the upper bound $<E_n|E_n>\leq O[n^2] <E|E>$ and choosing $P\simeq c~Q$, with $c\gg 1$. An alternative to this choice, for reducing the 'error' probability ${\tilde W}_{E_n}$, is to repeat the counting algorithm a sufficient number of times, as done in section III.} \begin{equation} {\tilde W}\geq {8\over \pi^2}. \label{46} \end{equation} This means that with a high probability we will always be able to find one of the states $|f_{\pm}>_5$ or $|P-f_{\pm}>_5$ and, therefore, to evaluate the number $t_N$ from eq. (\ref{45b}). Since in general $f_Q$ is not an integer, the measured ${\tilde f}_Q$ will not match exactly the true value of $f_Q$, and consequently (defining ${\tilde t}_N\equiv N\sin^2{\tilde \theta}_N$, with ${\tilde \theta}_N={\tilde \theta}_N({\tilde f}_Q)$) we will have an error over $t_N$ \cite{brassard} given by \begin{eqnarray} |\Delta t_N|_{exp}&\equiv &|{\tilde t}_N-t_N|\leq\pi{N\over Q} \left [{\pi \over Q}+2\sqrt{t_N\over N}\right ] \nonumber \\ &\simeq &O\left [{N\over Q}~l(N)^{-(1+\epsilon/2)}\right ]. \label{49} \end{eqnarray} Then, if we want to check the theoretical formula for $t_N$ with a precision up to some power $\delta$, with $0<\delta\ll \epsilon$ in $l(N)$, i.e. 
with \begin{equation} |\Delta t_N|_{th}\simeq O[N~ l(N)^{-(2+\epsilon +\delta)}], \label{50} \end{equation} we have to impose that $|\Delta t_N|_{exp}< |\Delta t_N|_{th}$, which implies that we can take $Q$ as given by eq. \rf{q} with $\beta> 1+\epsilon/2 +\delta$.\footnote{One can further minimize the errors (i.e., boost the success probability $\tilde W$ exponentially close to one and achieve an exponential accuracy) by repeating the whole algorithm and using the majority rule \cite{brassard}.} The computational complexity of the quantum algorithm can be finally estimated as $S_Q\simeq O[QPS_G]\geq O[l(N)^{2+\epsilon +2\delta}]$, i.e. superpolynomial but still subexponential in $\log N$.\footnote{ The contribution from a single Grover's transform is $S_G\simeq O[poly(\log N)]$, which is dominated by the contribution coming from $QP\simeq c~Q^2$. Furthermore, the use of $R$ ancilla qubits, as done in section III, instead of the choice $P\simeq c~Q$, would lead to the (subexponential in $\log N$) complexity $S_Q\geq l(N)^{(1+\epsilon/2+\delta )(1+1/R)}$.} \section{Discussion} Our quantum algorithms testing and counting Carmichael numbers make essential use of some of the basic blocks of quantum networks known so far, i.e. Grover's operator for the quantum search of a database \cite{grover}, Shor's Fourier transform for extracting the periodicity of a function \cite{shor} and their combination in the counting algorithm of ref. \cite{brassard}. The most important feature of our quantum probabilistic algorithms is that the coin tossing used in the correspondent classical probabilistic ones is replaced here by a unitary and reversible operation, so that the quantum algorithm can even be used as a subroutine in larger and more complicated networks. Our quantum algorithm may also be useful for other similar tests and counting problems in number theory if there exists a classical probabilistic algorithm which somehow can guarantee a good success probability. 
Finally, it is known that in a classical computation one can count, by using Monte-Carlo methods, the cardinality of a set which satisfies some conditions, provided that the distribution of the elements of such a set is assumed to be known (e.g., homogeneous). A further crucial strength and novelty of our algorithm is its ability to efficiently and successfully solve problems where such knowledge or regularities may not be present. \vspace{33pt} \noindent {\Large \bfseries Acknowledgements} \bigskip A.H.'s research was partially supported by the Ministry of Education, Science, Sports and Culture of Japan, under grant n. 09640341. A.C.'s research was supported by the EU under the Science and Technology Fellowship Programme in Japan, grant n. ERBIC17CT970007; he also thanks the cosmology group at Tokyo Institute of Technology for the kind hospitality during this work. Both authors would like to thank Prof.~N. Kurokawa for helpful discussions.
1,108,101,565,879
arxiv
\section{Introduction} In recent years, notable progress has been made in the research and characterization of new closed-form exactly solvable systems in quantum mechanics \cite{gomez,gomez2,gomez3,gomez4,gomez5,gomez6,quesne1,quesne,quesne2,quesne3,odake,sasaki,ho,odake2,sasaki2,dutta,grandati2,grandati3}. The obtained systems are regular rational extensions of some shape-invariant potentials \cite{cooper,Dutt,Gendenshtein} and are associated to families of exceptional orthogonal polynomials (EOP) built from the Laguerre or Jacobi classical orthogonal polynomials. In all the considered cases, the initial potentials belong to the second category (as defined in \cite{grandati}) of primary translationally shape-invariant potentials (TSIP): the extended potentials of the $J1$ and $J2$ series (associated to the Jacobi EOP) are obtained from the generic second category potentials (Darboux-P\"{o}schl-Teller or Scarf of the hyperbolic or trigonometric types); as for the extended potentials of the $L1$, $L2$ and $L3$ series, they are obtained from the unique exceptional second category potential, which is the isotonic one. If we except the specific case of the harmonic oscillator, which has been extensively treated \cite{shnol',samsonov,tkachuk,gomez,carinena,fellows,grandati2}, the solvable extensions of first category potentials have been much less studied. Referring to the classification established in \cite{grandati}, the exceptional first category primary TSIP are the one-dimensional harmonic oscillator (HO), the Morse potential and the effective radial Kepler-Coulomb (ERKC) system, whereas the generic first category primary TSIP include the trigonometric and hyperbolic Rosen-Morse potentials as well as the Eckardt potential. A general study of the possible extensions of a large number of exactly solvable potentials from the point of view of conditionally solvable potentials has been made by Junker and Roy \cite{junker}. 
The case of the Morse potential has also been considered by Gomez-Ullate et al.~\cite{gomez}, who have determined the algebraic deformations of this system which are solvable by polynomials. In \cite{grandati3} we have developed a new approach which allows one to generate an infinite set of regular exactly solvable extensions starting from every TSIP in a very direct and systematic way, without taking recourse to any ansatz. This approach is based on a generalization of the usual SUSY partnership built from excited states. The corresponding Darboux-B\"{a}cklund Transformations (DBT), which are covariance transformations for the class of Riccati-Schr\"{o}dinger (RS) equations \cite{grandati}, are based on regularized RS functions which are obtained by using discrete symmetries acting on the parameters of the considered family of potentials. Considering the isotonic oscillator, we have obtained the three infinite sets $L1$, $L2$ and $L3$ of regular rationally solvable extensions of this potential and have given a simple and transparent proof of the shape-invariance of the potentials belonging to the $L1$ and $L2$ series. In the present paper we show that the same approach can be applied to generate infinite towers of solvable rational extensions from every exceptional first category potential. As shown in \cite{grandati}, the first category primary TSIP can be reduced into a harmonic one by a change of variable which satisfies a constant coefficient Riccati equation. The exceptional cases correspond to the cases where this equation degenerates into a linear equation or a Riccati equation with a double root in the right-hand member, namely the HO, the Morse and ERKC potentials. In these cases the bound states are expressible in terms of generalized Laguerre Polynomials (GLP) \cite{cooper,Dutt}. The paper is organized as follows. 
After briefly recalling the basic elements of our approach, we test its efficiency on the simple and exhaustively studied case of the one-dimensional HO, retrieving very simply the results already obtained in \cite{shnol',samsonov,tkachuk,gomez,carinena,fellows,grandati2}. In the second and third parts, we treat successively the Morse and ERKC systems, building the associated towers of solvable regular extensions and characterizing their eigenstates. For the Morse potential we recover the algebraic deformations described by Gomez-Ullate et al.~\cite{gomez}, the extensions being not strictly isospectral to the primary potential. For the ERKC potential we obtain two distinct regimes with respect to the value of the ``angular momentum'' parameter. In the first regime the extensions are strictly isospectral to the primary potential, whereas in the second regime they are not. Contrary to the case of the second category potentials, the extensions of the exceptional first category potentials do not inherit the shape-invariance properties of the primary potential. \section{Darboux-B\"{a}cklund Transformations (DBT) and regular extensions} \subsection{General scheme} Consider a family of one-dimensional Hamiltonians indexed by a multiparameter $a$ \begin{equation*} \widehat{H}(a)=-d^{2}/dx^{2}+V(x;a),\ a\in \mathbb{R}^{m},\ x\in I\subset \mathbb{R}. \end{equation*} If $\psi _{\lambda }(x;a)$ is an eigenstate of $\widehat{H}(a)$ associated to the eigenvalue $E_{\lambda }(a)$, then its logarithmic derivative $w_{\lambda }(x;a)=-\psi _{\lambda }^{\prime }(x;a)/\psi _{\lambda }(x;a)$, which we will call a Riccati-Schr\"{o}dinger (RS) function, satisfies a particular Riccati equation of the following form \begin{equation} -w_{\lambda }^{\prime }(x;a)+w_{\lambda }^{2}(x;a)=V(x;a)-E_{\lambda }(a). \label{edr4} \end{equation} Eq(\ref{edr4}) is called the Riccati-Schr\"{o}dinger (RS) equation \cite{grandati} for the level $E_{\lambda }(a)$. 
The RS function $w_{\lambda }(x;a)$ presents a simple pole at each node of the eigenstates $\psi _{\lambda }(x;a)$. It is a well-known fact that the set of general Riccati equations is invariant under the group $\mathcal{G}$ of smooth $SL(2,\mathbb{R})$-valued curves $Map(\mathbb{R},SL(2,\mathbb{R}))$ \cite{carinena2,Ramos}. The particular of Riccati-Schr\"{o}dinger equations is, as for it, preserved by a specific subset of $\mathcal{G}$. These transformations, called Darboux- \"{a}cklund Transformations (DBT), are build from any solution $w_{\nu }(x;a) $ of the initial RS equation Eq(\ref{edr4}) as \cit {carinena2,Ramos,grandati} \begin{equation} w_{\lambda }(x;a)\overset{A\left( w_{\nu }\right) }{\rightarrow }w_{\lambda }^{\left( \nu \right) }(x;a)=-w_{\nu }(x;a)+\frac{E_{\lambda }(a)-E_{\nu }(a }{w_{\nu }(x;a)-w_{\lambda }(x;a)}, \label{transfoback2} \end{equation where $E_{\lambda }(a)>E_{\nu }(a)$. Then $w_{\lambda }^{\left( \nu \right) } $ is a solution of the RS equation: \begin{equation} -w_{\lambda }^{\left( \nu \right) \prime }(x;a)+\left( w_{\lambda }^{(\nu )}(x;a)\right) ^{2}=V^{\left( \nu \right) }(x;a)-E_{\lambda }(a), \label{eqtransform} \end{equation with the same energy $E_{\lambda }(a)$ as in Eq(\ref{edr4}) but with a modified potential \begin{equation} V^{\left( \nu \right) }(x;a)=V(x;a)+2w_{\nu }^{\prime }(x;a). \label{pottrans} \end{equation} The corresponding eigenstate of $\widehat{H}^{\left( \nu \right) }(a)=-d^{2}/dx^{2}+V^{\left( \nu \right) }(x;a)$ can be written \begin{equation} \psi _{\lambda }^{\left( \nu \right) }(x;a)=\exp \left( -\int dxw_{\lambda }^{(\nu )}(x;a)\right) \sim \frac{1}{\sqrt{E_{\lambda }\left( a\right) -E_{\nu }(a)}}\widehat{A}\left( w_{\nu }\right) \psi _{\lambda }(x;a), \label{foDBT} \end{equation where $\widehat{A}\left( a\right) $ is a first order operator given by \begin{equation} \widehat{A}\left( w_{\nu }\right) =d/dx+w_{\nu }(x;a). 
\label{opA} \end{equation} From $V$, the DBT generates a new potential $V^{\left( \nu \right) }$ (quasi) isospectral to the original one and its eigenstates are directly obtained from those of $V$ via Eq(\ref{foDBT}). If the initial system is exactly solvable, which is the case of the translationally shape invariant potentials (TSIP), this scheme allows to build new exactly solvable potentials. Nevertheless, in general, $w_{\nu }(x;a)$ and then the transformed potential $V^{\left( \nu \right) }(x;a)$ are singular at the nodes of $\psi _{\nu }(x;a)$. For instance, if $\psi _{n}(x;a)$ ($\nu =n$) is a bound state of \widehat{H}(a)$, $V^{\left( n\right) }$ is regular only when $n=0$, that is when $\psi _{n=0}$ is the ground state of $\widehat{H}$, and we recover the usual SUSY partnership in quantum mechanics. Starting from an excited state, that is for $n\geq 1$, the transformed potential presents exactly $n$ second order poles and a priori we cannot use $A\left( w_{n}\right) $ to build a regular potential. We can however envisage to use any other regular solution of Eq(\ref{edr4}) as long as it has no zero on the considered real interval I$, even if it does not correspond to a physical state. As shown in the case of the isotonic oscillator, we can obtain such solutions by using specific discrete symmetries $\Gamma _{a}$ which are covariance transformations for the considered family of potentials. $\Gamma _{a}$ acts on the parameters of the potential and transforms the RS function of a\ physical eigenstate w_{n} $ into a non singular but unphysical RS function $v_{n}(x;a)=\Gamma _{a}\left( w_{n}(x;a)\right) $ associated to the eigenvalue $\widetilde{E _{n}(a)=\Gamma _{a}\left( E_{n}(a)\right) $. 
For a solvable TSIP, $w_{n}$ and $v_{n}$ are known in closed form and the regular extended potential (see Eq(\ref{pottrans}) and Eq(\ref{foDBT})) \begin{equation} \widetilde{V}^{\left( n\right) }(x;a)=V(x;a)+2v_{n}^{\prime }(x;a) \end{equation is then (quasi) isospectral to $V(x;a)$ with eigenstates given by (see Eq \ref{transfoback2})) \begin{equation} \left\{ \begin{array}{c} w_{\lambda }^{\left( n\right) }(x;a)=-v_{n}(x;a)+\frac{E_{k}(a)-\widetilde{E _{n}(a)}{v_{n}(x;a)-w_{k}(x;a)} \\ \psi _{k}^{\left( n\right) }(x;a)=\exp \left( -\int dxw_{k}^{(n)}(x;a)\right) \sim \frac{1}{\sqrt{E_{k}\left( a\right) \widetilde{E}_{n}(a)}}\widehat{A}\left( v_{n}\right) \psi _{k}(x;a \end{array \right. , \label{foext} \end{equation for the energy $E_{k}(a)$. Interestingly, such combinations of Darboux-B\"{a}cklund transformations and discrete symmetries appears as natural covariance groups for Painlev\'{e} equations \cite{Adler2}. Very recently another type of discrete symmetries have been also considered by Plyushchay et al \cite{plyushchay1,plyushchay2} in a different context. \subsection{One dimensional harmonic oscillator} To illustrate this general scheme we consider the well studied example \cit {shnol',samsonov,tkachuk,gomez,carinena,fellows,grandati2} of the 1D HO which is simplest exceptional first category TSIP. The corresponding potential with zero ground level$\ $($E_{0}(\omega )=0$) is given by \begin{equation} V(x,\omega )=\frac{\omega ^{2}}{4}x^{2}-\frac{\omega }{2}. 
\end{equation} Its spectrum is well known \begin{equation} E_{n}\left( \omega \right) =n\omega ;\quad \psi _{n}\left( x\right) \sim H_{n}\left( \sqrt{\omega /2}x\right) \exp \left( -\omega x^{2}/4\right) \end{equation and the corresponding RS functions $w_{n}(x)$ can be written as terminating continued fractions \cite{grandati} a \begin{equation} w_{n}(x,\omega )=w_{0}(x,\omega )+R_{n}(x,\omega ), \label{RS functions OH1} \end{equation wher \begin{equation} \left\{ \begin{array}{c} w_{0}(x,\omega )=\frac{\omega }{2}x \\ R_{n}(x,\omega )=-\frac{n\omega }{\omega x-}\Rsh ...\Rsh \frac{\left( n-j+1\right) \omega }{\omega x-}\Rsh ...\Rsh \frac{1}{x}=-\left( \log H_{n}\left( \sqrt{\omega /2}x\right) \right) ^{\prime } \end{array \right. \label{RS functions OH2} \end{equation} The unique parameter transformation which preserves the functional form V(x,\omega )$ is the $\omega $ inversio \begin{equation} \omega \overset{\Gamma _{\omega }}{\rightarrow }\left( -\omega \right) ,\left\{ \begin{array}{c} V(x;\omega )\overset{\Gamma _{\omega }}{\rightarrow }V(x;\omega )+\omega \\ w_{n}(x;\omega )\overset{\Gamma _{\omega }}{\rightarrow }v_{n}(x;\omega )=w_{n}(x;-\omega ) \end{array \right. \end{equation $v_{n}(x;\omega )$ satisfying \begin{equation} -v_{n}^{\prime }(x;\omega )+v_{n}^{2}(x;\omega )=V(x;\omega )-E_{-\left( n+1\right) }\left( \omega \right) , \label{oregpot} \end{equation that is, $E_{n}\left( \omega \right) \overset{\Gamma _{\omega }}{\rightarrow }E_{-\left( n+1\right) }\left( \omega \right) $. 
From Eq.(\ref{RS functions OH1}) and Eq.(\ref{RS functions OH2}) we deduce \begin{equation} v_{n}(x;\omega )=v_{0}(x;\omega )+Q_{n}(x;\omega ), \label{oregRSfct} \end{equation with \begin{equation} v_{0}(x;\omega )=-\frac{\omega }{2}x \label{oregRSfct2} \end{equation and \begin{eqnarray} Q_{n}(x;\omega ) &=&-\frac{n\omega }{\omega x+}\Rsh ...\Rsh \frac{\left( n-j+1\right) \omega }{\omega x+}\Rsh ...\Rsh \frac{1}{x} \label{oregRSfct3} \\ &=&-\left( \log H_{n}\left( i\sqrt{\omega /2}x\right) \right) ^{\prime }. \notag \end{eqnarray} Clearly, $Q_{n}(x;\omega )$ does not present any singularity on the real line, except possibly one at the origin. Indeed the terminating continued fraction has only positive terms which implies that there is no positive singularity and then, since the potential has a even parity, any singularity on the whole real axis, except one at the origin when the number $n$ of denominators is odd. This can be recovered more directly from the expression of $Q_{n}$ in terms of Hermite polynomials of imaginary argument since the Hermite polynomials have all their zeros on the real line, with a zero at the origin for odd $n$. Using the correspondence between Hermite and Laguerre polynomials given by \begin{equation} H_{n}\left( i\sqrt{\omega /2}x\right) =\left\{ \begin{array}{c} \left( -1\right) ^{m}2^{2m}m!L_{m}^{-1/2}\left( -\omega x^{2}/2\right) ,\quad n=2m \\ \left( -1\right) ^{m}2^{2m+1}m!i\sqrt{\omega /2}xL_{m}^{1/2}\left( -\omega x^{2}/2\right) ,\quad n=2m+1 \end{array \right. \label{Herm_Lag} \end{equation the regularity properties of are direct consequences of the Kienast-Lawton-Hahn theorem \cite{szego,magnus} which establishes \emph{Kienast-Lawton-Hahn's Theorem } \ \ \ \ \ Suppose that $\alpha \notin \left\{ -n,...,-1\right\} $. 
Then \mathit{L}_{n}^{\left( \alpha \right) }\left( z\right) $ admits \ \ \ \ \ \ \ \ \ \ \ \ 1) $n$ positive zeros if $\alpha >-1$ \ \ \ \ \ \ \ \ \ \ \ \ 2) $n+\left[ \alpha \right] +1$ positive zeros if -n<\alpha <-1$ ($\left[ \left\vert \alpha \right\vert \right] $ means the integer part of $\alpha $) \ \ \ \ \ \ \ \ \ \ \ \ 3) No positive zero if $\alpha <-n$ \ \ \ \ The number of negative zeros is always $0$ or $1$. \ \ \ \ \ \ \ \ \ \ \ \ 1) $0$ if $\alpha >-1$ \ \ \ \ \ \ \ \ \ \ \ \ 2) $0$ if $-2k-1<\alpha <-2k$ and $1$ if $-2k<\alpha <-2k+1$, with $-n<\alpha <-1$ \ \ \ \ \ \ \ \ \ \ \ \ 3) $0$ if $n$ is even and $1$ if $n$ is odd, with \alpha <-n$ \ \ \ \ Only when $\alpha \in \left\{ -n,...,-1\right\} ,$ we have a zero of $\mathit{L}_{n}^{\left( \alpha \right) }\left( z\right) $ at the origin with multiplicity $\left\vert \alpha \right\vert $. If $\alpha $ decreases through an odd value in $\left\{ -n,...,-1\right\} $, a negative zero is gained and a positive one is lost. If the crossed value is even, simultaneously two zeros,\ one negative and one positive, disappear. \ \ \ \ \ \ \ \ \ \ \ \ Applying the DBT $A\left( v_{n}\right) $ to $w_{k}$ (see Eq(\re {transfoback2})), we obtain \begin{equation} w_{k}(x;\omega )\overset{A\left( v_{n}\right) }{\rightarrow }w_{k}^{\left( n\right) }(x;\omega )=-v_{n}(x;\omega )+\frac{E_{n+1+k}(\omega )} v_{n}(x;\omega )-w_{k}(x;\omega )}, \label{backOH} \end{equation where $w_{k}^{\left( n\right) }(x;\omega )$ is an RS function at energy E_{k}(\omega )$ for the extended potential \begin{equation} V^{\left( n\right) }(x;\omega )=V(x;\omega )+2v_{n}^{\prime }(x;\omega )=V(x;\omega )-\omega +2Q_{n}^{\prime }(x;\omega ). \label{oregSUSYpart} \end{equation} We recover here the results obtained in \cit {shnol',samsonov,tkachuk,gomez,carinena,fellows,grandati2}. 
In particular, for $n=1$ $\widetilde{V}^{\left( 1\right) }(x)$ is the $l=1$ isotonic potential \begin{equation} V^{\left( 1\right) }(x;\omega )=V(x;\omega )-\omega +\frac{2}{x^{2}}=\frac \omega ^{2}}{4}x^{2}+\frac{2}{x^{2}}-\frac{3\omega }{2} \end{equation and for $n=2$, $V^{\left( 2\right) }(x;\omega )$ is the CPRS \cite{carinena} potential \begin{equation} V^{\left( 2\right) }(x;\omega )=\frac{\omega ^{2}}{4}x^{2}+4\omega \frac \omega ^{2}x^{2}-1}{\left( \omega ^{2}x^{2}+1\right) ^{2}}-\frac{3}{2}\omega . \end{equation} For every $n\geq 0$, $V^{\left( n\right) }(x;\omega )$ is (quasi)isospectral to $V(x;\omega )$ \begin{equation} V^{\left( n\right) }(x;\omega )\underset{iso}{\equiv }V(x;\omega ). \label{oregSUSYpart2} \end{equation and regular on the real line $\mathbb{R}$ if $n$ is even and on the positive half real line $\mathbb{R}^{+\ast }$ if $n$ is odd. To keep the same definition domain for the initial and extended potentials, we then must consider only even values of $n=2m$. The isospectrality established above is not strict. Indeed, we have clearly \begin{equation} v_{n}^{\prime }(x;\omega )+v_{n}^{2}(x;\omega )=V^{\left( n\right) }(x;\omega )-E_{-\left( n+1\right) }\left( \omega \right) , \end{equation that is, $-v_{n}(x;\omega )$ is a regular RS function for the extended potential $V^{\left( n\right) }(x;\omega )$, associated to the eigenvalue E_{-\left( n+1\right) }\left( \omega \right) <0$. Then \begin{equation} \psi _{-}^{\left( 2m\right) }(x;\omega )\sim \exp \left( +\int v_{2m}(x;\omega )dx\right) =\frac{\exp \left( -\frac{\omega x^{2}}{4}\right) }{H_{2m}\left( i\sqrt{\omega /2}x\right) }\sim \frac{\exp \left( -\frac \omega x^{2}}{4}\right) }{L_{m}^{-1/2}\left( -\omega x^{2}/2\right) } \end{equation is a physical eigenstate of $\widehat{H}^{\left( 2m\right) }$ and more precisely its fundamental state. 
Consequently the superpartner of the extended potential $V^{\left( 2m\right) }(x;\omega )$ is \begin{equation} \widetilde{V}^{\left( 2m\right) }(x;\omega )=V^{\left( 2m\right) }(x;\omega )-2v_{2m}^{\prime }(x;\omega )=V(x;\omega ),\ m\geq 1 \label{SUSYpartHerm} \end{equation and we recover the fact that the DBT $A\left( v_{n}\right) $ is a backward SUSY partnership. The eigenfunctions of $\widehat{H}^{\left( 2m\right) }(\omega )=-d^{2}/dx^{2}+V^{\left( 2m\right) }(x;\omega )$ corresponding to the energies $E_{k}(\omega )$ are given by \begin{equation} \psi _{k}^{\left( 2m\right) }(x;\omega )=\exp \left( -\int dxw_{k}^{(2m)}(x;a)\right) \sim \frac{1}{\sqrt{E_{2m+1+k}(\omega )}}\widehat A}\left( v_{2m}\right) \psi _{k}(x;\omega ), \end{equation that is, using Eq(\ref{Herm_Lag}) \begin{equation} \psi _{k}^{\left( 2m\right) }(x;\omega )\sim P_{\left( m,k\right) }(x)\frac \exp \left( -\omega x^{2}/4\right) }{L_{m}^{-1/2}\left( -\omega x^{2}/2\right) }, \label{foextHO} \end{equation where the polynomials \begin{equation} P_{\left( m,k\right) }(x)=\frac{1}{2}L_{m}^{-1/2}\left( -\omega x^{2}/2\right) H_{k+1}\left( \sqrt{\omega /2}x\right) +\sqrt{\omega /2 xL_{m-1}^{1/2}\left( -\omega x^{2}/2\right) H_{k}\left( \sqrt{\omega /2 x\right) \label{polHerm} \end{equation of respective degrees $2m+k+1$ constitute, with the constant $1$, an orthogonal family on the real line with respect to the weight \begin{equation} w_{m}(x)=\frac{\exp \left( -\omega x^{2}/2\right) }{\left( L_{m}^{-1/2}\left( -\omega x^{2}/2\right) \right) ^{2}}. 
\label{poidsHerm} \end{equation} Note that for $k=2l,\ l\in \mathbb{N}$, we have \begin{eqnarray} P_{\left( m,2l\right) }(x) &\sim &\sqrt{\omega /2}x\left( L_{m}^{-1/2}\left( -\omega x^{2}/2\right) L_{l}^{1/2}\left( \omega x^{2}/2\right) +L_{m-1}^{1/2}\left( -\omega x^{2}/2\right) L_{l}^{-1/2}\left( \omega x^{2}/2\right) \right) \\ &=&\sqrt{\omega /2}xL_{l}^{1/2}\left( \omega x^{2}/2\right) \notag \end{eqnarray where $L_{l}^{1/2}\left( z\right) $ is an EOP of the $L1$ series \cit {gomez4}. This is coherent with the fact that the 1D HO on is obtained as the singular limit at $a\rightarrow 0$ of the isotonic potential \begin{equation} V(x;\omega ,a)=\frac{\omega ^{2}}{4}x^{2}+\frac{a(a-1)}{x^{2}}-\omega \left( a+\frac{1}{2}\right) . \label{isotpot} \end{equation} For $a>0$ the presence of the centrifugal barrier restricts the definition domain to the positive half line$\ x>0$ and the energy spectrum include only the level $E_{k}\left( \omega \right) =k\omega $ associated to an even quantum number $k=2l$. The $L1$ series of rational extensions of $V(x;\omega ,a)$ is then built using DBT based on RS functions actually regularized via the same $\omega $ inversion $\Gamma _{\omega }$ that we used above for the HO \cite{grandati3}. Finally, for the odd values of the quantum number $k=2l+1$ we can write \begin{equation} P_{\left( m,2l+1\right) }(x)\sim \left( l+1\right) L_{m}^{-1/2}\left( -\omega x^{2}/2\right) L_{l+1}^{-1/2}\left( \omega x^{2}/2\right) -\sqrt \omega /2}xL_{m-1}^{1/2}\left( -\omega x^{2}/2\right) L_{l}^{1/2}\left( \omega x^{2}/2\right) . \end{equation} \section{Morse potential} The Morse potential with zero ground level ($E_{0}(a,b)=0$) is the second exceptional primary TSIP of the first category \cite{grandati}. It is given by \cite{morse,cooper,Dutt} \begin{equation} V(y;a,b)=b^{2}y^{2}-2\left( a+\frac{\alpha }{2}\right) by+a^{2},\ a,b>0 \label{potMorse} \end{equation where $y=\exp \left( -\alpha x\right) >0,\ x\in \mathbb{R}$. 
It possesses exactly $\left[ a\right] $ bound states ( $\left[ a\right] $ being the integer part of $a$) which are given by \begin{equation} \psi _{n}\left( x;a,b\right) \sim y^{a/\alpha -n}e^{-by/\alpha }\mathit{L _{n}^{2(a/\alpha -n)}(2by/\alpha ),\ n\in \left\{ 0,...,\left[ a\right] -1\right\} , \label{foMorse} \end{equation with the corresponding energies $E_{n}(a)=a^{2}-a_{n}^{2},$ where a_{k}=a-k\alpha $. In terms of the $y$ variable, the associated RS equation is \begin{equation} \alpha yw_{n}^{\prime }(y;a,b)+w_{n}^{2}(y;a,b)=V(y;a,b)-E_{n}\left( a\right) \end{equation \qquad and its solutions associated to the physical eigenstates Eq(\re {foMorse}) are \begin{equation} w_{n}(y;a,b)=w_{0}(y;a,b)+R_{n}(y;a,b), \label{RS functions Morse1} \end{equation wher \begin{equation} w_{0}(y;a,b)=-by+a \label{RS functions Morse2} \end{equation and \begin{eqnarray} R_{n}(y;a,b) &=&-\frac{E_{n}\left( a\right) }{a+a_{1}-2by-}\Rsh ...\Rsh \frac{E_{n}\left( a\right) -E_{j-1}\left( a\right) }{a_{j-1}+a_{j}-2by-}\Rsh ...\Rsh \frac{E_{n}\left( a\right) -E_{n-1}\left( a\right) } a_{n-1}+a_{n}-2by} \label{RS functions Morse3} \\ &=&-n\alpha +\alpha y\left( \log \mathit{L}_{n}^{2(a/\alpha -n)}(2by/\alpha )\right) ^{\prime }. \notag \end{eqnarray} The only parameters transformation under which the Morse potential Eq(\re {potMorse}) is covariant, is \begin{equation} \left( a,b\right) \overset{\Gamma _{a,b}}{\rightarrow }\left( \underset -a_{-1}}{\underbrace{-a-1}},-b\right) ,\left\{ \begin{array}{c} V(x;a,b)\overset{\Gamma _{a,b}}{\rightarrow }V(x;a,b)-E_{-1}\left( a\right) \\ w_{n}(x;a,b)\overset{\Gamma _{a,b}}{\rightarrow v_{n}(x;a,b)=w_{n}(x;-a_{-1},-b) \end{array \right. 
\label{discreteMorse} \end{equation where \begin{equation} \alpha yv_{n}^{\prime }(y;a,b)+v_{n}^{2}(y;a,b)=V(y;a,b)-E_{-(n+1)}\left( a\right) , \end{equation since $a_{k}\overset{\Gamma _{a,b}}{\rightarrow }a_{-(k+1)}$ and E_{n}\left( a\right) \overset{\Gamma _{a,b}}{\rightarrow a_{-1}^{2}-a_{-(n+1)}^{2}=E_{-(n+1)}\left( a\right) -E_{-1}\left( a\right) $. From Eq.(\ref{RS functions Morse2}) and Eq.(\ref{RS functions Morse3}), we deduce \begin{equation} v_{n}(x;a,b)=v_{0}(x;a,b)+Q_{n}(x;a,b), \label{MregRSfct1} \end{equation where \begin{equation} v_{0}(y,a,b)=by-a_{-1} \label{MregRSfct2} \end{equation and \begin{eqnarray} Q_{n}(y,a,b) &=&-\frac{E_{-(n+1)}\left( a\right) -E_{-1}\left( a\right) } -\left( a_{-1}+a_{-2}\right) +2by-}\Rsh ...\Rsh \frac{E_{-(n+1)}\left( a\right) -E_{-j}\left( a\right) }{-\left( a_{-j}+a_{-j-1}\right) +2by-}\Rsh ...\Rsh \frac{E_{-(n+1)}\left( a\right) -E_{-n}\left( a\right) }{-\left( a_{-n}+a_{-n-1}\right) +2by} \label{MregRSfct3} \\ &=&-n\alpha +\alpha y\left( \log \mathit{L}_{n}^{-2(a/\alpha +1+n)}(-2by/\alpha )\right) ^{\prime }. \notag \end{eqnarray} The Kienast-Lawton-Hahn's theorem ensures that for even values of $n$, Q_{n}(y,a,b)$ and then $v_{n}(x;a,b)$ are regular for every $y>0$, that is, every $x\in \mathbb{R}$. Applying the DBT $A\left( v_{n}\right) $ (see Eq \ref{transfoback2})) to $w_{k}$ gives \begin{equation} w_{k}(x;a,b)\overset{A\left( v_{n}\right) }{\rightarrow }w_{k}^{\left( n\right) }(x;a,b)=-v_{n}(x;a,b)+\frac{E_{k}\left( a\right) -E_{-(n+1)}\left( a\right) }{v_{n}(x;a,b)-w_{k}(x;a,b)}, \end{equation where $w_{k}^{\left( n\right) }(x;\omega )$ satisfies \begin{equation} -w_{k}^{\left( n\right) \prime }(x;a,b)+\left( w_{k}^{\left( n\right) }(x;a,b)\right) ^{2}=V^{\left( n\right) }(x;a,b)-E_{k}\left( a\right) , \end{equation with \begin{equation} V^{\left( n\right) }(x;a,b)=V(x;a,b)+2v_{n}^{\prime }(x;a,b)=V(y;a_{-1},b)+E_{-1}\left( a\right) -2\alpha yQ_{n}^{\prime }(y;a,b). 
\label{regextMorse} \end{equation} In the following we consider the case where $n$ takes even values $n=2m$. $V^{\left( 2m\right) }(x;a,b)$ is then regular on the positive half line and isospectral to $V(x;a,b)$ \begin{equation} V^{\left( 2m\right) }(x;a,b)\underset{iso}{\equiv }V(x;a,b) \end{equation} Again, as in the preceding case, the isospectrality is not strict since \begin{equation} v_{n}^{\prime }(x;a,b)+v_{n}^{2}(x;a,b)=V^{\left( 2m\right) }(x;a,b)-E_{-(n+1)}\left( a\right) , \end{equation} that is, $-v_{n}(x;a,b)$ is a regular RS function for the extended potential $V^{\left( 2m\right) }(x;a,b)$, associated with the eigenvalue $E_{-(n+1)}\left( a\right) <0$. The asymptotic behaviour of the corresponding eigenstate is \begin{equation} \psi _{-}^{\left( 2m\right) }(x;a,b)=\exp \left( +\int v_{2m}(x;a,b)dx\right) \underset{x\rightarrow \pm \infty }{\sim }e^{-\left( a+\left( 2m+1\right) \alpha \right) x}\exp \left( -\frac{b}{\alpha }e^{-\alpha x}\right) \label{fondextMorse} \end{equation} from which we deduce that $\psi _{-}^{\left( 2m\right) }$ is the fundamental state for $H^{\left( 2m\right) }$. The superpartner of the extended potential $V^{\left( 2m\right) }(x;a,b)=V(x;a,b)+2v_{2m}^{\prime }(x;a,b)$ is then defined as \begin{equation} \widetilde{V}^{\left( 2m\right) }(x;a,b)=V^{\left( 2m\right) }(x;a,b)+2\left( -v_{2m}^{\prime }(x;a,b)\right) =V(x;a,b),\ m\geq 1 \end{equation} and the DBT $A\left( v_{2m}\right) $ is a backward SUSY partnership. We recover here the results obtained by G\'{o}mez-Ullate, Kamran and Milson \cite{gomez} in a different way.
The excited physical eigenstate\ of $\widehat{H}^{\left( 2m\right) }(a,b)=-d^{2}/dx^{2}+V^{\left( 2m\right) }(x;a,b)$ at the energy E_{k}\left( a\right) ,\ k\geq 0,$ is given by (see Eq(\ref{foext})) \begin{equation} \psi _{k}^{\left( 2m\right) }(x;a,b)=\exp \left( \frac{1}{\alpha }\int d \frac{w_{k}^{\left( 2m\right) }(y;a,b)}{y}\right) \sim \frac{1}{\sqrt E_{k}\left( a\right) -E_{-(2m+1)}\left( a\right) }}\widehat{A}\left( v_{2m}\right) \psi _{k}(x;a,b). \label{foextMorse} \end{equation} Inserting Eq(\ref{MregRSfct2}), Eq(\ref{MregRSfct3}) and Eq(\ref{foMorse}) into Eq(\ref{foextMorse}) and using the following identities for GLP \begin{equation} \left\{ \begin{array}{c} \mathit{L}_{n}^{\left( \beta \right) }\left( z\right) +\mathit{L _{n-1}^{\left( \beta +1\right) }\left( z\right) =\mathit{L}_{n}^{\left( \beta +1\right) }\left( z\right) \\ z\mathit{L}_{n-1}^{\left( \beta +1\right) }\left( z\right) =(n+\beta \mathit{L}_{n-1}^{\left( \beta \right) }\left( z\right) -n\mathit{L _{n}^{\left( \beta \right) }\left( z\right) \end{array \right. 
\end{equation we obtain (in order to simplify the expressions we fix the $x$ scale such that $\alpha =1$) \begin{equation} \psi _{k}^{\left( 2m\right) }(x;a,b)\sim M_{a,k}^{(2m)}\left( z\right) \frac z^{a-k}e^{-z/2}}{\mathit{L}_{2m}^{-2(a+1+2m)}(-z)},\ \psi _{-}^{\left( 2m\right) }(x;a,b)\sim \frac{z^{a+1+2m}e^{-z/2}}{\mathit{L _{2m}^{-2(a+1+2m)}(-z)} \label{foextMorse2} \end{equation where $z=2by$ and \begin{equation} M_{a,k}^{(2m)}\left( z\right) =2\left( m+a+1\right) \mathit{L _{2m-1}^{-2(a+2m+1)}(-z)\mathit{L}_{k}^{2(a-k)}(z)-\left( k+1\right) \mathit L}_{k+1}^{2(a-k)}(z)\mathit{L}_{2m}^{-2(a+2m+1)}(-z) \label{polyM1} \end{equation which is a polynomial of degree $2m+k+1$ with \begin{equation} M_{a,k}^{(2m)}\left( 0\right) =-\frac{\left( 2a+2m+2\right) _{2m}\left( 2a-2k+1\right) _{k}}{(2m)!k!}, \end{equation $\left( a\right) _{n}$ being the usual Pochhammer function $\left( a\right) _{n}=a(a+1)...(a+n-1)$ \cite{magnus}$.$ From the orthonormality conditions $<\psi _{k}^{\left( 2m\right) }(x;a,b)\mid \psi _{k^{\prime }}^{\left( 2m\right) }(x;a,b)>=\delta _{k,k^{\prime }}$ we deduce that the polynomials \begin{equation} \left\{ \begin{array}{c} B_{-}^{\left( 2m\right) }\left( z,a\right) =1 \\ B_{k}^{\left( 2m\right) }\left( z,a\right) =z^{k+2m+1}M_{a,k}^{(2m)}\left( \frac{1}{z}\right) ,\ k\in \left\{ 0,...,\left[ a\right] -1\right\} \end{array \right. \end{equation are orthogonal on the positive half line with respect to the weight \begin{equation} w^{\left( 2m\right) }\left( z,a\right) =\frac{e^{-1/z}}{z^{2\left( a+2m\right) +3}\left( \mathit{L}_{2m}^{-2(a+1+2m)}(-1/z)\right) ^{2}}. \end{equation} \section{Radial effective Kepler-Coulomb} The effective radial Kepler-Coulomb (ERKC) potential with zero ground level $E_{0}(a)=0$) is the third and last exceptional primary TSIP of the first category \cite{grandati}. 
It is defined on the positive half line a \begin{equation} V(x;a)=\frac{a(a-1)}{x^{2}}-\frac{\gamma }{x}+V_{0}\left( a\right) ,\ \gamma >0,\ a>1 \label{potKC} \end{equation where $x>0$ and $V_{0}\left( a\right) =\gamma ^{2}/4a^{2}$. Its bound states are given by \begin{equation} \psi _{n}\left( x;a\right) =\exp \left( -\int dxw_{n}(x;a)\right) \sim x^{a}e^{-\gamma x/2a_{n}}\mathit{L}_{n}^{\left( 2a-1\right) }(\gamma x/a_{n}),\ n\geq 0, \label{foKC} \end{equation with the corresponding energies $E_{n}(a)=V_{0}\left( a\right) -V_{0}\left( a_{n}\right) $, where $a_{k}=a+k.$ The associated RS equation i \begin{equation} -w_{n}^{\prime }(x;a)+w_{n}^{2}(x;a)=V(x;a)-E_{n}\left( a\right) \label{RSeqMorse} \end{equation} The solutions of eq(\ref{RSeqMorse}) corresponding to the physical eigenstates are given by \begin{equation} w_{n}(x;a)=w_{0}(x;a)+R_{n}(x;a), \label{RS functions KC1} \end{equation where \begin{equation} w_{0}(x;a)=-\frac{a}{x}+\gamma /2a \label{RS functions KC2} \end{equation and \begin{eqnarray} R_{n}(y;a) &=&-\frac{E_{n}\left( a\right) }{w_{0}(x;a)+w_{0}(y;a_{1})-}\Rsh ...\Rsh \frac{E_{n}\left( a\right) -E_{j-1}\left( a\right) } w_{0}(x;a_{j-1})+w_{0}(x;a_{j})-}\Rsh ...\Rsh \frac{E_{n}\left( a\right) -E_{n-1}\left( a\right) }{w_{0}(x;a_{n-1})+w_{0}(x;a_{n})} \label{RS functions KC3} \\ &=&\frac{\gamma }{2a_{n}}-\frac{\gamma }{2a}-\left( \log \left( \mathit{L _{n}^{\left( 2a-1\right) }(\gamma x/a_{n})\right) \right) ^{\prime }. \notag \end{eqnarray} The only covariance transformation for the ERKC potentials is given by \begin{equation} a\overset{\Gamma _{a}}{\rightarrow }\underset{-a_{-1}}{\underbrace{1-a} ,\left\{ \begin{array}{c} V(x;a)\overset{\Gamma _{a}}{\rightarrow }V(x;a)-E_{-1}\left( a\right) \\ w_{n}(x;a)\overset{\Gamma _{a}}{\rightarrow }v_{n}(x;a)=w_{n}(x;-a_{-1}) \end{array \right. 
\label{discreteKC} \end{equation with \begin{equation} a_{k}\overset{\Gamma _{a}}{\rightarrow }1-a+k=-a_{-\left( k+1\right) },\quad E_{n}\left( a\right) \overset{\Gamma _{a}}{\rightarrow }\gamma ^{2}/4\left( \frac{1}{a_{-1}^{2}}-\frac{1}{a_{-(n+1)}^{2}}\right) =E_{-(n+1)}\left( a\right) -E_{-1}\left( a\right) . \end{equation} We then have \begin{equation} -v_{n}^{\prime }(x;a)+v_{n}^{2}(x;a)=V(x;a)-E_{-(n+1)}\left( a\right) \end{equation and from Eq.(\ref{RS functions KC2}) and Eq.(\ref{RS functions KC3}), we deduce \begin{equation} v_{n}(x;a)=v_{0}(x;a)+Q_{n}(x;a), \label{KCregRSfct1} \end{equation where \begin{equation} \left\{ \begin{array}{c} v_{0}(x,a)=\frac{a_{-1}}{x}-\frac{\gamma }{2a_{-1}} \\ Q_{n}(x,a)=-\frac{\gamma }{2a_{-(n+1)}}+\frac{\gamma }{2a_{-1}}-\left( \log \left( \mathit{L}_{n}^{\left( 1-2a\right) }(-\gamma x/a_{-(n+1)})\right) \right) ^{\prime } \end{array \right. \label{KCregRSfct2} \end{equation} If the argument of the GLP is positive, that is, if $a<n+1$ the Kienast-Lawton-Hahn theorem ensures that $Q_{n}(x,a)$ is regular for $x>0$ if $1-2a<-n$, that is, if \begin{equation} \frac{n+1}{2}<a<n+1 \end{equation} Another possibility to ensure the regularity of $Q_{n}(x,a)$ is to consider values of $a$ such that $a>n+1,$ where the argument of the GLP is now negative. From the Kienast-Lawton-Hahn theorem, we then deduce that in this case for each even value of $n=2m$, $Q_{2m}(x,a)$ is regular. 
The DBT $A\left( v_{n}\right) $ applied to $w_{k}$ gives \begin{equation} w_{k}(x;a)\overset{A\left( v_{n}\right) }{\rightarrow }w_{k}^{\left( n\right) }(x;a)=-v_{n}(x;a)+\frac{E_{k}\left( a\right) -E_{-(n+1)}\left( a\right) }{v_{n}(x;a)-w_{k}(x;a)}, \label{DBTKC} \end{equation where $w_{k}^{\left( n\right) }(x;a)$ satisfies \begin{equation} -w_{k}^{\left( n\right) \prime }(x;a)+\left( w_{k}^{\left( n\right) }(x;a)\right) ^{2}=V^{\left( n\right) }(x;a)-E_{k}\left( a\right) , \end{equation with \begin{equation} V^{\left( n\right) }(x;a)=V(x;a)+2v_{n}^{\prime }(x;a)=V(x;a_{-1})+E_{-1}\left( a\right) +2Q_{n}^{\prime }(x;a). \end{equation} In the cases where \begin{equation} \left\{ \begin{array}{c} \frac{n+1}{2}<a<n+1\ \qquad \quad \quad \left( i\right) \\ n=2m,\quad a>n+1,\text{ \ \ \ \ \ }\left( ii\right \end{array \right. \label{condregKC} \end{equation \ $V^{\left( n\right) }(x;a)$ is regular on the positive half line and isospectral to $V(x;a)$ \begin{equation} V^{\left( n\right) }(x;a)\underset{iso}{\equiv }V(x;a). \end{equation} We have also \begin{equation} v_{n}^{\prime }(x;a)+v_{n}^{2}(x;a)=V^{\left( n\right) }(x;a)-E_{-(n+1)}\left( a\right) , \end{equation that is, $-v_{n}(x;a)$ is a regular RS function for the extended potential V^{\left( n\right) }(x;a)$, associated to the eigenvalue $E_{-(n+1)}\left( a\right) <0$, when $\frac{n+1}{2}<a$. Moreover \begin{equation} \psi _{-}^{\left( n\right) }(x;a)=\exp \left( +\int v_{n}(x;a)dx\right) \sim \frac{x^{a-1}\exp \left( -\frac{\gamma }{2a_{-\left( n+1\right) }}x\right) } \mathit{L}_{n}^{\left( 1-2a\right) }(-\gamma x/a_{-\left( n+1\right) })}. \label{fondextKC} \end{equation} In the case $(ii)$ (see Eq.(\ref{condregKC})), $a_{-\left( 2m+1\right) }>0$ and $\psi _{-}^{\left( 2m\right) }$ is a physical eigenstate for $\widehat{H ^{\left( 2m\right) }$ with the lowest eigenvalue. 
In other words, $\psi _{-}^{\left( 2m\right) }$ is the fundamental state for $\widehat{H}^{\left( 2m\right) }$ and, as for the two preceding exceptional primary TSIP of the first category, the isospectrality is not strict. On the other hand, in the case $(i)$ ($a_{-\left( n+1\right) }<0$), $\psi _{-}^{\left( n\right) }$ is not in the physical spectrum and in this regime the isospectrality between \widehat{H}^{\left( n\right) }$ and $\widehat{H}$ becomes strict. Consider first the case $\left( ii\right) $. The superpartner of the extended potential $V^{\left( 2m\right) }(x;a)=V(x;a)+2v_{2m}^{\prime }(x;a)$ is given by \begin{equation} \widetilde{V}^{\left( 2m\right) }(x;a)=V^{\left( 2m\right) }(x;a)+2\left( -v_{2m}^{\prime }(x;a)\right) =V(x;a) \end{equation and the DBT $A\left( v_{2m}\right) $ corresponds to a backward SUSY partnership. The fundamental eigenstate of $\widehat{H}^{\left( 2m\right) }(a)=-d^{2}/dx^{2}+V^{\left( 2m\right) }(x;a)$ at the energy E_{-(2m+1)}\left( a\right) $ is \begin{equation} \psi _{-}^{\left( 2m\right) }(x;a)\sim \frac{x^{a-1}\exp \left( -\frac \gamma x}{2\left\vert a_{-\left( 2m+1\right) }\right\vert }\right) }{\mathit L}_{2m}^{\left( 1-2a\right) }(-\gamma x/\left\vert a_{-(2m+1)}\right\vert )} \end{equation and the excited eigenstates at energy $E_{k}\left( a\right) ,\ k\geq 0$ are (see Eq.(\ref{foDBT})) \begin{equation} \psi _{k}^{\left( 2m\right) }(x;a)\sim \frac{1}{\sqrt{E_{k}\left( a\right) -E_{-(2m+1)}\left( a\right) }}\widehat{A}\left( v_{2m}\right) \psi _{k}(x;a), \label{foextKC} \end{equation that is, \begin{equation} \psi _{k}^{\left( 2m\right) }(x;a)\sim x^{a-1}e^{-\gamma x/2a_{k}}\frac N_{a,k}^{(2m)}\left( x\right) }{\mathit{L}_{2m}^{\left( 1-2a\right) }(-\gamma x/\left\vert a_{-(2m+1)}\right\vert )}, \label{foextKC1} \end{equation where \begin{eqnarray} N_{a,k}^{(n)}\left( x\right) &=&\left( 1-2a\right) \mathit{L}_{k}^{\left( 2a-1\right) }(\gamma x/a_{k})\mathit{L}_{n}^{\left( 1-2a\right) }(-\gamma x/a_{-(n+1)}) \\ 
&&+\left( a-\frac{n+1}{2}\right) \mathit{L}_{k}^{\left( 2a-1\right) }(\gamma x/a_{k})\mathit{L}_{n}^{\left( -2a\right) }(-\gamma x/a_{-(n+1)})+\left( a \frac{k-1}{2}\right) \mathit{L}_{k}^{\left( 2a-2\right) }(\gamma x/a_{k} \mathit{L}_{n}^{\left( 1-2a\right) }(-\gamma x/a_{-(n+1)}) \notag \\ &&+\frac{k+1}{2}\mathit{L}_{k+1}^{\left( 2a-1\right) }(\gamma x/a_{k} \mathit{L}_{n}^{\left( 1-2a\right) }(-\gamma x/a_{-(n+1)})-\frac{n+1}{2 \mathit{L}_{k}^{\left( 2a-1\right) }(\gamma x/a_{k})\mathit{L}_{n+1}^{\left( -2a\right) }(-\gamma x/a_{-(n+1)}), \notag \end{eqnarray is a polynomial of degree $n+k+1$. From the orthonormality condition of the eigenstates of $\widehat{H}^{\left( 2m\right) }(a)$ we obtain that the function\bigskip s $C_{-}^{\left( 2m\right) }\left( x,a\right) =1$ an \begin{equation} C_{k}^{\left( 2m\right) }\left( x,a\right) =e^{-\gamma x/2a_{k}}N_{a,k}^{(2m)}\left( x\right) ,\ k\geq 0, \end{equation constitute an orthogonal family on the positive half line with respect to the weight \begin{equation} w^{\left( 2m\right) }\left( x,a\right) =\frac{x^{2\left( a-1\right) }} \left( \mathit{L}_{2m}^{\left( 1-2a\right) }(-\gamma x/\left\vert a_{-(2m+1)}\right\vert )\right) ^{2}}. \end{equation} In the case $(i)$, the situation is quite different since the ground state of $V^{\left( n\right) }$ is associated to the RS function $w_{0}^{\left( n\right) }(x;a)$ and the superpartner of the extended potential $V^{\left( n\right) }(x;a)$ is now given by \begin{equation} \widetilde{V}^{\left( n\right) }(x;a)=V^{\left( n\right) }(x;a)+2w_{0}^{\left( n\right) \prime }(x;a),\ n\geq 0, \label{SUSYpartextKC} \end{equation as for the $L1$ and $L2$ extensions of the isotonic oscillator \cit {grandati3} but in the ERKC case $V^{\left( n\right) }$ does not inherit of the shape invariance properties of the initial TSIP. 
The physical eigenstates for the energies $E_{k}\left( a\right) ,\ k\geq 0$ of $\widehat{H}^{\left( n\right) }(a)$ are given by \begin{equation} \psi _{k}^{\left( n\right) }(x;a,\gamma )\sim x^{a-1}e^{-\gamma x/2a_{k}}\frac{N_{a,k}^{(n)}\left( x\right) }{\mathit{L}_{n}^{\left( 1-2a\right) }(\gamma x/\left\vert a_{-(n+1)}\right\vert )}, \end{equation} and the functions \begin{equation} C_{k}^{\left( n\right) }\left( x,a\right) =e^{-\gamma x/2a_{k}}N_{a,k}^{(n)}\left( x\right) ,\ k\geq 0, \end{equation} are orthogonal on the positive half line with respect to the weight \begin{equation} w^{\left( n\right) }\left( x,a\right) =\frac{x^{2\left( a-1\right) }}{\left( \mathit{L}_{n}^{\left( 1-2a\right) }(\gamma x/\left\vert a_{-(n+1)}\right\vert )\right) ^{2}}. \end{equation} \section{Conclusion} In this paper we have shown that the method previously developed for the isotonic potential \cite{grandati3} can be used to generate in a direct and systematic way the solvable regular rational extensions for all the exceptional first category TSIP. This approach is based on DBT transformations built from excited states RS functions regularized via the use of discrete symmetries of the initial potential. The results are quite different from those obtained for the isotonic oscillator (which is the unique exceptional second category TSIP). Each exceptional first category TSIP admits only one series of regular rational extensions. Generally, as for the $L3$ series of rational extensions of the isotonic potential, it can be obtained only from regularized excited states associated with even quantum numbers and the DBT can be viewed as a backward SUSY partnership. The isospectrality is not strict and the spectrum of the extended potential presents a supplementary lower level.
The ERKC potential constitutes an exception since extended potentials can be also obtained from regularized excited states RS functions associated to odd quantum numbers for some range of values of the "angular momentum" parameter $a$. They are in this case strictly isospectral to the original potential. \section{Acknowledgments} I would like to thank A.\ B\'{e}rard, R.\ Milson and C.\ Quesne for stimulating discussions and very interesting suggestions.
1,108,101,565,880
arxiv
\section{INTRODUCTION} Control and planning are two key ingredients of autonomous robotic applications. Planning algorithms, in a broad sense, provide the agents with feasible trajectories given the constraints. On the other hand, control algorithms compute and execute the instantaneous low-level control signals to follow these trajectories. Ideally, feedback controllers are desired due to their capability to account for the uncertainties inherent in the modeling and measurement processes. However, constraints relating to the state, control and other environmental factors stipulate the use of optimization methods and optimal control theories. The motion, actuator and environmental constraint equations encountered in optimization applications are rarely linear and convex, which are the holy grail for optimization theory. Aside from some special cases, most nonlinear differential equations do not have explicit analytical solutions \cite{jordan1999nonlinear}. For optimal control problems with nonlinear dynamics, one can transform the optimization problem into nonlinear programming by using direct transcription methods \cite{betts2010practical}. Nonetheless, nonlinear differential equations are sensitive to the initial conditions along with other associated complexities which can cause the solutions to diverge \cite{mao2018successive, mao2019convexification}. One effective strategy to solve the nonlinear and non-convex trajectory optimization problems is to relax the problem structure by approximating the non-linear dynamics and constraints by convexification methods for real-time performance. The resulting convex optimal control problems can be efficiently solved in polynomial time thanks to the availability of state of the art convex solvers \cite{domahidi2013ecos, scs} and the increasing performance of modern computational hardware. 
A special variant of successive convexification methods (SCvx) \cite{mao2016successive, mao2018successive, mao2019convexification} have bolstered interest for solving non-convex optimal control problems in aerospace applications. There is a growing body of research showing flexible implementations of optimal control and trajectory generation for challenging motion and environment constraints. This is due to their theoretic convergence properties \cite{mao2016successive, mao2018successive, mao2019convexification} and real-time solvability by on-board computers. These algorithms can handle non-convex state and control constraints with guaranteed convergence to nonlinear dynamics in continuous time. It has been demonstrated that sequential convexification warrants a super-linear convergence rate, owing to its constraint satisfaction property through the iterated solutions \cite{szmuksuccessive, reynolds2019dual}. The synergy of the conic optimization solvers, convexification methods and the developments in hardware afford a solution for real-time trajectory optimization with complex state and control constraints. The resultant trajectories are feasible and locally optimal. Locally optimal and feasible trajectories are desirable due to the complications in obtaining a global optimal solution for nonlinear differential equations with real-time performance requirements. In this paper, we investigate the use of SCvx algorithms \cite{mao2016successive} for autonomous vehicle motion planning and control problem formulation, implementation and simulations. This paper is the first systematic application of SCvx algorithms from the aerospace literature to the autonomous driving problems. The SCvx algorithms are different than the classical Sequential Convex Programming (SCP) methods with additional mechanisms that ensure the strong guaranteed convergence which is not addressed in the classical SCP applications. 
In particular we formulate a full planner which gives a speed and path trajectory for autonomous driving as successive convexification problems and can be used as either a MPC controller or full planner. This improves on the frequently used iterative MPC methods by ensuring recursive feasibility and constraint satisfaction at each iteration, with proven theoretical convergence. We give the details on the use of SCVx for MPC application to path tracking and speed regulation for autonomous vehicles. Additionally, borrowing from the recent aerospace literature, we show an implementation of logical state-triggered constraints for an evasion maneuver in the simulations section. We structure this paper in five sections. After the introduction, we describe successive convexification briefly in Section~\ref{sec:sc}. The vehicle models and application of the SCvx methods are given in Section~\ref{sec:models}. We detail the formal definitions of the optimization problems in Section~\ref{sec:opt}. The simulation results are discussed in Section~\ref{sec:simres}. The paper is concluded in the Section~\ref{sec:conc}. \section{Successive Convexification - Current Theme} \label{sec:sc} SCvx algorithms were first presented in \cite{mao2016successive} for nonlinear motion equations and extended to handle non-convex state and control constraints in \cite{mao2017successive, mao2018successive}. The main idea is based on converting the nonlinear and non-convex equations to their convex counterparts and solving the convex sub-problems sequentially. At each iteration, the non-linear equations are linearized around the previous trajectory and non-convex constraints are transformed to convex constraints. Each of the sub-problems is solved to full optimality. Similar approaches have been proposed in the literature for autonomous vehicles. 
In \cite{carvalho2013predictive, gray2013integrated}, an iterative linearization method for Model Predictive Controllers (MPC) was proposed to generate the lateral and longitudinal motion control commands for highly nonlinear vehicle dynamics equations. Iterative linearization and more general SCP methods work well in practice; however, the solvers might halt and give an error for infeasible solutions over the iterations even though the original problem might have a feasible solution. This behavior is called artificial infeasibility \cite{mao2016successive}. This infeasibility may come from either linearization errors and the related constraint violations or an initial infeasible trajectory. An example of infeasibility caused by linearization is given in \cite{mao2019convexification}, where the system equations are linearized in a regime for which no feasible input exists. The other common problem in convexification is artificial unboundedness, which is also associated with linearization. In the literature, these problems are solved by introducing a constant radius trust region for each time step on the control variables \cite{carvalho2013predictive, gray2013integrated, alrifaee2018real}. To alleviate the constraint violation, hard constraints are converted to soft constraints on the decision variables in every constraint equation except the nonlinear dynamics. However, a feasible initial solution for the system dynamics equations is still required to proceed in the iterations. Therefore, soft constraints on the boundary equations only partially alleviate the infeasibility problems. In SCvx algorithms, these problems are handled by scheduling a trust region and adding a virtual control signal to the dynamics equations. These simple and effective interventions prevent artificial infeasibility and artificial unboundedness \cite{mao2018successive, mao2019convexification, mao2018tutorial}.
The trust region size is adjusted during the iterations, which contributes to the guaranteed convergence property. It is also an improvement over the constant trust region approach seen in the iterative linearization methods. Trust-region scheduling solves the artificial unboundedness. Meanwhile, the virtual control input makes the solution trajectories temporarily one-step reachable in the convex sub-problems, thus preventing the artificial infeasibility. Introducing a virtual control input to the optimization problem brings a more general approach to handling infeasibility than adding a slack variable to each constraint equation. The virtual control term, independently from the original control, acts as a soft constraint on the state equations, temporarily moving the states to a reachable region. On the other hand, studies of SCP algorithms used in autonomous driving applications do not report a general convergence property toward the nonlinear solution. One of the major contributions of the SCvx algorithms is the strong convergence property, which is a relatively new development in this direction. In the SCP applications, the convergence is not usually addressed and, when it is, only weak convergence is supported by heuristic arguments. The general strong convergence property is one of the most desired aspects of a solution for real-time applications \cite{mao2016successive, mao2018successive}. One more clear distinction of the SCvx algorithms from the classical SCP methods is the integration of the system differential equations. In the SCP literature for autonomous vehicles, the equations are integrated using a constant step size, i.e.\ using Euler integration with a single shooting propagation. In contrast, SCvx solves the integral equation between the time-steps using more accurate integration methods, additionally dividing the integration into sub-intervals.
The integration step is executed in a multiple shooting manner which also contributes the general convergence property of the SCvx algorithms. The differences of the SCvx algorithms over the conventional SCP methods are summarised in the following table. \begin{table}[] \caption{Distinctions of SCvx over General SCP Methods} \label{tab:my-table} \begin{tabular}{lcc} \cline{2-3} & Generic SCP Algorithms & SCvx \\ \hline Trust Region & \checkmark (constant) & \checkmark (scheduled -dynamic) \\ \hline Virtual Force & - & \checkmark \\ \hline Integration & single step & multi-step \\ \hline Propagation & single shooting & multiple shooting \\ \hline Convergence & rarely addressed & strong, guaranteed \\ \hline \end{tabular} \end{table} The SCvx algorithm and its variants have been implemented for various aerospace problems, from powered descent \cite{szmuk2018successive, reynolds2019state} to spacecraft rendezvous \cite{malyuta2019fast}, stochastic motion planning \cite{ridderhof2019minimum, vinod2018stochastic} and many others together with the application of state-triggered constraints \cite{szmuksuccessive}. In the thesis by Szmuk \cite{szmuksuccessive}, promising computational performance is reported for a range of problem configurations. In \cite{szmuk2017convexification} agile maneuvers of a quad-rotor were demonstrated with state-triggered obstacle constraints. Integer constraints are handled with successive convexification in \cite{malyuta2019fast} for fast trajectory generation. In this paper we follow the footsteps of aerospace literature to demonstrate the application of SCvx to autonomous driving tasks. \section{Autonomous Vehicle Applications} \label{sec:models} The race for launching the first commercially viable autonomous vehicle has been an ongoing effort in the vehicle industry. Autonomous vehicles are required to handle many tasks without human supervision. Motion planning is the foremost requirement for autonomy. 
Real-time optimization methods similar to MPC have been prevalent for planning and control in constrained vehicle motion studies over the last decade. Many problems need to be addressed with suitable strategies for autonomous driving applications. For example, in highly dynamic environments, autonomous vehicles should be able to achieve safe stopping given boundary conditions in an emergency situation. If this is not possible, the planning modules should be able to find an evasion maneuver and decide another sequence of actions. These decisions should be made in a split second. Even for a simple stopping, it is obvious that, in autonomous driving, various tasks are required to be addressed. The tasks for autonomous driving include, but are not limited to, the computation of the following: \begin{itemize} \item Yaw rate trajectories for yaw and roll stability during lane changing or taking a turn, \item Longitudinal velocity trajectories for cruise control, adjusting the car-following distance and safe stopping, \item Velocity and path planning for evasive maneuvers, \item Feasible trajectories that the low-level controllers can act on within the actuator limits \end{itemize} The above tasks all require fast computational modules. In this paper, we present the application of SCvx algorithms to a full plan with a velocity trajectory. We also demonstrate the use of logical state-triggers in a simple evasion maneuver. The successive convexification algorithms can be used for all of the above. These algorithms have been demonstrated to handle a broad range of stringent constraint requirements in real-time aerospace applications. We consider that SCvx algorithms bring improvements to autonomous driving on top of iterative MPC methods presented in \cite{carvalho2013predictive, augugliaro2012generation, carvalho2016predictive, gray2013integrated}. The improvements mainly come from the constraint satisfaction and theoretically proven guaranteed convergence. 
In the literature, various approaches to path and speed planning have been proposed with a breadth of constraint configurations and solution methods. A list and detailed comparison of various velocity planning algorithms for autonomous driving can be found in \cite{zhang2018toward}. In this study, a simple point mass kinematic model parametrized in polar coordinates is used to formulate the motion and constraint equations. In our study, we handle a similar kind of optimization formulation with a diverse range of constraint expressions in the formulation using the kinematic vehicle models. We formulate the velocity planning problem coupled with path tracking. \subsection{Vehicle Model} Under this section, we give a brief description of the vehicle models we use in the planning and low speed control problems. It is beneficial to include the vehicle models as it makes more clear the variables and constraints used in the optimization problem. Please note that, the SCvx algorithm is a general approach and admits models with any complexity. We can use the single track kinematic vehicle models with and without side-slip angle in planning simulations (see Fig. \ref{fig:single_track} and Eqs. \ref{eq:with_beta}). The first column in the equations represents a robot-car model \cite{laumond1998robot}. In this model, the rear-axle center tracks the given reference trajectory. The second column (right) represents the same model, but considers the side-slip angle \(\beta\) \cite{kong2015kinematic, rajamani2011vehicle}. 
\begin{align} \dot{X_w} &= V\cos{(\Psi)} & \dot{X_w} &= V\cos{(\Psi + \beta)} \nonumber\\ \dot{Y_w} &= V\sin{(\Psi)} & \dot{Y_w} &= V\sin{(\Psi + \beta)} \nonumber\\ \dot{\Psi} &= \frac{V}{l_r}\tan{(\delta_f)} & \dot{\Psi} &= \frac{V}{l_r}\sin{(\beta)} \nonumber\\ \dot{V} &= u_0 & \dot{V} &= u_0 \nonumber\\ \dot{\delta_f} &= u_1 & \dot{\delta_f} &= u_1 \nonumber\\ & &\beta &= \arctan{(\frac{l_r}{l_r + l_f}\tan{(\delta_f)})} \nonumber \\ \label{eq:with_beta} \end{align} \begin{figure}[ht] \centering \includegraphics[width=0.3\textwidth]{figures/single_track.png} \caption{Single Track Vehicle Model with Side-slip Angle} \label{fig:single_track} \end{figure} In Eq. (\ref{eq:with_beta}), the first three rows represent the differential equations of the global coordinates: $X_w,\; Y_w$ and heading angle $\Psi$. The parameters $l_r,\; l_f$ mark the location of the center of gravity with respect to rear and front axles. The acceleration $u_0 = \dot{V}$ and the steering rate $u_1 = \dot{\delta_f}$ are the control inputs to the models. To capture the effect of tire deflection on the dynamics, the side-slip angle $\beta$ is included in the models which can be used for path tracking at lower speeds. We transform the model into error dynamics by expressing the equations in the error coordinates as in \cite{gray2013integrated, carvalho2016predictive, qian2016model} (Fig. \ref{fig:error_coord}). \begin{figure}[ht] \centering \includegraphics[width=0.35\textwidth]{figures/spatial_coordinates.pdf} \caption{Road Aligned Error Coordinates} \label{fig:error_coord} \end{figure} In addition, we use arc-length parametrization instead of time in the implementations. The travelled distance $s$ is used as the integrating variable instead of time. The expression of this transformation for the side-slip angle model is given in Eqs. \eqref{eq:xprime} and \eqref{eq:arc-length}. A similar transformation can be obtained from the left column (robot-car) equations of Eq. \ref{eq:with_beta}. 
By these modifications, not only can the obstacle constraints along the path be expressed linearly, but also the trajectory problem is converted into a path planning problem. The arc length parametrization brings another advantage in terms of the planning horizon which can be chosen arbitrarily long in the SCvx algorithms. This makes the problem a fixed time optimal control problem without introducing an additional non-linearity due to the time scaling. \begin{align} \dot{x(t)} &= f(x(t), u(t)), \nonumber & x(s)^{\prime} &= \frac{dx}{dt}\frac{dt}{ds} = f(x(t), u(t)) \frac{1}{\dot{s}} \nonumber\\ \label{eq:xprime} \end{align} After the transformations and the arc-length parametrization, the final set of the equations in the error coordinates takes the form of \begin{equation} x^{\prime}(s) = F(x(s), u(s), \kappa(s)) \label{eq:arc-length} \end{equation} \noindent with the states: \begin{equation} \begin{aligned}[c] \dot{s} &= \frac{1}{1 -\kappa e_y} V \cos{(e_\Psi + \beta)}\\ e_y^{\prime} &= V \sin{(e_\Psi + \beta)} \frac{1}{\dot{s}} \\ V^{\prime} &= u_0 \frac{1}{\dot{s}} \nonumber \\ \end{aligned} \; \begin{aligned}[c] \Psi^{\prime} &= \frac{V}{l_r}\sin{(\beta)} \frac{1}{\dot{s}} \\ e_\Psi^{\prime} &= \Psi^{\prime} - \kappa(s)\\ \delta_f^{\prime} &= u_1 \frac{1}{\dot{s}} \end{aligned} \end{equation} The time $t_s^{\prime}= \frac{1}{\dot{s}}$ can also be included in the states to recover the time dependent trajectories. In the equations, the road curvature $\kappa(s)$ enters the model as the reference parameter. It can be taken from off-line look-up tables or approximated from sensor measurements. \subsection{Convex Sub-problems} The successive convexification method is built upon the convex sub-problems which arise from linearization around the previously computed or predicted trajectories. 
We can express the general structure of the convex sub-problems as in the following form; \begin{align} \min_{u(s)} \quad & J(s(t_0), s(t_f), x(s), u(s), \kappa(s)) \\ \textrm{s.t.} \quad & h(x(s), u(s), \kappa(s)) =0 \label{eq:equality_constraint} \\ & g(x(s), u(s), \kappa(s)) \leq 0 \label{eq:inequality_constraint} \end{align} \noindent where the equality and inequality constraints are given by \eqref{eq:equality_constraint} and \eqref{eq:inequality_constraint} and the boundary conditions of the integration are defined as $s \in [s(t=0),\; s(t=t_f)]$. The final distance is decided depending on the problem configuration. \subsection{Linearization, Discretization and Scaling} The linear time-varying model of the vehicle motion is approximated by the first order Taylor expansion as; \begin{align} x^{\prime}(s + ds) & \approx A(s) x(s)+B(s) u(s)+F(s) + w(s) \nonumber\\ A(s) &:=\left.\frac{\partial F}{\partial x}\right|_{\bar{z}(s)} \nonumber\\ B(s) &:=\left.\frac{\partial F}{\partial u}\right|_{\bar{z}(s)} \nonumber\\ F(s) &:= F(\bar{x}(s), \bar{u}(s),\bar{\kappa}(s)) \nonumber\\ w(s) &:=-A(s) \bar{x}(s)-B(s) \bar{u}(s) \nonumber \\ \label{eq:discretization} \end{align} \noindent where the bar notation represents the computed trajectory coordinates obtained by the previous solution ($\bar{z(s)}$) at which the linearization takes place with: $$\bar{z}(s):=\left[\bar{s}\;, \bar{x}^{T}(s)\;, \bar{u}^{T}(s)\right]^{T} \text { for all } s \in [0, 1].$$ The real arc-length $s_{real} \in [s_{t=0}, s_{t=t_f}]$ is re-scaled into the interval $s \in [0,\;1]$. In the speed planning study, the final point of the arc-length and the number of intervals are fixed. 
The equality and inequality constraints are convexified if they are not convex in a similar manner: \begin{equation} h_{i}(z(s)) \approx h_{i}(\bar{z}(s))+\left.\frac{\partial h_{i}}{\partial z}\right|_{\bar{z}(s)} \delta z(s) \end{equation} \begin{equation} g_{i}(z(s)) \approx g_{i}(\bar{z}(s))+\left.\frac{\partial g_{i}}{\partial z}\right|_{\bar{z}(s)} \delta z(s) \end{equation} \noindent with the definition of $\delta z(s):=z(s)-\bar{z}(s)$. The performance of various discretization methods for successive convexification was reported in \cite{szmuksuccessive, malyuta2019discretization}. According to these studies, the First Order Hold (FOH) has the lowest computational time among others (zero order hold, classical Runge-Kutta and pseudo-spectral methods). Therefore, we used the FOH method to discretize the state trajectories and the control using the fundamental matrix solution to the ordinary differential equations. In the FOH discretization, the parameters and inputs to the models are interpolated between the start and end points of each integration interval. 
In this case, the discrete equations take the following form \cite{szmuksuccessive, malyuta2019discretization} with the intervals $s_k = \frac{k}{K-1}$ and $k \in[0, K-1]$ for each step $k$: \begin{dmath} x^{\prime}(s) =A(s) x(s)+\lambda_{k}^{-}(s) B(s) u_{k}+\lambda_{k}^{+}(s) B(s) u_{k+1} + F(s) + w(s) \end{dmath} $\forall s \in\left[s_{k}, s_{k+1}\right]$ and the interpolating coefficients: \begin{align} u(s) &=\lambda_{k}^{-}(s) u_{k}+\lambda_{k}^{+}(s) u_{k+1}, & \forall s \in\left[s_{k}, s_{k+1}\right] \nonumber\\ \kappa(s) &=\lambda_{k}^{-}(s) \kappa_{k}+\lambda_{k}^{+}(s) \kappa_{k+1}, & \nonumber\\ \lambda_{k}^{-}(s)& :=\frac{s_{k+1}-s}{s_{k+1}-s_{k}} & \lambda_{k}^{+}(s):=\frac{s-s_{k}}{s_{k+1}-s_{k}} \nonumber\\ \end{align} \noindent with the fundamental matrix solution \cite{antsaklis2006linear, hespanha2018linear} to ODEs: \begin{equation} \Phi_{A}\left(\xi, s_{k}\right)=I_{n_{x} \times n_{x}}+\int_{s_{k}}^{\xi} A(\zeta) \Phi_{A}\left(\zeta, s_{k}\right) d \zeta \end{equation} \noindent where $n_x$ is the number of states in the equations. Using the properties of the fundamental matrix (Theorem II.1 in \cite{malyuta2019discretization}) one can arrive at the non-homogeneous solution of the Linear Time Varying (LTV) system equations as: \begin{align} x_{k+1} &=A_{k} x_{k}+B_{k}^{-} u_{k} + B_{k}^{+} u_{k+1} + F_{k} + w_{k} \\ A_{k} &:=\Phi_{A}\left(s_{k+1}, s_{k}\right) \\ B_{k}^{-} &:=A_{k} \int_{s_{k}}^{s_{k+1}} \Phi_{A}^{-1}\left(\xi, s_{k}\right) B(\xi) \lambda_{k}^{-}(\xi) d \xi \\ B_{k}^{+} &:=A_{k} \int_{s_{k}}^{s_{k+1}} \Phi_{A}^{-1}\left(\xi, s_{k}\right) B(\xi) \lambda_{k}^{+}(\xi) d \xi \\ F_{k} &:=A_{k} \int_{s_{k}}^{s_{k+1}} \Phi_{A}^{-1}\left(\xi, s_{k}\right) F(\xi) d \xi \\ w_{k} &:=A_{k} \int_{s_{k}}^{s_{k+1}} \Phi_{A}^{-1}\left(\xi, s_{k}\right) w(\xi) d \xi \end{align} These matrices are used as the inputs to the solvers to compute the motion equation constraints in the implementation. 
One more subtlety in the numerical optimization is scaling. In general, the states in the equations do not have a similar range of magnitude. This may introduce inconsistencies into the numerical solution. To prevent such problems during the optimization, it is a common practice to normalize the states. One way to do scaling is with a linear transformation. We applied the following transformation to all of the states $x$ and the inputs $u$ \cite{reynolds2019dual, gill1981practical, ross2018scaling}; \begin{gather} x =D_{\hat{x}} \hat{x} + C_{\hat{x}} \label{eq:scalingx}\\ u =D_{\hat{u}} \hat{u} + C_{\hat{u}} \label{eq:scalingu}\\ \nonumber \end{gather} \noindent where $\hat{x}$ and $\hat{u}$ are the normalized state and control variables. The solver seeks a solution in the normalized variables denoted by the hat notations in \eqref{eq:scalingx}, \eqref{eq:scalingu}. The scaling coefficient matrices $D_{\hat{x}},\; D_{\hat{u}} $ and the centering vectors $C_{\hat{x}}, \; C_{\hat{u}}$ can be computed from the maximum and minimum boundary values of the variables. \subsection{Virtual Force for Artificial Infeasibility and Trust Region for Artificial Unboundedness} A virtual force vector $\nu_k \in \mathcal{R}^{n_x}$ is added to the system equations as an input to prevent artificial infeasibility, making the states one-step reachable: \begin{equation} x_{k+1}=A_{k} x_{k}+B_{k}^{-} u_{k}+B_{k}^{+} u_{k+1}+w_{k}+\nu_{k} \end{equation} The $\ell_1$ norm is used in the cost function of the virtual force to promote sparsity \eqref{eq:l1}. A high penalty weight $w_{\nu}$ is assigned in the cost for the virtual force so that the solver uses it only when necessary. 
The following cost is added to the optimization objective cost function: \begin{equation} J_{v}(\nu):=w_{\nu} \sum_{k \in \overline{\mathcal{K}}}\left\|\nu_{k}\right\|_{1} \label{eq:l1} \end{equation} As the linearization step might introduce unboundedness, the search space in the optimization variables $[x-x_k,\;u-u_k]$ is bounded by a trust region. Two forms of trust region formulation are practised in the literature. One can add a hard trust region constraint into the optimization formulation or a trust region cost can be added to the optimization cost using the soft trust region method \cite{benedikter2019convex, szmuksuccessive}. We used the former one: \begin{equation} \left\lVert \delta x_{k} \right\rVert_1 + \left\lVert \delta u_{k} \right\rVert_1 < \rho_{tr} \end{equation} \noindent where $\delta x_{k} := x - x_k,\;\delta u_{k} := u - u_k,$ and $\rho_{tr}$ is the trust region radius which is scheduled depending on the accuracy of the linear approximation in the model. The details of the trust region scheduling algorithm are given in \cite{mao2018successive, mao2018tutorial}. \subsection{State-Triggered Constraints} In some applications, we may need to impose a constraint only when a certain criterion is met in a continuous optimization problem. An evasion maneuver can be given as an example of this case. If a collision is obvious on the way through a trajectory plan we can reformulate the alternatives in a continuous optimization to make the autonomous agent perform another maneuver. In general, embedding logical constraints requires stringent treatment, and mixed integer programming is involved. There is specialized optimization software for this purpose. Szmuk et al. in \cite{szmuk2018successive_trig} proposed state-triggered constraint formulation in their successive convexification applications and showed its use in different optimization problems improving the method in \cite{reynolds2019dual, reynolds2019state, szmuk2017convexification}. 
The underlying structure of continuous state-triggered constraint formulation is very similar to the linear complementarity problem \cite{cottle1992linear}. Assume that we want to enforce some constraint equations $c(z) = 0$ given some conditions $g(z) \le 0$, where the variable $z \in \mathcal{R}^{n_z}$ belongs to the parent optimization problem. The formal definition follows as: \begin{align} \sigma & \geq 0 \nonumber\\ g(z)+\sigma & \geq 0 \nonumber\\ \sigma c(z) & \leq 0 \label{eq:trig_c} \end{align} \noindent where a new slack variable $\sigma \in \mathcal{R}_{++}$ is introduced. Equation \eqref{eq:trig_c} admits an analytical solution with $\sigma^{*}:=-\min (g(z), 0)$ (please see \cite{szmuk2018successive_trig, reynolds2019state,reynolds2019dual } for the details). Thus the state-triggered constraints in a continuous optimization can be formulated as $h(z):=-\min (g(z), 0) \cdot c(z) \leq 0$. The variable $z$ can take any parameter from the previously solved trajectories in the successive convexification. In our demonstration, we devised a simple evasion maneuver in which, if an autonomous vehicle cannot reach a desired speed (let's say 1 m/s) at the terminal point of the trajectory, we request the vehicle to avoid a virtual obstacle at the terminal location. \section{Problem Formulations in Planning and Control} \label{sec:opt} We applied the successive convexification methods to full planning (speed and path) and MPC path tracking problems. However, for the sake of clarity, we narrow down the application to only planning problems in the paper. The method can be extended to MPC formulation by including a receding horizon. Expressing the path region and obstacle constraints as well as the terminal conditions is straightforward with arc-length parameterization. Therefore, we used arc-length parametrization in the solution. 
\subsection{Application of SCvx for Speed Planning} We consider the scenarios of reducing or increasing the vehicle speed within a given travelling distance while the car is following a curved path, with state and actuator constraints. The main objective is to arrive at a speed value at the end boundaries of the solution. For example, if there is a pedestrian in a certain distance, the autonomous vehicle must be able to stop before the pedestrian location. One can make the problem more complex by adding an obstacle avoidance constraint in the speed planning problem as well. Thus, the problem becomes full planning problem in which the path avoiding the obstacle needs also be generated along with the speed plan. We simulated various scenarios using the vehicle models given by Eq. \eqref{eq:with_beta}. In planning algorithms, less complex models are preferred in terms of computational speed. Therefore, we give the results of the planning for only the vehicle model without the side-slip angle formulation given in the first column of Eq. \eqref{eq:with_beta}. The arc-length parametrization introduces singularity around the point $\dot{s} = 0$ in the equations as the final speed is required to be zero for the stopping problems. For planning applications this singularity can be ignored, however when recovering the time dependent control signal it can be avoided by defining a lower bound of speed other than zero such as $V_{final} = 0.5 \; [m/s]$. From this speed value, we assume that the vehicle can be stopped safely. 
The formal definition of the speed planning problem given the boundary conditions is formulated in the discrete form with $s := s_{[1:K]}$ as follows: \begin{align} \min_{u(s_{1:K})} \quad & J(s(t_0), s(t_f), x(s), u(s), \kappa(s)) \\ \textrm{s.t.} \quad & h_i(x(s), u(s), \kappa(s)) =0 \\ & g_i(x(s), u(s), \kappa(s)) \leq 0 \end{align} where the controls are acceleration and steering rate and the cost functionals are given by: \begin{align} J(s, x, u, \kappa) & = J_{e_y} + J_{e_\Psi} + J_{\nu} + J_{jerk} + J_{u} + J_{N = s_f} \nonumber\\ J_{e_{y, \Psi}} &= w_{e_{y, \Psi}} \left\lVert e_{_{y, \Psi}{[1:K]}} \right\rVert_2 & \nonumber \\ J_{jerk} &= w_{jerk} \left\lVert \Delta u_{0_{[1:K-1]}} \right\rVert_2 \nonumber \\ J_u &= w_{u_0} \left\lVert u_{0_{[1:K]}} \right\rVert_2 + w_{u_1} \left\lVert u_{1_{[1:K]}} \right\rVert_2 \nonumber\\ J_{N=s_f} &= w_{N} \left\lVert V_{[K]} - V_{N=s_f} \right\rVert_2 \nonumber \\ \label{eq:discrete_opt} \end{align} \noindent where $w_{(.)}$ represents the weights of the associated variables. In the implementation, all the weights are set as unity except the virtual force weight which must be set sufficiently high. We included longitudinal jerk cost to reduce the rapid changes in the acceleration considering the comfort requirements. The jerk cost is formulated as the two-norm of the difference of successive acceleration inputs. Since the dynamics are parametrized by the arc-length, this jerk expression is called pseudo-jerk for the longitudinal motion. The last term $J_{N=s_f}$ captures the terminal constraint for the desired speed at the end of the planning horizon. The desired speed can also be enforced in the constraint equations. Although the range of controls is constrained in the optimization, one can define additional cost to further reduce the amount of control effort exerted on the system. 
The lateral deviation and deviation of heading angle $J_{e_{y, \Psi}}$ can be enforced to have the vehicle follow the prescribed path while performing the main objectives (i.e.\ while trying to stop at the given horizon or avoiding an obstacle). We formulated the cost terms using the norms of the variables as recommended in the CVX optimization library \cite{CVXeliminating} as we use the Second Order Conic Programming (SOCP) solver ECOS \cite{domahidi2013ecos} for solving the problems. We enforced the following state and control constraints: \begin{equation} \begin{aligned}[c] V & \leq V_{max}, \; V \in \mathcal{R}_{++} \\ \lvert \delta \rvert & \leq 27 \; [deg] & \\ x(s_0) & = x_{initial} \\ x(s_f) & = x_{final} \\ \end{aligned} \; \begin{aligned}[c] \lvert \dot{\delta_f} \rvert & \leq 60 \; [deg/sec] & \\ \left\lVert \begin{bmatrix} a_{x}, & a_{y}\\ \end{bmatrix} \right\rVert_2 & \leq \mu g & \label{eq:friction0}\\ u(s_0) &= u_{initial} \\ u(s_f) &= u_{final} \\ e_y(s) & \leq p(s) \\ \end{aligned} \end{equation} In the constraint equations, $e_y(s) \leq p(s)$ can be used for the boundary of the road or to define the obstacle avoidance distance along the path. It can be a piece-wise linear function or a nonlinear function that can be convexified. In the simulations, we locate a virtual obstacle along the center line the vehicle tries to follow. When the vehicle reaches the obstacle region at the $20$--$24^{th}$ discretization steps (corresponding to the region between 25 and 30 meters), it keeps a distance of at least 0.5 meters from this region on the right. The acceleration constraint, $\lVert [a_{x}, \; a_{y}] \rVert_{2}\leq \mu g$, captures the friction circle constraint (Kamm’s circle) with the tire road adhesion coefficient $\mu \in [0, \; 1]$. Once again, this is a simplified assumption for capturing the tire force limit and used for demonstration only. 
The aim is not to formulate a full-fledged vehicle model with tires but to show the use of SCvx algorithms in the autonomous driving domain. One can design models and problems serving the objective in their design. The rate constraints in time and arc-length parametrization can be converted in either direction by simple algebraic manipulations when forming the constraint equations by affine relations. Given the formal description of the problem, speed profile trajectory optimization can be described as having the vehicle reach a terminal speed at the end of a given road section, while following a prescribed path and respecting the state and control constraints. One important point in the velocity planning is to guess an initial solution. The initial solution can be prepared by generating an input sequence and integrating the equations for the given inputs, or interpolating the state and control variables between the boundary conditions. Due to the virtual force in the model, the predicted trajectories are not required to be feasible. In our simulations, in addition to linear interpolation, we initialize the steering angle related variables from the Ackermann steering geometry as the road section and curvature are all known. \subsection{Use of State-Triggered Constraints for an Evasion Maneuver} The continuous formulation of state-triggered constraints allows us to formulate logical constraints in continuous optimization problems. Otherwise, one would use a specialized software suite to be able to solve logically constrained problems. Another advantage of the state-triggered constraints is the reduction of the constraint variables in the problem. For example, regions in the state-space domain where the constraints need not be enforced can be defined within this framework. There is no need to define a constraint equation along the path where the constraints are inactive. We show the use of state-triggers with a simple evasion maneuver. 
Similar to the full planner example given above, we set a terminal distance of 50 meters and an initial speed such that the vehicle cannot reach the final desired speed from the given speed. For this example, we set the initial speed to 25 [m/s] and the final speed to 0.5 [m/s]. From the initial speed defined, the car cannot reach the final speed exactly and its speed at the final distance is around 2 [m/s]. The only difference from the obstacle-free scenario presented in the previous section is the initial speed. We increased the initial speed from 20 to 25 [m/s] so that the car cannot stop in the given range. The state-trigger constraint $g(z) = [V_{final} \ge 1]$ is defined, which is triggered if the vehicle's terminal speed is greater than one meter per second. If this condition is encountered during the solution, we requested the vehicle to start an evasion maneuver 2.5 meters before the final point (last two discrete points in the simulations). The constraint condition therefore becomes $c(z)= [e_{y_{N-2:N}} \ge 1]$, keeping a distance of at least one meter from the left in the last two discrete points of the solution trajectory. \section{Simulation Results} \label{sec:simres} We solved the speed planning problem with several constraint configurations. We enforced the desired terminal speed in the objective function. The final arc-length can be chosen arbitrarily (depending on the problem configuration). As an example, if there is a pedestrian 50 meters ahead and the car is requested to stop within this range before the pedestrian, the total arc-length in this scenario can be chosen as 50 meters. We observed convergence up to 100 meters total arc-length with 25-50 discretization steps. We present the result of two scenarios. The initial speed and the road-tire friction coefficient $\mu$ are set to 20 [m/s] and 0.6 respectively for both of the scenarios. In the first, the vehicle is requested to slow down to 0.5 [m/s] within 50 meters. 
In the second scenario, the vehicle is requested to slow down and reach a terminal speed similar to the first one but with an additional obstacle constraint. A virtual obstacle is located between the 20-24 steps of the discretization out of 40. In this case, the vehicle must satisfy two objectives: slowing down to a terminal speed and avoiding an obstacle while doing so. We formulated obstacle avoidance to ensure that the vehicle passes the obstacle region from the left or right, keeping a distance of at least 0.5 meters from the obstacle. Please note that this is a simple path and velocity plan demonstration. In planning algorithms, using simple models is customary, assuming that the plan can be refined and passed to more accurate controllers and planners. Therefore, for demonstrating the algorithms, we simply ignore the vehicle geometry. Fig. (\ref{fig:vx_vp}) shows the evolution of the vehicle longitudinal speed on the road section for the two scenarios while the corresponding paths are given in Fig. (\ref{fig:path_vp}). \begin{figure}[h] \centering \includegraphics[width=0.5\textwidth]{figures/Vx.pdf} \caption{Evolution of Speed Trajectories with and without Obstacle Avoidance} \label{fig:vx_vp} \end{figure} In Fig. (\ref{fig:vx_vp}) the initial trajectories are plotted in red with a dashed line, whereas the final trajectories are plotted with a dot-marked continuous line in black. As shown in the figure, the trajectories converge to a solution. It takes more iterations to converge in the obstacle case. \begin{figure}[h] \centering \includegraphics[width=0.5\textwidth]{figures/velplan_trj.pdf} \caption{Evolution of Paths with and without Obstacle Avoidance} \label{fig:path_vp} \end{figure} The resolution of the trajectory, the number of discretization steps and the final distance are problem dependent. We tested different final distances and numbers of discretization steps and were able to solve the planning problem with several combinations. 
One can use this flexibility to further improve the solutions by first solving with a low resolution and improving the optimization mesh if necessary. We give the computed control inputs for the velocity planning with obstacle problem only in Fig. (\ref{fig:inputs_vp}) due to limited space. The first row of the figure presents the evolution of computed acceleration inputs to the models. In the middle row, the steering state variable is shown. The last row is the steering rate control input to the model. All the input constraint equations are satisfied as expected. \begin{figure}[h] \centering \includegraphics[width=0.48\textwidth]{figures/inputs_single.pdf} \caption{Evolution of Controls for Speed Planning Problem} \label{fig:inputs_vp} \end{figure} Constraint satisfaction is clearer in Fig. (\ref{fig:acc_vp}) which shows the magnitude of the combined acceleration vectors. The solution here respects the friction circle constraint which is the dashed-line in the figure. \begin{figure}[H] \centering \includegraphics[width=0.4\textwidth]{figures/accf.pdf} \caption{Friction Circle Constraint - with Obstacle} \label{fig:acc_vp} \end{figure} The simulation results for the state triggers are shown in Fig. (\ref{fig:evasion}). \begin{figure}[H] \centering \includegraphics[width=0.4\textwidth]{figures/evasion.pdf} \caption{Global Trajectory - Result of Active State-Triggered Constraint} \label{fig:evasion} \end{figure} In the figure, it is shown that the vehicle keeps a distance of at least one meter on the left from a virtual obstacle located at the end of the path. This is because the solver cannot find a speed trajectory reaching the desired speed (0.5 m/s) at the terminal location. 
In this case, the state-triggered constraint is activated and the algorithm gives a solution to the second optimization problem, the evasion maneuver, by which the car avoids a virtual obstacle located at the terminal point, passing the obstacle on the left at a specified distance. \section{DISCUSSION and CONCLUSIONS} \label{sec:conc} In this paper, we presented the application of SCvx methods to vehicle models for velocity planning and evasion maneuver design with a state-trigger. The vehicle models are arc-length parametrized to flatten the coordinates of the curved paths. In the planning algorithm, simplified models, i.e.\ point-mass kinematic models, are used to find a feasible trajectory. In the spirit of this custom, we have not used a complex model to formulate the planning problems but we kept some complexity at a certain level to formulate realistic situations. We used uniform discretization intervals and demonstrated the application of SCvx algorithms to the vehicle motion planning and control problems. The SCvx methods are algorithmic methods that bring about the possibility to solve optimal trajectory generation problems in real-time. The SCvx methods bring improvements in convergence by constraint satisfaction on top of the iterative MPC algorithms in the literature. There is a wide range of methods that facilitate convergence and increase accuracy. Among them are the well-known pseudo-spectral methods which integrate the trajectories with non-uniform discretization points. One can further improve the application of SCvx making use of the pseudo-spectral method if accuracy and precision are of primary interest. We implemented the problem structure in the Python environment as it is easier to manipulate the code and visualize the results than implementing the simulation in C++. The computational performance of the algorithms in Python is not suitable for real-time implementation for our simulations. 
The computations take 2.5--3 seconds for the planning algorithms. This might be an acceptable computational time; however, our initial C++ implementation for path tracking provides a solution at 40 Hz. We did not make use of strategies such as customized solvers \cite{dueri2014automated}, or parallelizing the discretization steps to improve computation times in this study. The SCvx algorithms together with the listed computational strategies are a promising and viable solution for autonomous vehicle planning and control applications. In the future, we plan to experiment with these techniques on a real car and develop more flexible obstacle avoidance constraint implementations. We will release the implementation of the algorithms under the Autoware open-source software repository \cite{kato2018autoware, aw:autoware_repo}. \section*{ACKNOWLEDGMENT} We would like to thank Sven Niederberger for sharing his implementations of SCvx algorithms in both Python and C++ on GitHub \cite{steven}. We are inspired by his code templates. \bibliographystyle{ieeetr}
1,108,101,565,881
arxiv
\subsection{Results} Equip the sphere $S^2$ with its standard symplectic form $\omega$ scaled so that $\int_{S^2} \omega =2$. Let $L_{1,1}$ be the monotone Clifford torus (product of equators) in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega).$ Our first result is the following. \begin{theorem}\label{one} The Clifford torus $L_{1,1}$ is a maximal integral packing of $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega).$ \end{theorem} For real numbers $a,\,b>0$, consider the symplectic polydisk $$P(a,b) =\left\{(z_1, z_2) \in {\mathbb C}^2 \, \mid \, \pi|z_1|^2<a,\,\pi|z_2|^2<b\right\} \subset {\mathbb R}^4 .$$ Identifying $L_{1,1}$ with the standard Clifford torus in ${\mathbb R}^4$, Theorem \ref{one} implies that $L_{1,1}$ is a maximal integral packing of each $P(a,b)$ with $1<a,\,b < 2$. If $a$ and $b$ are both greater than $2$, then a natural candidate for a maximal integral packing of $P(a,b)$ is the collection of integral Lagrangian tori $$\{L_{k,l} \mid k,l \in {\mathbb N}, k \leq \lfloor a \rfloor,\, l \leq \lfloor b \rfloor\},$$ where $L_{k,l}$ is the product of the circle about the origin bounding area $k$ in the $z_1$-plane with the circle about the origin bounding area $l$ in the $z_2$-plane. The analogous packing in dimension two is always maximal. Our second result shows that, in dimension four, this candidate always fails. \begin{theorem}\label{two} If $\min(a,b) >2$, then $\{L_{k,l} \mid k,l \in {\mathbb N}, k \leq \lfloor a \rfloor,\, l \leq \lfloor b \rfloor\}$ is not a maximal integral packing of $P(a,b)$. For every $\epsilon>0$ there is an integral Lagrangian torus $L^+$ in $$P(2+\epsilon, 2+\epsilon) \smallsetminus \{L_{k,l} \mid k,l \in \{1,2\}\}.$$ \end{theorem} \subsection{Overview} The first step in our proof of Theorem \ref{one} is to show that any integral Lagrangian torus in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ is actually monotone. 
This follows from the work of Hind and Opshtein in \cite{ho}, and is proved in Lemma \ref{monotone} below. Arguing by contradiction, we then assume there is a monotone Lagrangian torus $\mathbb{L}$ in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ that is disjoint from the Clifford torus $L_{1,1}$. The work of Ivrii in \cite{ivriit}, and Dimitroglou-Rizell, Goodman, and Ivrii in \cite{rgi}, implies that there is a finite energy holomorphic foliation $\mathcal{F}$ of $S^2 \times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$ which has a normal form near $\mathbb{L}$ and $L_{1,1}$ (see Section \ref{double}). We use $\mathcal{F}$ to establish the existence of two symplectic spheres, $F$ and $G$, in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ (see Proposition \ref{existence2} and Proposition \ref{existence3}). Both $F$ and $G$ represent a homology class of the form $(1,d) \in H_2(S^2 \times S^2; {\mathbb Z})={\mathbb Z}^2$ for some large $d$. They also have special intersection properties with the leaves of $\mathcal{F}$ and with each other (see Section \ref{FG} and Proposition \ref{intcount}). Using the spheres $F$ and $G$, together with the operations of blow-up, inflation, and blow-down, we then alter the ambient symplectic manifold away from $\mathbb{L} \cup L_{1,1}$ to obtain a new monotone symplectic manifold, $(X, \Omega)$. This new manifold is symplectomorphic to $(S^2 \times S^2, (d+1)(\pi_1^* \omega + \pi_2^*\omega))$ and $\mathbb{L}$ and $L_{1,1}$ remain disjoint and monotone therein. However, the images (transforms) of the spheres $F$ and $G$ in $(X, \Omega)$ are now in the class $(1,0)$ and their existence implies, by the work of Cieliebak and Schwingenheuer in \cite{cisc}, that $\mathbb{L}$ and $L_{1,1}$ must both be Hamiltonian isotopic to the Clifford torus in $(X, \Omega)$. It then follows from standard monotone Lagrangian Floer theory, \cite{oh}, that it is not possible for $\mathbb{L}$ and $L_{1,1}$ to be disjoint. 
This contradiction completes the proof of Theorem \ref{one}. To prove Theorem \ref{two} we construct, for every $\epsilon>0$, an explicit embedding of the closure of $P(1,1)$ into $P(2+\epsilon, 2+\epsilon) \smallsetminus \{L_{k,l} \mid k,l \in \{1,2\}\}$, using a time-dependent Hamiltonian flow. The desired Lagrangian, $L^+$, is the one on the boundary of the image. \subsection{Commentary and further questions} Given that Theorem \ref{one} is reduced to the problem of detecting intersection points of two monotone Lagrangian tori, using \cite{ho}, it is natural to ask whether Lagrangian Floer theory (rigid holomorphic curves) can also be used to prove Theorem \ref{one} directly. To the knowledge of the authors this is not yet possible. The following result seems to be as close to a proof of Theorem \ref{one} as one can currently get using Lagrangian Floer theory. \begin{theorem} \label{lag} Suppose that $L$ is a monotone Lagrangian torus in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$. If the Lagrangian Floer homology of $L$, with respect to some ${\mathbb C}^*$-local system, is nontrivial, then $L$ must intersect $L_{1,1}$. \end{theorem} This follows from work of Evans and Lekili in \cite{evle} which implies that the Clifford torus split-generates the monotone Fukaya category of $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$. It is not known whether there exist monotone Lagrangian tori in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ whose Lagrangian Floer homology is trivial for every choice of ${\mathbb C}^*$-local system. In \cite{vianna}, Vianna constructs a countably infinite collection of monotone Lagrangian tori in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$, no two of which are Hamiltonian isotopic. Each of the tori in Vianna's collection satisfies the hypothesis of Theorem \ref{lag}. The following question, in the spirit of Theorem \ref{one}, remains unresolved. 
\begin{question}\label{always} Does every pair of monotone Lagrangian tori in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ intersect? \end{question} Progress on other aspects of the study of disjoint Lagrangian tori has also recently been made in two related works by Mak and Smith, in \cite{maksmi}, and by Polterovich and Shelukhin, in \cite{polshe}. Let $\{\gamma_i\}$ be a collection of disjoint circles bounding disks of the same area, and let $E$ be the equator in the sphere $S^2$. In \cite{maksmi} and \cite{polshe}, it is shown that, with respect to certain nonmonotone symplectic forms on $S^2 \times S^2$, packings of the form ${\mathcal L} = \sqcup \gamma_i \times E$ are maximal in the sense that any Lagrangian torus Hamiltonian isotopic to $\gamma_1 \times E$ must intersect ${\mathcal L}$. In comparison, the maximal packing given by Theorem \ref{one} only includes a single torus, $L_{1,1}$, but we do not assume any other tori are Hamiltonian isotopic to it. Theorem \ref{two} shows that analogous packings of the form $\sqcup \gamma_i \times \gamma_j$ are no longer maximal. Below are a few of the questions suggested by Theorem \ref{two} which also remain unresolved. \begin{question} Is every integral Lagrangian torus in $P(2+\epsilon, 2+\epsilon) \smallsetminus \{L_{k,l} \mid k,l \in \{1,2\}\}$, Hamiltonian isotopic to $L_{1,1}$? \end{question} \begin{question} Suppose $2<a,\,b <3$. Are there six disjoint integral Lagrangian tori in $P(a,b)$? \end{question} \begin{question} Suppose $2<b <3$. Are there three disjoint integral Lagrangian tori in $P(2,b)$? \end{question} \end{section} \section{Conventions, labels and notation} Every copy of the two dimensional sphere $S^2$ will implicitly be identified with the unit sphere in ${\mathbb R}^3$ and we will label the north and south poles by $\infty$ and $0$, respectively. 
In $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$, we use these points to define the four symplectic spheres $S_0=S^2 \times \{0\}$, $S_{\infty}=S^2 \times \{\infty\}$, $T_0=\{0\} \times S^2$ and $T_{\infty}=\{\infty\} \times S^2$. The ordered basis $\{ [S_0], [T_0]\}$ of $H_2(S^2 \times S^2;{\mathbb Z})$ is used to identify it with ${\mathbb Z}^2$. Let $L \subset (M, \Omega)$ be a Lagrangian torus in a four dimensional symplectic manifold. A diffeomorphism $\psi$ from $\mathbb{T}^2= S^1 \times S^1$ to $L$ will be referred to as a parameterization of $L$. It specifies a basis of $H_1(L; {\mathbb Z})$ and thus an isomorphism from $H_1(L; {\mathbb Z})$ to ${\mathbb Z}^2$. We will denote this copy of ${\mathbb Z}^2$ by $H_1^{\psi}(L; {\mathbb Z})$. The parameterization $\psi$ can also be extended to a symplectomorphism $\Psi$ from a neighborhood of the zero section in $T^*\mathbb{T}^2$ to a Weinstein neighborhood $\mathcal{U}(L)$ of $L$ in $M$. We will denote the corresponding coordinates in the neighborhood $\mathcal{U}(L)$ of $L$ by $(p_1, p_2, q_1, q_2)$ and, for simplicity, we will assume that $$\mathcal{U}(L) = \{|p_1| < \epsilon, |p_2 |< \epsilon\},$$ for some $\epsilon >0$. \section{Proof of Theorem \ref{one}.}\label{curves} Arguing by contradiction, we begin with the following. \medskip \noindent{\bf Assumption 1.} There is an integral Lagrangian torus $\mathbb{L}$ in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ which is disjoint from the Clifford torus $L_{1,1}$. \medskip Below we show that Assumption 1 can be refined in three ways. \subsection{Refinement 1. We may assume that $\mathbb{L}$ is monotone.} A symplectic manifold $(M, \Omega)$ is {\em monotone} if the Chern and area homomorphisms, $$ c_1 \colon \pi_2(M) \subset H_2(M, {\mathbb Z}) \to {\mathbb Z} \text{ and } \Omega \colon \pi_2(M) \to {\mathbb R}, $$ are positively proportional. 
Recall that a Lagrangian submanifold $L \subset (M, \Omega)$ is {\em monotone} if its Maslov and area homomorphisms, $$ \mu \colon \pi_2(M,L) \to {\mathbb Z} \text{ and } \Omega \colon \pi_2(M,L) \to {\mathbb R}, $$ are positively proportional. We will denote the constant of proportionality of $L$ by $\lambda$. If $L$ is a Lagrangian torus, then one can verify monotonicity by checking it for a collection of disks whose boundaries generate $H_1(L; {\mathbb Z})$. \begin{lemma} \label{pair} Suppose that $(M, \Omega)$ is a symplectic four manifold which is monotone with constant $\frac{\lambda}{2}$. A Lagrangian torus $L$ in $(M, \Omega)$ is monotone with constant $\lambda$ if there are two smooth maps $v_1,\,v_2 \colon (D^2, S^1) \to (M,L)$ such that the boundary maps $v_1|_{S^1}$ and $v_2|_{S^1}$ determine an integral basis of $H_1(L;{\mathbb Z}) ={\mathbb Z}^2$ and $\mu([v_i]) =\lambda \Omega([v_i])$ for $i=1,2.$ \end{lemma} Refinement 1 is validated by the following result. \begin{proposition}\label{monotone} Every integral Lagrangian torus $L$ in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ is monotone. 
\end{proposition} \begin{proof} By Theorem C of \cite{rgi} there is a Hamiltonian diffeomorphism which displaces $L$ from the pair of spheres $S_{\infty} \cup T_{\infty}.$ Hence, $L$ can be identified with an integral Lagrangian torus $\mathbf{ L}$ inside the polydisk $P(2-\epsilon,2-\epsilon) \subset ({\mathbb R}^4, \omega_4)$ for some sufficiently small $\epsilon>0.$ By Lemma \ref{pair}, it suffices to find two smooth maps $v_1,\,v_2 \colon (D^2, S^1) \to ({\mathbb R}^4, \mathbf{ L})$ such that the boundary maps $v_1|_{S^1}$ and $v_2|_{S^1}$ determine an integral basis of $H_1(\mathbf{L};{\mathbb Z})$ and $\mu([v_i]) =2 \omega_4([v_i])$ for $i=1,2.$ Simplifying further, we note that, for ${\mathbb R}^4$, the maps $\mu$ and $\omega_4$ can be recast as homomorphisms $$\mu \colon H_1(\mathbf{ L}; {\mathbb Z}) \to {\mathbb Z} \text{ and }\omega_4 \colon H_1(\mathbf{ L}; {\mathbb Z}) \to {\mathbb R}$$ and it suffices to find an integral basis $\{e_1,e_2\}$ of $H_1(\mathbf{ L}; {\mathbb Z})$ such that $\mu(e_i) =2 \omega_4(e_i)$ for $i=1,2$. Since $\mathbf{ L}$ is contained in $P(2-\epsilon,2-\epsilon)$, it follows from \cite{cm} that there is a smooth map $f \colon (D, S^1) \to ({\mathbb R}^4, \mathbf{ L})$ of Maslov index $2$ whose symplectic area is positive and less than two. Since $\mathbf{ L}$ is integral, the area of $f$ must be equal to one. If $e_1$ is the element of $H_1(\mathbf{ L}; {\mathbb Z})$ represented by $f|_{S^1}$, then we have $\mu(e_1)=2$ and $\omega_4(e_1)=1$. Let $c$ be a class in $H_1(\mathbf{ L}; {\mathbb Z})$ such that $\{e_1,c\}$ is an integral basis. Since $\mu(c)$ is even, by adding integer multiples of $e_1$ to $c$, if necessary, we may assume that $\mu(c)=2$. It remains to show that $\omega_4(c) =1.$ Arguing by contradiction, assume that $\omega_4(c) \neq 1$. Set $$ \hat{c}= \begin{cases} c & \text{if}\,\, \omega_4(c) > 1,\\ c + 2(e_1-c) & \text{if}\,\, \omega_4(c) < 1. 
\end{cases} $$ Then $\{e_1, \hat{c}\}$ is an integer basis of $H_1(\mathbf{ L}; {\mathbb Z})$ that satisfies $$\omega_4(e_1)=1,\,\omega_4(\hat{c}) \geq 2$$ and $$\mu(e_1)=\mu(\hat{c}) =2.$$ In \cite{ho}, Hind and Opshtein prove that if a Lagrangian torus in $P(a,b)$ admits such a basis, then either $a>2$ or $b>2$. This contradicts the assumption that $\mathbf{ L}$ lies in $P(2-\epsilon,2-\epsilon)$ and we are done. \end{proof} \subsection{Refinement 2: We may assume that $\mathbb{L}$ lies in the complement of $S_0 \cup S_{\infty} \cup T_0 \cup T_{\infty}$} To verify this, we utilize the relative finite energy foliations from \cite{rgi} which we now recall. \subsubsection{Foliations of $(S^2 \times S^2) \smallsetminus L$ following \cite{rgi} } In \cite{gr}, Gromov proves that for any smooth almost-complex structure $J$ on $S^2 \times S^2$ that is tamed by the symplectic form $\pi_1^*\omega + \pi_2^*\omega$, there is a foliation of $S^2 \times S^2$ by $J$-holomorphic spheres in the class $(0,1)$ (and another with fibres in the class $(1,0)$). For any monotone Lagrangian torus $L \subset (S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$, there is an analogous relative theory, developed first by Ivrii in \cite{ivriit}, and completed by Dimitroglou-Rizell, Goodman and Ivrii in \cite{rgi}, with input from \cite{we} and \cite{hl}. This yields symplectic $S^2$-foliations of $S^2 \times S^2$ that are {\em compatible with $L$}. These are obtained by stretching certain Gromov foliations along $L$ and smoothing the compactifications of the limiting buildings with more than one level. We now describe a version of this theory that has been adapted for the purposes of this paper. As in \cite{hl}, we only consider the curves which, after stretching, map to $S^2 \times S^2 \smallsetminus L$. \medskip \noindent{\bf Input.} Let $L$ be a monotone Lagrangian torus in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$. 
Fix a parameterization $\psi$ of $L$ and the corresponding Weinstein neighborhood $$\mathcal{U}(L)=\{|p_1|<\epsilon, |p_2|<\epsilon\}.$$ Fix a tame almost complex structure $J$ on $(S^2 \times S^2 \smallsetminus L, \pi_1^*\omega + \pi_2^*\omega)$ such that in $\mathcal{U}(L)$ we have $$J \frac{\partial}{\partial q_i} = - \sqrt{p_1^2+p_2^2} \, \frac{\partial}{\partial p_i}.$$ Recall that each negative end of a finite energy $J$-holomorphic curve $u$ mapping to $S^2 \times S^2 \smallsetminus L$ is asymptotic to a closed Reeb orbit on a copy of the flat unit cotangent bundle, $S_L^*\mathbb{T}^2$, of $\mathbb{T}^2$, corresponding to $L$. This Reeb orbit covers a closed geodesic, $\gamma$, of the flat metric on $\mathbb{T}^2$. In this case, we simply say that the end of $u$ is asymptotic to $L$ along $\gamma$. \medskip \noindent{\bf Output.} From this input, one can construct, as in $\S 2.5$ of \cite{rgi}, a family of almost complex structures $J_{\tau}$ on $S^2 \times S^2$ for $\tau \geq 0$. Taking the limit of the Gromov foliations for the $J_{\tau}$ as $\tau \to \infty$, it follows from Theorem D and Proposition 5.16 of \cite{rgi} that one obtains a foliation $\mathcal{F}=\mathcal{F}(L, \psi, J)$ of $S^2 \times S^2\smallsetminus L$ with the following properties. \begin{itemize} \item The foliation $\mathcal{F}$ has two kinds of leaves: unbroken ones consisting of a single closed $J$-holomorphic sphere in $S^2 \times S^2\smallsetminus L$ of class $(0,1)$, and broken leaves consisting of a pair of finite energy $J$-holomorphic planes in $S^2 \times S^2\smallsetminus L$. \item Each leaf of $\mathcal{F}$ intersects $S_{\infty}$ in exactly one point. For a broken leaf this means that exactly one of its planes intersects $S_{\infty}$. \item The ends of two planes of a broken leaf are asymptotic to the same geodesic, but with opposite orientations. This geodesic is embedded. 
We denote its homology class, equipped with the orientation determined by the plane which intersects $S_{\infty}$, by $\beta \in H_1(L; {\mathbb Z})$. This class is the same for all broken leaves of $\mathcal{F}$ and is referred to as the foliation class of $\mathcal{F}$. \item Each point $z \in S^2 \times S^2 \smallsetminus L$ lies in a unique leaf of $\mathcal{F}$, and each point of $L$ lies on a unique geodesic in the foliation class $\beta$ that corresponds to a unique plane of a broken leaf of $\mathcal{F}$ that intersects $S_{\infty}$. \item Let $p \colon S^2 \times S^2 \to S_{\infty}$ be the map which takes $z \in S^2 \times S^2 \smallsetminus L$ to the intersection of its leaf with $S_\infty$, and takes $z \in L$ to the intersection with $S_{\infty}$ of the broken leaf asymptotic to the unique geodesic through $z$ representing the foliation class. The map is well defined by positivity of intersection since $S_{\infty}$ is complex. Then $p(L)$ is an embedded closed curve in $S_{\infty}$. Moreover, if $L$ is homotopic to $L_{1,1}$ in the complement of $T_0 \cup T_{\infty}$, then $p(T_0)$ and $p(T_{\infty})$ (which are points since $T_0$ and $T_{\infty}$ are complex) lie on opposite sides of the closed curve $p(L)$. \item If $L$ is disjoint from $S_0 \cup S_{\infty} \cup T_0 \cup T_{\infty}$, then we may assume this configuration of symplectic spheres is $J$-complex. \end{itemize} \begin{lemma}(Straightening).\label{straight} For all sufficiently small $\epsilon>0$ we may assume, by perturbing $J$ outside of $\mathcal{U}(L)$, that the unbroken leaves of $\mathcal{F}$ that intersect $\mathcal{U}(\mathbb{L})$ do so along the annuli $\{p_1=\delta, q_1 = \theta, -\epsilon< p_2<\epsilon\}$ for some $\theta \in S^1$ and nonzero $\delta \in (-\epsilon, \epsilon)$. 
As well, the planes of broken leaves of $\mathcal{F}$ through $S_{\infty}$ intersect $\mathcal{U}(\mathbb{L})$ along the annuli $\{p_1=0, q_1 = \theta, 0<p_2<\epsilon\}$, for some $\theta \in S^1$, and the planes of $\mathcal{F}$ through $S_0$ intersect $\mathcal{U}(\mathbb{L})$ along the annuli $\{p_1=0, q_1 = \theta, -\epsilon<p_2<0\}$. \end{lemma} \begin{proof} The statement for broken leaves was established in Proposition 5.16 of \cite{rgi} (see the first bullet point of the proof). Given this, our unbroken leaves intersect $\partial \mathcal{U}(L)$ close to $p_1=0$ in circles smoothly approximating circles $\{p_1 = \delta, q_1 = \theta, p_2 = \pm \epsilon\}$. We look first at the parts of these leaves mapping to the complement of $\mathcal{U}(L)$, which are families of holomorphic disks. Perturbing the disks we may assume they intersect $\partial \mathcal{U}(L)$ close to $p_1=0$ precisely in the circles $\{p_1 = \delta, q_1 = \theta, p_2 = \pm \epsilon\}$ while remaining symplectic and smoothly converging to the broken leaves. Hence changing $J$ outside of $\mathcal{U}(L)$ we may assume the perturbed disks remain holomorphic. These new disks match with the annuli $\{p_1=\delta, q_1 = \theta, -\epsilon< p_2<\epsilon\}$ to give holomorphic spheres in the class $(0,1)$, and in fact by positivity of intersection these are the only spheres in the class intersecting the region $\{|p_1| < \epsilon\}$, at least if we shrink $\epsilon$ to include only the region where the perturbations apply. \end{proof} \medskip \noindent{\bf Solid tori.}\label{solid} In the case when $L$ is disjoint from $S_0 \cup S_{\infty} \cup T_0 \cup T_{\infty}$ we define $\mathcal{T}_{\infty}$ to be the set of all the $J$-holomorphic planes of the broken leaves which intersect $S_{\infty}$. This set can be collectively compactified to obtain a smoothly embedded solid torus in $S^2 \times S^2$ whose boundary is $L$. 
Similarly, the set $\mathcal{T}_{0}$ consisting of the other planes of the broken leaves can be used to obtain another solid torus with boundary on $L$. Note that, since the planes in $\mathcal{T}_0$ and $\mathcal{T}_{\infty}$ fit together to form spheres in the class $(0,1)$, by positivity of intersection a $J$-holomorphic sphere $u \colon S^2 \to S^2 \times S^2 \smallsetminus L$ in a class of the form $(1,d)$ must either intersect all the planes of $\mathcal{T}_{\infty}$ once, or all the planes of $\mathcal{T}_0$ once. \begin{example}\label{cliff} For the Clifford torus $L_{1,1} \subset S^2 \times S^2$ and a $J$ adapted to the standard parameterization $\psi_{1,1}$ of $L_{1,1}$, we get a foliation $\mathcal{F}_{1,1}$ of $S^2 \times S^2 \smallsetminus L_{1,1}$ with leaves in the class $(0,1)$. The broken leaves of $\mathcal{F}_{1,1}$ comprise two families of $J$-holomorphic planes with boundary on $L_{1,1}$: $\frak s_0$ which consists of planes intersecting $S_0$ and $\frak s_{\infty}$ which consists of the planes intersecting $S_{\infty}$. \end{example} \begin{remark}\label{both} One also obtains, for the same $J$, an analogous foliation whose leaves represent the class $(1,0)$. \end{remark} The following result establishes Refinement 2. The proof is based on that of Corollary E in \cite{rgi}. \begin{proposition} \label{move2} Suppose that $\mathbb{L}$ is a monotone Lagrangian torus in $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ that is disjoint from $L_{1,1}$. Then there is a Hamiltonian diffeomorphism $\phi$ of $S^2 \times S^2$ which displaces $\mathbb{L}$ from $S_0 \cup S_{\infty} \cup T_0 \cup T_{\infty}$ and is supported away from $L_{1,1}$. Moreover, $\phi(\mathbb{L})$ is homotopic to $L_{1,1}$ in the complement of $T_0 \cup T_{\infty}$ and also in the complement of $S_0 \cup S_{\infty}$. 
\end{proposition} \begin{proof} We start with an almost-complex structure $J_0$ on $S^2 \times S^2$ adapted to the standard parameterization $\psi_{1,1}$ of $L_{1,1}$. We also assume that $J_0$ is standard in a Weinstein neighborhood $\mathcal{U}(\mathbb{L})$ of $\mathbb{L}$ that is disjoint from $L_{1,1}$. Let $\mathcal{F}_0$ be the corresponding $J_0$-holomorphic foliation of $S^2 \times S^2 \smallsetminus L_{1,1}$ and let $p_0 \colon S^2 \times S^2 \to S_{\infty}$ be the corresponding map. We may assume that the points $p_0(T_0)$ and $p_0(T_{\infty})$ lie in different components of $S_{\infty} \smallsetminus p_0(L_{1,1})$. Deform $J_0$ to a family $J_t$ for $t \ge 0$ by stretching the neck, as in \cite{behwz}, along a sphere bundle in $\mathcal{U}(\mathbb{L})$ that is disjoint from $L_{1,1}$. This yields a family of foliations $\mathcal{F}_t$ of $S^2 \times S^2 \smallsetminus L_{1,1}$. Since the planes of the broken leaves of $\mathcal{F}_0$ have minimal area they persist under the deformation to yield the planes of the broken leaves of $\mathcal{F}_t$. This yields a family of maps $p_t \colon S^2 \times S^2 \to S_{\infty}$. \begin{lemma} The sets $p_t(\mathbb{L})$ in $S_{\infty}$ converge in the Hausdorff topology to a circle $C_{\infty} \in S_{\infty}$ as $t \to \infty $. \end{lemma} \begin{proof} Let $J_{\infty}$ be the limiting almost complex structure which is fully stretched along $\mathbb{L}$. The circle $C_{\infty}$ is the intersection with $S_{\infty}$ of the broken leaves of the $J_{\infty}$ foliation which are asymptotic to $\mathbb{L}$. Now, $p_t(\mathbb{L})$ consists of the intersection with $S_{\infty}$ of $J_t$ holomorphic spheres which intersect $\mathbb{L}$. Hence a sequence of points $z_t \in p_t(\mathbb{L})$ corresponds to a sequence of $J_t$ holomorphic curves in the class $(0,1)$ which all intersect $\mathbb{L}$. 
Up to taking a subsequence, this sequence of curves converges to a broken curve asymptotic to $\mathbb{L}$ and hence the $z_t$ converge to a point in $C_{\infty}$. \end{proof} \begin{lemma} If we denote the projection with respect to the fully stretched almost-complex structure by $p_{\infty}$, then $C_{\infty} = p_{\infty}(\mathbb{L})$ is disjoint from $p_{\infty}(L_{1,1})$. \end{lemma} \begin{proof} This follows from the fact that the original planes of the broken leaves have area $1$ and so cannot degenerate further. Indeed, since $\mathbb{L}$ is monotone, any holomorphic curve asymptotic to $\mathbb{L}$ must have integral area, and in particular curves in the class $(0,1)$ cannot converge to buildings with more than two top level curves. \end{proof} It follows from the results above that there is an $N>0$ such that $p_t(L_{1,1})$ is disjoint from $C_{\infty}$ for all $t \geq N$. With this we can choose two continuous curves $\gamma_0,\, \gamma_{\infty} \colon [0, \infty) \to S_{\infty}$ with the following properties: \begin{itemize} \item $\gamma_0(0) = p_0(T_0)$, and $\gamma_{\infty}(0) = p_0(T_{\infty})$. \item $\gamma_0(t)$ and $\gamma_{\infty}(t)$ are disjoint from $p_t(L_{1,1})$ for all $t \in [0,\infty).$ \item For some $N>0$, both $\gamma_0(t)$ and $\gamma_{\infty}(t)$ are disjoint from $C_{\infty}$, and $C_{\infty}$ is disjoint from $p_t(L_{1,1})$ for all $t \geq N$. \item $C_{\infty}$ separates $\gamma_0(N)$ and $\gamma_{\infty}(N)$ in $S_{\infty}$. \end{itemize} For each $t \in [0, \infty)$, both $p_t^{-1}(\gamma_0(t))$ and $p_t^{-1}(\gamma_{\infty}(t))$ are $J_t$-holomorphic spheres in the class $(0,1)$ disjoint from $L_{1,1}$. The family of spheres $$\{p_t^{-1}(\gamma_0(t))\}_{t \in [0,N]}$$ forms a symplectic isotopy which displaces $T_0$ from $\mathbb{L}$ in the complement of $L_{1,1}$. 
Similarly, the family of spheres $$\{p_t^{-1}(\gamma_{\infty}(t))\}_{t \in [0,N]}$$ forms a symplectic isotopy which displaces $T_{\infty}$ from $\mathbb{L}$ in the complement of $L_{1,1}$. Moreover, these isotopies can be generated by a single Hamiltonian flow on $S^2 \times S^2$ that fixes $L_{1,1}$. The inverse flow displaces $\mathbb{L}$ from $T_0 \cup T_{\infty}$. The final separation condition is enough to guarantee the homotopy condition in the theorem. By considering also the $J_t$ holomorphic foliation in the class $(1,0)$ (see Remark \ref{both}), we can displace $\mathbb{L}$ from $S_0 \cup S_{\infty}$ too. After adjusting the isotopy of $S_0 \cup S_{\infty}$ we may assume that it fixes $T_0 \cup T_{\infty}$, see Corollary 3.7 of \cite{rgi}. Hence the inverse flow will not reintroduce intersections with $T_0$ or $T_{\infty}$. \end{proof} \subsection{Refinement 3: We may assume that $\mathbb{L}$ is homologically trivial in $(S^2 \times S^2) \smallsetminus(S_0 \cup S_{\infty} \cup T_0 \cup T_{\infty})$}\label{claim3} To see this, note that $(S^2 \times S^2) \smallsetminus(S_0 \cup S_{\infty} \cup T_0 \cup T_{\infty})$ can be identified with a subset of the cotangent bundle of $\mathbb{T}^2$ in which $L_{1,1}$ is identified with the zero section. In this setting we can invoke the following. \begin{theorem}\label{hom}(Theorem 7.1, \cite{rgi}) A homologically nontrivial Lagrangian torus $L$ in $(T^*\mathbb{T}^2, d\lambda)$ is Hamiltonian isotopic to a constant section. In particular if $L$ is exact then it is Hamiltonian isotopic to the zero section. \end{theorem} If our monotone Lagrangian $\mathbb{L}$ was homologically nontrivial in $(S^2 \times S^2) \smallsetminus(S_0 \cup S_{\infty} \cup T_0 \cup T_{\infty})$ it would then follow from Theorem \ref{hom} and Section $2.3.B_4''$ of \cite{gr} that $\mathbb{L} \cap L_{1,1} \neq \emptyset$, which would contradict our original assumption. 
\subsection{A path to the proof of Theorem \ref{one}} By the three Refinements established above, it suffices to show that the following assumption is false. \medskip \noindent{\bf Assumption 2.} There is a monotone Lagrangian torus $\mathbb{L}$ in the set $$Y= (S^2 \times S^2) \smallsetminus(S_0 \cup S_{\infty} \cup T_0 \cup T_{\infty})$$ which is disjoint from the Clifford torus $L_{1,1}$ and is homologically trivial in $Y$. \medskip \noindent{\bf A path to a contradiction.} To obtain a contradiction to Assumption 2, we will show, using a sequence of blow-ups, inflations and blow-downs, that it implies the existence of two disjoint monotone Lagrangian tori in a new (monotone) copy of $S^2 \times S^2$ which are both Hamiltonian isotopic to the Clifford torus therein, and hence cannot be disjoint. To perform the necessary sequence of blow-ups, inflations and blow-downs, we must first establish the existence of a special collection of symplectic spheres and disks in our current model (see Proposition \ref{intcount} below). These spheres and disks must be well-placed with respect to a holomorphic foliation of $S^2 \times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$ which we introduce below in Section \ref{double}. They are obtained from special holomorphic buildings whose existence we establish in Section \ref{FG}. These existence results rely on the analysis of a general stretching scenario that is contained in Section \ref{stretch}. \begin{remark} To falsify Assumption 2, we must use it to build and analyze a complicated set of secondary objects in order to derive a contradiction. The reader is asked to bear in mind that many of the results established in the remainder of this section hold in a setting which will later be shown to be impossible. \end{remark} \subsection{Straightened holomorphic foliations of $S^2 \times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$, under Assumption 2}\label{double} Let $\mathbb{L}$ be a Lagrangian torus as in Assumption 2. 
Here we describe the holomorphic foliations of $S^2 \times S^2\smallsetminus (\mathbb{L} \cup L_{1,1})$ that are implied by the existence of $\mathbb{L}$. Let $\psi$ be a parameterization of $\mathbb{L}$ and $\psi_{1,1} $ be the standard parameterization of $L_{1,1}$. Consider a tame almost complex structure $J$ on $$(S^2 \times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1}), \pi_1^*\omega + \pi_2^*\omega)$$ which is adapted to both $\psi$ and $\psi_{1,1}$. We will always make the following assumption. \begin{enumerate} \item [(A1)] $J$ is equal to the standard split complex structure near $S_0$, $S_{\infty}$, $T_0$ and $T_{\infty}$. In particular, $T_0$ and $T_{\infty}$ are unbroken leaves of the foliation. \end{enumerate} Let $J_{\tau}$ be the family of almost complex structures on $S^2 \times S^2$ that are determined by $J$ as in \S 2.5 of \cite{rgi}. Taking the limit of the Gromov foliations in the class $(0,1)$ with respect to the $J_{\tau}$, as $\tau \to \infty$, and arguing as in \cite{rgi}, we get a $J$-holomorphic foliation $$\mathcal{F}= \mathcal{F}(\mathbb{L},L_{1,1}, \psi,\psi_{1,1}, J)$$ of $S^2 \times S^2\smallsetminus (\mathbb{L} \cup L_{1,1})$. Each leaf of $\mathcal{F}$ still intersects $S_{\infty}$ in exactly one point, but there are now three types of leaves. The first are unbroken leaves consisting of a single closed $J$-holomorphic sphere in $S^2 \times S^2\smallsetminus (\mathbb{L} \cup L_{1,1})$ of class $(0,1)$. The second type of leaves are broken and consist of a pair of finite energy $J$-holomorphic planes in $S^2 \times S^2\smallsetminus (\mathbb{L} \cup L_{1,1})$ that are asymptotic to $L_{1,1}$ along the same embedded geodesic with opposite orientations. As in Example \ref{cliff}, the collection of planes like this which intersect $S_{\infty}$ comprise a $1$-dimensional family, $\frak s_{\infty}$, and their companion planes comprise a family $\frak s_{0}$. 
The third class of leaves are also broken, but consist of a pair of finite energy $J$-holomorphic planes in $S^2 \times S^2\smallsetminus (\mathbb{L} \cup L_{1,1})$ asymptotic to $\mathbb{L}$. They too have matching ends. The planes of these broken leaves which intersect $S_{\infty}$ comprise the family $\mathcal{T}_{\infty}$ and the others comprise the family $\mathcal{T}_{0}$, as in \S \ref{solid}. Refinement 3 has the following consequence. \begin{lemma}\label{tauzero} The planes in $\mathcal{T}_{\infty}$ intersect both $S_0$ and $S_{\infty}$. Equivalently, the planes in $\mathcal{T}_{0}$ are disjoint from $S_0 \cup S_{\infty}$. \end{lemma} \begin{proof} We define a relative homology class $\Sigma \in H_2(S^2 \times S^2, S_0 \cup S_{\infty} \cup T_0 \cup T_{\infty})$ by first choosing an embedded path $\gamma:[0,1] \to S_{\infty}$ with $\gamma(0) = T_0 \cap S_{\infty}$ and $\gamma(1) = T_{\infty} \cap S_{\infty}$. Then choose a family of embedded paths $\sigma_t$ in $p^{-1}(\gamma(t))$ from $S_{\infty}$ to $S_0$. The union of the $\sigma_t$ defines $\Sigma$. We may assume that $\gamma$ intersects $p(\mathbb{L})$ in a single point $\gamma(t_0)$, and, arguing by contradiction, if $\mathcal{T}_{0}$ happened to intersect $S_0$ then $\sigma_t$ would intersect $\mathbb{L}$, giving a nontrivial intersection $\Sigma \bullet \mathbb{L}$. This contradicts Refinement 3. \end{proof} Note that there are now two foliation classes, $\beta_{\mathbb{L}}$ and $\beta_{L_{1,1}}$, determined by each of the two classes of broken leaves. The foliation $\mathcal{F}$ also defines a projection map $$ p \colon S^2 \times S^2 \to S_{\infty}. $$ In this setting, $p(L_{1,1})$ and $p(\mathbb{L})$ are disjoint embedded circles in $S_{\infty}$, which by Proposition \ref{move2} are disjoint from $T_0 \cup T_{\infty}$ and are homotopic in the complement. 
Therefore, without loss of generality, there are disjoint closed disks $A_0 \subset S_{\infty}$ with boundary $p(\mathbb{L})$ and $A_{\infty} \subset S_{\infty}$ with boundary $p(L_{1,1})$, such that $p(T_0) \in A_0$ and $p(T_{\infty}) \in A_{\infty}$. Denote the closed annulus defined by the closure of $S_{\infty}\smallsetminus (A_0 \cup A_{\infty})$ by $B$. Let $(P_1, P_2, Q_1, Q_2)$ be coordinates in the neighborhood $\mathcal{U}({\mathbb{L}})$ of $\mathbb{L}$ determined by $\psi$, and let $(p_1, p_2, q_1, q_2)$ be coordinates in the neighborhood $\mathcal{U}(L_{1,1})$ of $L_{1,1}$ determined by $\psi_{1,1}$. As in Lemma \ref{straight}, where we had only one Lagrangian torus, we may assume that the leaves of $\mathcal{F}$ are straight in both $\mathcal{U}({\mathbb{L}})$ and $\mathcal{U}(L_{1,1})$. In particular, we may assume that the unbroken leaves of $\mathcal{F}$ that intersect $\mathcal{U}(\mathbb{L})$ do so along the annuli $\{P_1=\delta \neq 0, Q_1 = \theta, |P_2|<\epsilon\}$, the planes of $\mathcal{T}_{\infty}$ intersect $\mathcal{U}(\mathbb{L})$ along the annuli $\{P_1=0, Q_1 = \theta, 0<P_2<\epsilon\}$, and the planes of $\mathcal{T}_0$ intersect $\mathcal{U}(\mathbb{L})$ along the annuli $\{P_1=0, Q_1 = \theta, -\epsilon<P_2<0\}$. Similarly, we may assume that the unbroken leaves of $\mathcal{F}$ that intersect $\mathcal{U}(L_{1,1})$ do so along the annuli $\{p_1=\delta \neq 0, q_1 = \theta, |p_2|<\epsilon\}$, the planes of $\frak{s}_{\infty}$ intersect $\mathcal{U}(L_{1,1})$ along the annuli $\{p_1=0, q_1 = \theta, 0<p_2<\epsilon\}$, and the planes of $\frak{s}_0$ intersect $\mathcal{U}(L_{1,1})$ along the annuli $\{p_1=0, q_1 = \theta, -\epsilon<p_2<0\}$. The map $p$ can also be described simply in these Weinstein neighborhoods. 
In $\mathcal{U}(\mathbb{L})$, we may assume that the region $\{P_1<0\} \subset \mathcal{U}(\mathbb{L})$ is mapped by $p$ into the interior of $A_0$, and $\{P_1>0\} \subset \mathcal{U}(\mathbb{L})$ is mapped by $p$ into the interior of $B$. Similarly, we may assume that in $\mathcal{U}(L_{1,1})$ the region $\{p_1>0\} \subset \mathcal{U} (L_{1,1})$ is mapped by $p$ into the interior of $A_{\infty}$ and $\{p_1<0\} \subset \mathcal{U}(L_{1,1})$ is mapped by $p$ into the interior of $B$. Using some of the freedoms available in the choice of $\psi$ and $\psi_{1,1} $, we can add the following additional assumption. \begin{enumerate} \item [(A2)] The foliation class $\beta_{\mathbb{L}}$ is equal to $(0,-1)\in H_1^{\psi}(\mathbb{L};{\mathbb Z})$, and the foliation class $\beta_{L_{1,1}}$ is equal to $(0,-1)\in H_1^{\psi_{1,1}}(L_{1,1};{\mathbb Z})$. \end{enumerate} \subsection{Stretching scenario for class $(1,d)$, under Assumption 2.}\label{stretch} Let $J_{\tau}$, for $\tau \geq 0$, be the family of almost complex structures on $S^2 \times S^2$ used in Section \ref{double} to obtain the foliation $\mathcal{F}$. For a sequence $\tau_k \to \infty$, let $u_{k,d} \colon S^2 \to S^2 \times S^2$ be a sequence of $J_{\tau_k}$-holomorphic curves in the class $(1,d)$ that converges to a holomorphic building $\mathbf{F}_d$ as in \cite{behwz}. The limit $\mathbf{F}_d$ consists of genus zero holomorphic curves in three levels. The {\em top level} curves map to $S^2 \times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$ and are $J$-holomorphic. The {\em middle level} curves map to one of two copies of ${\mathbb R} \times S^*\mathbb{T}^2$, the symplectization of the unit cotangent bundle of the flat torus. These copies correspond to $\mathbb{L}$ and $L_{1,1}$ and the identifications are defined by the parameterizations $\psi$ and $\psi_{1,1}$. 
It follows from the definition of the family $J_{\tau}$ that these middle level curves are all $J_{\mathrm{cyl}}$-holomorphic where $J_{\mathrm{cyl}}$ is a fixed cylindrical almost complex structure. Similarly, the {\em bottom level} curves of the limiting building map to one of two copies of $T^*\mathbb{T}^2$ and are $J_{\mathrm{std}}$-holomorphic where $J_{\mathrm{std}}$ is a standard complex structure. Each top level curve of $\mathbf{F}_d$ can be compactified to yield a map from a surface of genus zero with boundary to $(S^2 \times S^2,\mathbb{L} \cup L_{1,1})$. The components of the boundary correspond to the negative punctures of the curve. They are mapped to the closed geodesics on $\mathbb{L}$ or $L_{1,1}$ underlying the Reeb orbits to which the corresponding puncture is asymptotic. The middle and bottom level curves can be compactified to yield maps to either $\mathbb{L}$ or $L_{1,1}$ with the same type of boundary conditions. These compactified maps can all be glued together to form a map $\bar{\mathbf{F}}_d\colon S^2 \to S^2 \times S^2$ in the class $(1,d)$. \medskip \begin{definition}\label{sp} A $J$-holomorphic curve $u$ in $S^2\times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$ is said to be {\em essential (with respect to the foliation $\mathcal{F}$)} if the map $p \circ u$ is injective. \end{definition} \begin{definition}\label{ft} Let $u$ be a $J$-holomorphic curve in $S^2\times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$. A puncture of $u$ is said to be of {\em foliation type with respect to} $\mathbb{L}$ $(L_{1,1})$ if it is asymptotic to a closed Reeb orbit which lies on the copy of $S^*\mathbb{T}^2$ that corresponds to $\mathbb{L}$ ($L_{1,1}$) and covers a closed geodesic in an integer multiple of the foliation class $\beta_{\mathbb{L}}$ ($\beta_{L_{1,1}}$). The puncture is of {\em positive (negative) foliation type} if this integer is positive (negative). 
\end{definition} \begin{lemma}\label{ends} Let $u$ be a $J$-holomorphic curve in $S^2\times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$ with a puncture. Let $\{c_l\}$ be a sequence of circles in the domain of $u$ which lie in a standard neighborhood of the puncture, wind once around it, and converge to it in the Hausdorff topology. If the puncture is of foliation type with respect to $\mathbb{L}$ $(L_{1,1})$, then the sets $p(u (c_l))$ converge to a point on $p(\mathbb{L})$ $(p(L_{1,1}))$. Moreover each $p(u (c_l))$ either maps into the point (in which case $u$ covers a plane in a broken leaf) or it winds nontrivially around the point. If the puncture is not of foliation type then the sets $p(u (c_l))$ converge to $p(\mathbb{L})$ $(p(L_{1,1}))$. \end{lemma} \begin{proof} This follows from the exponential convergence theorem from \cite{hofa}. \end{proof} \begin{corollary}\label{possible ends} If $u$ is an essential $J$-holomorphic curve in $S^2\times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$, then its punctures on $\mathbb{L}$ are either all of foliation type or none of them are, and similarly for the punctures on $L_{1,1}$. If $u$ has no punctures of foliation type, then it is either a J-holomorphic plane or cylinder. If $u$ is a plane, then the closure of the image of $p \circ u$ is $A_0$ or $A_{\infty}$ or the closure of their complements in $S_{\infty}$. If $u$ is a cylinder, then the closure of the image of $p \circ u$ is $B$. \end{corollary} \begin{proof} The previous lemma implies that if $u$ has punctures of both foliation type and not of foliation type on $\mathbb{L}$ or $L_{1,1}$ then $p \circ u$ will not be injective. \end{proof} The following result can be proved in the same way as Lemma 6.2 in \cite{hl}. \begin{lemma}\label{allsame} Let $u$ be an essential curve whose punctures on $\mathbb{L}$ are all of foliation type. Then these punctures are either all positive or all negative (see Definition \ref{ft}). 
The same holds for the punctures on $L_{1,1}$. \end{lemma} Let $u_{k,d}$ be a sequence converging to $\mathbf{F}_d$ as in the {\bf stretching scenario for class $(1,d)$.} The fact that the curves $u_{k,d}$ must intersect each leaf of $\mathcal{F}$ exactly once, imposes several important restrictions on $\mathbf{ F}_d$ in relation to the foliation $\mathcal{F}$, and allows us to identify a handful of possible limit structures. \begin{proposition}\label{lem1} Let $\mathbf{ F}_d$ be a limit as in the {\bf stretching scenario for class $(1,d)$}. Then the building $\mathbf{ F}_d$ is of one of the following types: \bigskip \noindent {\bf Type 0.} $\mathbf{ F}_d$ is a (possibly nodal) $J$-holomorphic sphere in $S^2 \times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$ in the class $(1,d)$, where one (essential) sphere lies in the class $(1,j)$ for some $1 \le j \le d$ and any remaining curves are either spheres covering unbroken leaves of the foliation, or pairs of planes covering broken leaves of the foliation. \bigskip \noindent {\bf Type 1.} $\mathbf{ F}_d$ has a unique essential curve $u_d$. The punctures of $u_d$ are all of foliation type, and along $\mathbb{L}$, and also $L_{1,1}$, are either all positive or all negative. The image of $p\circ u_d$ is $S_{\infty}$ minus finitely many points on $p(\mathbb{L}) \cup p(L_{1,1})$. The other top level curves of $\mathbf{ F}_d$ either cover unbroken leaves of the foliation, or they are $J$-holomorphic planes covering one of the planes of a broken leaf of the foliation. \bigskip \noindent {\bf Type 2a.} $\mathbf{ F}_d$ has exactly two essential curves, $u_{\mathbb{L}}$ and $\underline{u}$. The closures of the images of the maps $p \circ u_{\mathbb{L}}$ and $p \circ \underline{u}$ are $A_0$ and $B \cup A_{\infty}$, respectively. Any punctures of $\underline{u}$ on $L_{1,1}$ are all of foliation type and are either all positive or all negative. 
The other top level curves of $\mathbf{ F}_d$ cover (broken or unbroken) leaves of $\mathcal{F}$. \bigskip \noindent {\bf Type 2b.} $\mathbf{ F}_d$ has exactly two essential curves, $\underline{u}$ and $u_{L_{1,1}}$. The closures of the images of the maps $p \circ \underline{u}$ and $p \circ u_{L_{1,1}}$ are $A_0 \cup B$ and $A_{\infty}$, respectively. Any punctures of $\underline{u}$ on $\mathbb{L}$ are all of foliation type and are either all positive or all negative. The other top level curves of $\mathbf{ F}_d$ cover (broken or unbroken) leaves of $\mathcal{F}$. \bigskip \noindent {\bf Type 3.} $\mathbf{ F}_d$ has exactly three essential curves, $u_{\mathbb{L}}$, $\underline{u}$, and $u_{L_{1,1}}$. The closures of the images of the maps $u_{\mathbb{L}}$, $\underline{u}$, and $u_{L_{1,1}}$ are $A_0$, $B$ and $A_{\infty}$, respectively. The other top level curves of $\mathbf{ F}_d$ again cover (broken or unbroken) leaves of $\mathcal{F}$. \end{proposition} \medskip \noindent{\bf Proof of Proposition \ref{lem1}.} We begin with the following result which allows us to use essential curves to sort the limit structures. \begin{lemma}\label{lem0} Let $\mathbf{ F}_d$ be a limit as in the {\bf stretching scenario for class $(1,d)$}. If $u$ is a top level curve of $\mathbf{ F}_d$, then it is either essential or else the image of $p \circ u$ is a point. The essential curves have disjoint images under $p$, which are open sets, and these images include the complement of $p(\mathbb{L}) \cup p(L_{1,1})$. \end{lemma} \begin{proof} Recall that the curves of $\mathbf{F}_d$ can be compactified and glued together to form a map $\bar{\mathbf{F}}_d\colon S^2 \to S^2 \times S^2$ in the class $(1,d)$. The intersections of $\bar{\mathbf{F}}_d$ with an unbroken leaf $T$ of $\mathcal{F}$ all correspond to intersections of top level curves of $\mathbf{ F}_d$ with $T$. 
Since $(1,d) \bullet T = (1,d) \bullet (0,1) =1$, there can only be one such intersection point, by positivity of intersection. If $u$ is a top level curve such that the map $p \circ u$ is constant, then $u$ covers part of a broken leaf of our foliation and has intersection number $0$ with all unbroken leaves. Assume then that $u$ is a top level curve such that $p \circ u$ is nonconstant. By the discussion above, $u$ intersects any unbroken leaf $T$ either once or not at all, and if $p \circ u$ has any double points then they must lie in $p(\mathbb{L}) \cup p(L_{1,1})$. Positivity of intersection again implies that the nonconstant map $p \circ u$ is an open mapping and this implies that the double points of $p \circ u$ form an open set. We conclude that $u$ is essential. To see that the essential curves have disjoint images under $p$ we can apply the same argument to a union $u \cup v$. The intersection number also implies that all unbroken fibers intersect at least one essential curve. \end{proof} Lemma \ref{lem0} implies that there is an essential curve $u$ of $\mathbf{ F}_d$ that intersects $T_0$. The closure of the image of $p \circ u$ must contain $A_0$. By Corollary \ref{possible ends} the following cases are exhaustive. \medskip \noindent{\em Case 1: $u$ has no punctures.} In this case, $p \circ u$ must be a bijection onto $S_\infty$. Hence, $u$ is a $J$-holomorphic sphere in a class of the form $(1,j)$ for $j$ in $[0,d]$. By Lemma \ref{lem0} all the other top level curves of $\mathbf{ F}_d$ must cover leaves of the foliation. The top level curves of $\mathbf{ F}_d$ which cover fibres fit together to form a possibly disconnected curve in the class $(0,d-j)$. If $j=d$ then $\mathbf{ F}_d$ consists only of the curve $u$. Either way, the building is of Type 0. \medskip \noindent{\em Case 2: $u$ has punctures and they are all of foliation type.} In this case we claim that $\mathbf{ F}_d$ is of Type 1. 
By Lemma \ref{ends}, the image of the map $p \circ u$ includes points in each component of the complement of $p(\mathbb{L}) \cup p(L_{1,1})$, and so by Lemma \ref{lem0} we have that $p \circ u$ is a bijection onto $S_\infty$ minus a finite set of points on $p(\mathbb{L}) \cup p(L_{1,1})$. The other top level curves of $\mathbf{ F}_d$ must either cover unbroken leaves of $\mathcal{F}$ or they are $J$-holomorphic planes covering one of the planes of a broken leaf of $\mathcal{F}$. The statement about positivity or negativity of punctures is Lemma \ref{allsame}. \medskip \noindent{\em Case 3: $u$ has one puncture not of foliation type.} Since $u$ intersects the leaf $T_0$, the closure of the image of $p \circ u$ is either $A_0$ or $A_0 \cup B$. In either case, $u$ does not intersect $T_{\infty}$. Suppose that the closure of the image of $p \circ u$ is $A_0$. By Lemma \ref{lem0}, there is an essential curve $v$ of $\mathbf{ F}_d$ that intersects $T_{\infty}$, and the images of $p \circ u$ and $p \circ v$ cannot intersect. Hence the closure of the image of $p \circ v$ is either $A_{\infty}$ or $B \cup A_{\infty}$. In the first case, $\mathbf{ F}_d$ is of Type 3 with $u_{\mathbb{L}}=u$ and $u_{L_{1,1}}=v$, where the third curve, $\underline{u}$, exists by Lemma \ref{lem0}. In the second case, $\mathbf{ F}_d$ is of Type 2a with $u_{\mathbb{L}}=u$ and $\underline{u}=v$. If, instead, the closure of the image of $p \circ u$ is $A_0 \cup B$, then a similar argument implies that $\mathbf{ F}_d$ is of Type 2b. \medskip \noindent This completes the proof of Proposition \ref{lem1}. \medskip \subsection{The existence of special buildings, under Assumption 2.}\label{FG} In this section we will establish the existence of two special limits of the {\bf stretching scenario for class $(1,d)$} when $d$ is sufficiently large. The following result will be used to exploit the large $d$ limit.
\begin{lemma}\label{mono} There exists an $\epsilon >0$ such that $$\mathrm{area}(u) \ge \epsilon u \bullet (S_0 \cup S_{\infty})$$ for all $J$-holomorphic curves $u$ in $S^2\times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$. \end{lemma} \begin{proof} Fix an open neighborhood of $S_{\infty}$ of the form $\mathcal{N}_{\epsilon}=S_{\infty} \times D^2(\epsilon)$ where $D^2(\epsilon)$ is the open disc of area $\epsilon$. We may assume that the closure of $\mathcal{N}_{\epsilon}$ is disjoint from $\mathbb{L} \cup L_{1,1}$ and, by (A1), we may assume that $J$ restricts to $\mathcal{N}_{\epsilon}$ as the standard split complex structure. Let $\pi_2 \colon S_{\infty} \times D^2(\epsilon) \to D^2(\epsilon)$ be projection and set $$u_{\epsilon,\infty} = u |_{u^{-1}(\mathcal{N}_{\epsilon})}.$$ By perturbing $\epsilon$ if needed we may assume that $u^{-1}(\mathcal{N}_{\epsilon})$ is a smooth manifold. We have $$ \mathrm{degree}(\pi_2 \circ u_{\epsilon,\infty}) = u \bullet S_{\infty}. $$ This implies \begin{eqnarray*} \mathrm{area}(u_{\epsilon,\infty}) & \geq & \int_{u^{-1}(\mathcal{N}_{\epsilon})} u_{\epsilon,\infty}^*(\omega\oplus \omega) \\ {} & \geq & \int_{(\pi_2 \circ u_{\epsilon,\infty})^{-1}(D^2(\epsilon))} (\pi_2 \circ u_{\epsilon,\infty})^*\omega \\ {} & = & \left(\int_{D^2(\epsilon)} \omega \right) u \bullet S_{\infty}\\ {} & = & \epsilon u \bullet S_{\infty}. \end{eqnarray*} A similar calculation for $S_0$ gives the result. \end{proof} \begin{proposition}\label{existence2} For all sufficiently large $d$, there exists a limiting building $\mathbf{ F}$ as in the {\bf stretching scenario for class $(1,d)$} such that $\mathbf{ F}$ is of Type 3. The building consists of its three essential top level curves, $u_{\mathbb{L}}$, $\underline{u}$, and $u_{L_{1,1}}$, together with $d-1$ planes in $\mathcal{T}_{0} \cup \mathcal{T}_{\infty}$ and $d$ planes in $\frak{s}_{0} \cup \frak{s}_{\infty}$.
\end{proposition} \begin{proof} Fix $d+1$ points on $L_{1,1}$ and $d$ points on $\mathbb{L}$. Let $J_{\tau}$, for $\tau \geq 0$, be the family of almost complex structures on $S^2 \times S^2$ from Section \ref{double} and for a sequence $\tau_k \to \infty$, let $u_{k} \colon S^2 \to S^2 \times S^2$ be a convergent sequence of $J_{\tau_k}$-holomorphic curves in the class $(1,d)$ that pass through the $2d+1$ constraint points. Their limit, $\mathbf{F}$, is the desired building. To see this we first note that the point constraints already preclude the possibility that $\mathbf{ F}$ is of Type 0. If $\mathbf{ F}$ had Type $1$, the point constraints on $L_{1,1}$ would imply, by Lemma \ref{allsame}, that $\mathbf{ F}$ must contain $d+1$ planes either all in $\frak{s}_{0}$ or all in $\frak{s}_{\infty}$. This contradicts the fact that our curve has intersection number $d$ with $S_0$ and $S_{\infty}$. The same argument precludes the possibility that $\mathbf{ F}$ has Type 2a. It remains to show that $\mathbf{ F}$ does not have Type 2b. Assuming that $\mathbf{ F}$ has Type 2b, we will show that it must include a collection of curves of total area equal to two, that intersect $S_0 \cup S_{\infty}$ $d$ times. If $d$ is sufficiently large, this contradicts Lemma \ref{mono} above. \medskip \noindent{\bf Claim 1.} If $\mathbf{ F}$ has Type 2b, then it includes at least $d$ planes in $\frak{s}_{0} \cup \frak{s}_{\infty}$. \medskip To see this consider the subbuilding $\mathbf{F}_{1,1}$ of $\mathbf{ F}$ consisting of its middle and bottom level curves mapping to the copies of ${\mathbb R} \times S^*\mathbb{T}^2$ and $T^*\mathbb{T}^2$ that correspond to $L_{1,1}$. Since it is connected and has genus zero, it follows from Proposition 3.3 of \cite{hl} that $$ \mathrm{index}(\mathbf{F}_{1,1}) =2(s-1), $$ where $s$ is the number of positive ends of $\mathbf{F}_{1,1}$.
Since, $\mathbf{F}_{1,1}$ passes through the $d+1$ generic point constraints on $L_{1,1}$, and the Fredholm index in these manifolds is nondecreasing under multiple covers, we must also have $$ \mathrm{index}(\mathbf{F}_{1,1}) \geq 2(d+1). $$ Hence, $\mathbf{F}_{1,1}$ has at least $d+2$ positive ends. Under the assumption that $\mathbf{ F}$ has Type 2b, two of these positive ends match with the two essential top level curves of $\mathbf{ F}$. This leaves at least $d$ positive ends of $\mathbf{F}_{1,1}$ that match with top level curves of $\mathbf{ F}$ that cover planes in $\frak{s}_{0} \cup \frak{s}_{\infty}$. \begin{remark} The same argument implies that if $\mathbf{ F}$ has Type 3 then again it must include at least $d$ planes in $\frak{s}_{0} \cup \frak{s}_{\infty}$. \end{remark} \medskip \noindent{\bf Claim 2.} If $\mathbf{ F}$ has Type 2b, then it includes $d$ planes in $\mathcal{T}_{0}$ and none in $\mathcal{T}_{\infty}$. \medskip By Lemma \ref{allsame} the $d$ constraint points on $\mathbb{L}$ imply that, if $\mathbf{F}$ is of Type 2b, then it must contain $d$ planes either all in $\mathcal{T}_{0}$ or all in $\mathcal{T}_{\infty}$. To show that these planes can not be in $\mathcal{T}_{\infty}$, we consider intersections with $S_0 \cup S_{\infty}$. Overall, the top level curves of $\mathbf{ F}$ must intersect $S_0 \cup S_{\infty}$ exactly $2d$ times. The planes of $\mathbf{ F}$ asymptotic to $L_{1,1}$ from Claim 1, account for at least $d$ of these intersections. Since $\mathbb{L}$ is homologically trivial in $Y$, by Lemma \ref{tauzero} each plane of $\mathcal{T}_{\infty}$ must intersect both $S_0$ and $S_{\infty}$, while the planes in $\mathcal{T}_{0}$ intersect neither of these spheres. If the $d$ planes of $\mathbf{F}$ asymptotic to $\mathbb{L}$ are in $\mathcal{T}_{\infty}$ then they would contribute another $2d$ intersections with $S_0 \cup S_{\infty}$. 
By positivity of intersection, this can not happen and so these planes must belong to $\mathcal{T}_{0}$ as claimed. \medskip To complete the argument, we now balance areas. The total area of all the curves in $\mathbf{F}$ is $2(d+1)$. If $\mathbf{ F}$ has Type 2b, then the planes from Claim 1 and Claim 2 have total area at least $2d$. Its essential curves must then have total area equal to 2. Also, they must contribute the remaining $d$ intersections with $S_0 \cup S_{\infty}$. It follows from Lemma \ref{mono}, that this is impossible for all $d$ sufficiently large. Hence $\mathbf{ F}$ can not be of Type 2b, and must instead be of Type 3. Arguing as above, it follows that in addition to its three essential top level curves, $\mathbf{ F}$ must then have $d$ planes in $\frak{s}_{0} \cup \frak{s}_{\infty}$ and $d-1$ planes in $\mathcal{T}_{0} \cup \mathcal{T}_{\infty}$. \end{proof} \begin{proposition}\label{existence3} For all sufficiently large $d$, there exists a limiting building $\mathbf{G}$ as in the {\bf stretching scenario for class $(1,d)$} such that $\mathbf{ G}$ is of Type 3. In addition to its three essential curves it consists of $d$ planes in $\mathcal{T}_{0} \cup \mathcal{T}_{\infty}$ and $d-1$ planes in $\frak{s}_{0} \cup \frak{s}_{\infty}$. \end{proposition} \begin{proof} Here we fix $d$ points on $L_{1,1}$ and $d+1$ points on $\mathbb{L}$, and for $J_{\tau}$ as in Proposition \ref{existence2} consider the limit, $\mathbf{ G}$, of a convergent sequence of $J_{\tau_k}$-holomorphic spheres, for $\tau_k \to \infty$, that represent the class $(1,d)$ and pass through the $2d+1$ constraint points. The point constraints imply that $\mathbf{ G}$ is not of Type 0. If $\mathbf{ G}$ was of Type 1, the point constraints would imply that $\mathbf{ G}$ includes at least $d$ planes in either $\frak{s}_{0}$ or $\frak{s}_{\infty}$, and at least $d+1$ planes in either $\mathcal{T}_{0}$ or $\mathcal{T}_{\infty}$.
From this it follows that the essential curve of $\mathbf{ G}$ would have area $1$. Recalling Lemma \ref{tauzero}, since $\mathbb{L}$ is homologically trivial, the planes of $\mathcal{T}_{\infty}$ each intersect $S_0 \cup S_{\infty}$ twice. Arguing as in Claim 2 from the proof of Proposition \ref{existence2}, if the planes asymptotic to $\mathbb{L}$ lie in $\mathcal{T}_{\infty}$ then the broken planes will contribute a total of $d + 2(d+1)$ intersections with $S_0 \cup S_{\infty}$, a contradiction as there are only $2d$ such intersections. On the other hand, if these planes all lie in $\mathcal{T}_{0}$ then the essential curve must contribute $d$ intersections with $S_0 \cup S_{\infty}$. As this essential curve has area $1$, this contradicts Lemma \ref{mono} when $d$ is sufficiently large. Hence, $\mathbf{ G}$ is not of Type 1. Next we show that $\mathbf{ G}$ can not be of Type 2b. Assume that it is. Then $\mathbf{ G}$ includes $d+1$ planes in either $\mathcal{T}_{0}$ or $\mathcal{T}_{\infty}$. Counting intersections as above, $\mathbf{ G}$ must have $d+1$ planes in $\mathcal{T}_{0}$. Arguing as in Claim 1 above, we consider the subbuilding $\mathbf{G}_{1,1}$ of $\mathbf{G}$ consisting of its middle and bottom level curves that map to the copies of ${\mathbb R} \times S^*\mathbb{T}^2$ and $T^*\mathbb{T}^2$ that correspond to $L_{1,1}$. Since $\mathbf{G}_{1,1}$ is connected and has genus zero, we have $$ \mathrm{index}(\mathbf{G}_{1,1}) =2(s-1), $$ where $s$ is the number of positive ends of $\mathbf{G}_{1,1}$. Since $\mathbf{G}_{1,1}$ passes through the $d$ generic point constraints on $L_{1,1}$ we also have $$ \mathrm{index}(\mathbf{G}_{1,1}) \geq 2d. $$ Hence, $\mathbf{G}_{1,1}$ has at least $d+1$ positive ends. Two of these positive ends match with negative ends of the two essential curves of $\mathbf{G}$. It follows that $\mathbf{G}$ must have at least $d-1$ planes in $\frak{s}_{0} \cup \frak{s}_{\infty}$.
This means the planes covering broken leaves then have area at least $2d$. As the limiting building has total area $2d+2$ and also includes two essential curves we see that the essential curves each have area $1$ and there are exactly $d-1$ planes in $\frak{s}_{0} \cup \frak{s}_{\infty}$. As the planes in $\mathcal{T}_{0}$ are disjoint from $S_0 \cup S_{\infty}$, the essential curves of $\mathbf{ G}$ must have $d+1$ intersections with $S_0 \cup S_{\infty}$. Lemma \ref{mono} again implies that this is impossible for all sufficiently large $d$. Finally we show that $\mathbf{ G}$ can not be of Type 2a. In this case $\mathbf{ G}$ includes $d$ planes in $\mathcal{T}_{0} \cup \mathcal{T}_{\infty}$ and $d$ planes in either $\frak{s}_{0}$ or $\frak{s}_{\infty}$. The planes asymptotic to $L_{1,1}$ thus account for all intersections with either $S_0$ or $S_{\infty}$ and so the planes asymptotic to $\mathbb{L}$ therefore all lie in $\mathcal{T}_{0}$. The essential curves have total area $2$ and must together account for all intersections with either $S_0$ or $S_{\infty}$. This contradicts Lemma \ref{mono} as before. \end{proof} \begin{lemma}\label{area1} All curves in the limiting buildings $\mathbf{ F}$ and $\mathbf{ G}$ that map to $S^2 \times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1} )$ have area $1$, and in particular are simply covered. \end{lemma} \begin{proof} To see this, note that since $\mathbf{ F}$ is of Type 3, it has its three essential curves together with $2d-1$ other top level curves that cover leaves of the foliation. Since $\mathbf{ F}$ has total area $2d+2$ and monotonicity implies that all curves have integral area, the result for $\mathbf{ F}$ follows. The same argument applies to $\mathbf{ G}$. 
\end{proof} \subsection{A collection of symplectic spheres and disks, under Assumption 2.} Consider $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ equipped with an almost complex structure $J$ adapted to parameterizations $\psi$ and $\psi_{1,1}$ of $\mathbb{L}$ and $L_{1,1}$, respectively. Recall that for the projection $p : S^2 \times S^2 \to S_{\infty}$, defined by the foliation $\mathcal{F}$ corresponding to $J$, the images $p(\mathbb{L})$ and $p(L_{1,1})$ are disjoint circles. There are also disjoint disks $A_0 \subset S_{\infty}$ with boundary $p(\mathbb{L})$ and $A_{\infty} \subset S_{\infty}$ with boundary $p(L_{1,1})$ such that $p(T_0) \in A_0$ and $p(T_{\infty}) \in A_{\infty}$. In this section we will prove the following result. \begin{proposition}\label{intcount} There exist embedded symplectic spheres $F, G \colon S^2 \to S^2 \times S^2$ in the class $(1,d)$, and embedded symplectic disks $\mathbb{E} \colon (D^2,S^1) \to (S^2 \times S^2,\mathbb{L})$ and $E_{1,1} \colon (D^2,S^1) \to (S^2 \times S^2,L_{1,1})$ of Maslov index $2$, such that: \begin{enumerate} \item $F$, $G$, $\mathbb{E}$ and $E_{1,1}$ are all $J$-holomorphic away from arbitrarily small neighborhoods of a collection of Lagrangian tori whose elements are near to, and Lagrangian isotopic to, either $\mathbb{L}$ or $L_{1,1}$;\\ \item the class of $\,\mathbb{E}|_{S^1}$ and the foliation class $\beta_{\mathbb{L}}$ form an integral basis of $H_1(\mathbb{L};{\mathbb Z})$;\\ \item the class of $E_{1,1}|_{S^1}$ and the foliation class $\beta_{L_{1,1}}$ form an integral basis of $H_1(L_{1,1};{\mathbb Z})$;\\ \item exactly one of $F$ and $G$ intersects the planes of $\mathcal{T}_{0}$ and the other intersects the planes of $\mathcal{T}_{\infty}$;\\ \item exactly one of $F$ and $G$ intersects the planes of $\frak{s}_{0}$ and the other intersects the planes of $\frak{s}_{\infty}$;\\ \item $F \bullet \mathbb{E} + G \bullet \mathbb{E} =d$;\\ \item $F \bullet E_{1,1} + G \bullet E_{1,1} =d$;\\ \item $F
\bullet G =2d$;\\ \item $p(F \cap G)$ consists of $d$ points in $A_0$ and $d$ points in $A_{\infty}$.\\ \end{enumerate} \end{proposition} \noindent{\bf Proof of Proposition \ref{intcount}.} To prove this result we will compactify and deform curves of the buildings $\mathbf{ F}$ and $\mathbf{ G}$ from Propositions \ref{existence2} and \ref{existence3}. There are two deformation processes which are used in order to resolve the intersection patterns of the resulting maps. The fact that the foliation $\mathcal{F}$ is assumed to be straightened near $\mathbb{L}$ and $L_{1,1}$ (see Section \ref{double}) plays a prominent role here. In particular, curves through $\mathcal{U}(\mathbb{L})$ and $\mathcal{U}(L_{1,1})$ are deformed so that many of their intersections can be identified with intersections between cylinders in the local models. \subsubsection{Deformations near $\mathbb{L}$} We begin by describing two deformation processes for curves with ends on $\mathbb{L}$. These appear as Lemma \ref{fukaya} and Corollary \ref{up}, below. Consider the coordinates $(P_1,Q_1,P_2,Q_2)$ in our Weinstein neighborhood $$ \mathcal{U}(\mathbb{L}) = \{|P_1|<\epsilon,\, |P_2|<\epsilon\}. $$ For each {\it translation vector} $\mathbf{ v}=(a,b) \in (-\epsilon, \epsilon)^2$, there is a corresponding nearby Lagrangian torus $$\mathbb{L}(\mathbf{ v}) = \mathbb{L}(a,b) = \{P_1=a,P_2=b\} \subset \mathcal{U}(\mathbb{L}).$$ Note that the parameterization $\psi$ of $\mathbb{L}$ determines an obvious parameterization, $\psi(\mathbf{ v}) =\psi(a,b)$ of $\mathbb{L}(a,b)$, and a canonical isomorphism from $H_1^{\psi}(L; {\mathbb Z})$ to $H_1^{\psi(a,b)}(L(a,b); {\mathbb Z})$. 
Following \cite{rgi} section 4, given a finite collection of translation vectors, $$ \mathbf{V} =\{\mathbf{v}_1, \dots, \mathbf{v}_k \} =\{(a_1,b_1), \dots, (a_k,b_k)\}, $$ let $J_{\mathbf{V} }$ be an almost complex structure which coincides with $J$ outside $\mathcal{U}(\mathbb{L})$ and inside has the form \begin{equation} \label{J|} J_{\mathbf{V} } \frac{\partial}{\partial Q_i} = - \rho_{\mathbf{V} } \frac{\partial}{\partial P_i}, \end{equation} where $\rho_{\mathbf{V} }$ is a positive function away from the collection of Lagrangians $$ \mathbb{L}(\mathbf{V} )= \cup_{i=1}^k \mathbb{L}(\mathbf{v}_i),$$ and in a neighborhood of each $\mathbb{L}(\mathbf{v}_i)$ has the form $$\rho_{\mathbf{V} } = \sqrt{(P_1-a_i)^2 + (P_2-b_i)^2}.$$ In this case, we say that $J_{\mathbf{V} }$ is stretched along $\mathbb{L}({\mathbf{V} })$. The set of all such almost complex structures will be denoted by $\mathcal{J}_{\mathcal{U}(\mathbb{L})}$. Using the induced parameterizations $\psi(\mathbf{v}_i)$ of each of the $\mathbb{L}(\mathbf{v}_i)$, the almost complex structure $J_{\mathbf{V} }$ defines a family of almost complex structures $J_{\mathbf{V} ,\tau}$ on $S^2 \times S^2$ which allow us to stretch $J$ holomorphic curves in $S^2 \times S^2$ along $\mathbb{L}(\mathbf{V} ) \cup L_{1,1}.$ The limit of the Gromov foliations for the $J_{\mathbf{V} ,\tau}$, in class $(0,1)$, yield a foliation $\mathcal{F}(\mathbf{V} )$ of $$S^2 \times S^2 \smallsetminus (\mathbb{L}(\mathbf{V} ) \cup L_{1,1}).$$ For example, for $\mathbf{V} =\{(0,0)\}$ we have $J_{\mathbf{V} } =J$ and $\mathcal{F}(\mathbf{V} )=\mathcal{F}$. \begin{lemma}\label{folcor} Leaves of the foliation $\mathcal{F}(\mathbf{V} )$ intersect $\mathcal{U}_{\epsilon}(\mathbb{L})$ along the annuli $\{P_1=\delta, Q_1 = \theta, |P_2|< \epsilon \}$. 
A leaf of $\mathcal{F}(\mathbf{V} )$ that intersects $\mathcal{U}_{\epsilon}(\mathbb{L})$ along the annulus $\{P_1=\delta, Q_1 = \theta, |P_2|< \epsilon \}$ is broken if and only if the collection $\mathbf{V} $ contains an element of the form $(\delta,b_i)$. \end{lemma} \begin{proof} It follows from \eqref{J|} that these annuli are $J_{\mathbf{V} }$-holomorphic. By assuming $J$ satisfies the conclusions of Lemma \ref{straight}, they also extend to $J_{\mathbf{V} }$-holomorphic spheres in the class $(0,1)$. By positivity of intersection, these spheres, and indeed any holomorphic sphere in the class $(0,1)$, are leaves of the foliation $\mathcal{F}(\mathbf{V} )$. \end{proof} Our first deformation process allows us to deform a regular curve so that its ends on $\mathbb{L}$ become ends on a nearby translated Lagrangian. \begin{lemma}\label{fukaya}(Fukaya's Trick) Let $u$ be a regular $J$-holomorphic curve with $k\geq0$ ends on $\mathbb{L}$ and $l\geq 0$ ends on $L_{1,1}$. For all $\mathbf{v}=(a,b)$ with $\|\mathbf{v}\|^2= a^2 + b^2$ sufficiently small there is a regular $J_{\mathbf{v}}$-holomorphic curve $u(\mathbf{v})$ with $k$ ends on $\mathbb{L}(\mathbf{v})$ and $l$ ends on $L_{1,1}$. Moreover the ends of $u(\mathbf{v})$ on $\mathbb{L}(\mathbf{v})$ represent the identical classes in $H_1^{\psi(\mathbf{v})}(L,{\mathbb R})$ as do those of $u$ in $H_1^{\psi}(L,{\mathbb R})$. The classes corresponding to the ends of $u(\mathbf{v})$ on $L_{1,1}$ are also identical to those of $u$. \end{lemma} \begin{proof} For $\|\mathbf{ v}\|$ sufficiently small, the Lagrangian isotopy $t \mapsto \mathbb{L}(t\mathbf{v})$ for $0\le t \le 1$ is contained in $\mathcal{U}(\mathbb{L})$. 
Let $f_{t,\mathbf{v}}$ be a family of diffeomorphisms of $S^2 \times S^2$ such that: \begin{itemize} \item $f_{0,\mathbf{v}}$ is the identity map, \item $f_{t,\mathbf{v}}(\mathbb{L}) = \mathbb{L}(t\mathbf{v})$ for all $t \in [0,1]$, \item each $f_{t,\mathbf{v}}$ is equal to the identity map outside of $\mathcal{U}(\mathbb{L})$, \item $\|f_{t,\mathbf{v}}\|_{C^1}$ is of order 1 in $\|\mathbf{v}\|$. \end{itemize} As above, the parameterization $\psi$ of $\mathbb{L}$ determines a parameterization $\psi(t\mathbf{v})$ for each $\mathbb{L}(t\mathbf{v})$. Let $J_{t\mathbf{v}}$ be a family of tame almost complex structures in $\mathcal{J}_{\mathcal{U}(\mathbb{L})}$ such that each $J_{t\mathbf{v}}$ is adapted to $\psi(t\mathbf{v})$. In particular, $J_{t\mathbf{v}}$ is stretched along $\mathbb{L}(t\mathbf{v})$. Set $$\tilde{J}_{t\mathbf{v}} = (f^{-1}_{t,\mathbf{v}})_* J_{t\mathbf{v}}.$$ For $\|\mathbf{v}\|$ sufficiently small, $\tilde{J}_{t\mathbf{v}}$ is a tame almost complex structure on $S^2 \times S^2 \smallsetminus (\mathbb{L} \cup L_{1,1})$ for all $t \in [0,1]$. Since $u$ is regular, for sufficiently small $\|\mathbf{v}\|$ the curve $u$ persists to yield a regular $\tilde{J}_{\mathbf{v}}$-holomorphic curve $\tilde{u}( \mathbf{v})$ with the same asymptotic behavior as $u$. By our choice of $\tilde{J}_{t\mathbf{v}}$, $$ u(\mathbf{v})=f_{1,\mathbf{v}}\circ \tilde{u}(\mathbf{v})$$ is then a regular $J_{\mathbf{v}}$-holomorphic curve with $k$ ends on $\mathbb{L}(\mathbf{v})$ instead of $\mathbb{L}$. \end{proof} By Lemma \ref{area1}, one can apply Lemma \ref{fukaya} to all the top level curves of the buildings $\mathbf{ F}$ and $\mathbf{ G}$ from Proposition \ref{existence2} and Proposition \ref{existence3}, to obtain new buildings $\mathbf{ F}(\mathbf{v})$ and $\mathbf{G}(\mathbf{v})$.
Indeed, Lemma \ref{area1} implies that the top level curves are somewhere injective (and are actually embedded as they are limits of embedded curves) so for a generic choice of $J$ they are regular. Applying Theorem 1 from \cite{we}, we even have the stronger statement that our curves are regular for all $J$. For example, suppose that the top level curves of $\mathbf{ F}$ are \begin{equation*} \label{ } \{u_{\mathbb{L}},\underline{u},u_{L_{1,1}}, u_1, \dots, u_{d-1},\frak{u}_1, \dots, \frak{u}_d\}, \end{equation*} where the $u_i$ belong to $\mathcal{T}_0 \cup \mathcal{T}_{\infty}$ and the $\frak{u}_j$ belong to $\frak{s}_0 \cup \frak{s}_{\infty}$. Then for $\mathbf{ v}=(a,b)$ with $\|\mathbf{v}\|$ sufficiently small we can define the deformed building $\mathbf{ F}(\mathbf{v})$ to be the one whose top level curves are \begin{equation*} \label{ } \{ u_{\mathbb{L}}(\mathbf{v}), \underline{u}(\mathbf{v}),u_{L_{1,1}},u_1(\mathbf{v}), \dots, u_{d-1}(\mathbf{v}),\frak{u}_1, \dots, \frak{u}_d\} \end{equation*} and whose middle and bottom level curves are the same as those of $\mathbf{ F}$ but are now considered to map to copies of ${\mathbb R} \times S^* \mathbb{T}^2$ and $T^* \mathbb{T}^2$ that correspond to $\mathbb{L}(\mathbf{ v})$ rather than $\mathbb{L}$. Note that $\mathbf{ F}(\mathbf{v})$ still has a continuous compactification $\bar{\mathbf{ F}}(\mathbf{v}) \colon S^2 \to S^2 \times S^2$ which can be deformed arbitrarily close to $\mathbb{L}(\mathbf{ v})$ to obtain a smooth symplectic sphere $F=F(\mathbf{ v}) \colon S^2 \to S^2 \times S^2$ which is $J$-holomorphic away from a small neighborhood of $\mathbb{L}(\mathbf{ v})$. Our second deformation process changes the essential $J$-holomorphic curve $u_{\mathbb{L}}$ of $\mathbf{ F}$ into one which is pseudo-holomorphic with respect to an almost-complex structure that is stretched along additional nearby Lagrangian tori. \begin{lemma}\label{away} For $b \neq 0$, set $\mathbf{V} = \{ (0,0), (0,b)\}$. 
Let $J_s$, for $s \in [0,1]$, be a smooth family of almost complex structures in $\mathcal{J}_{\mathcal{U}(\mathbb{L})}$ that connects $J$ to $J_{\mathbf{V} }$ in a manner that manifests the stretching of $J$ along $\mathbb{L}((0,b))$. The essential curve $u_{\mathbb{L}}$ of $\mathbf{F}$ belongs to a smooth family of $J_s$-holomorphic planes $u_{\mathbb{L}}(s)$ for $s \in [0,1]$. \end{lemma} \begin{proof} By Lemma \ref{area1}, the initial curve $u_{\mathbb{L}}$ has area equal to $1$. Since $\mathbb{L}$ is monotone, no degenerations are possible until $s=1$. In other words, the family of deformed curves $u_{\mathbb{L}}(s)$ exists for all $s \in [0,1)$ and it suffices to show that it extends to $s=1$. Arguing by contradiction, assume that there is a sequence $s_j \to 1$ such that the curves $u_{\mathbb{L}}(s_j)$ converge to a nontrivial $J_{\mathbf{V} }$-holomorphic building $\mathbf{ H}$ which includes curves with punctures asymptotic to $\mathbb{L}(\mathbf{v})$ with $\mathbf{v}=(0,b)$. We will show that this implies that, unlike $u_{\mathbb{L}}$, none of the curves of $\mathbf{H}$ intersect $T_0$, a contradiction. \medskip \noindent{Claim 1.} Let $v$ be a $J_{\mathbf{V} }$-holomorphic curve of $\mathbf{ H}$. Any puncture of $v$ asymptotic to $\mathbb{L}(\mathbf{v})$ must cover a closed geodesic in a class $(k,l) \in H_1(\mathbb{L}(\mathbf{v});{\mathbb Z})$ with $k \leq0$. \medskip \begin{proof} Since the closure of $p \circ u_{\mathbb{L}}$ is $A_0$, $u_{\mathbb{L}}$ is disjoint from the leaves of $\mathcal{F}$ which intersect $\mathcal{U}(\mathbb{L})$ in the region $\{P_1 >0\}$. The same is true of the curves $u_{\mathbb{L}}(s)$ for all $s<1$. Hence, $v$ must also be disjoint from these leaves. The curve $v$ can be extended smoothly to the oriented blow-up of the relevant puncture, such that the resulting map $\bar{v}$ acts on the corresponding boundary circle as $$\theta \mapsto (0,b, Q_1 + k \theta, Q_2 + l \theta)$$ for some $Q_1, Q_2 \in S^1$. 
The tangent space to the image of $\bar{v}$ at a boundary point on the circle is spanned by $\{ k \frac{\partial}{\partial Q_1} + l \frac{\partial}{\partial Q_2}, k \frac{\partial}{\partial P_1} + l \frac{\partial}{\partial P_2} \}$. If $k$ were positive, this would contradict the fact that $v$ is disjoint from the leaves through $\{P_1 >0\}$ since $\mathbf{v}=(0,b)$.\end{proof} \medskip \noindent{Claim 2.} Let $v$ be a $J_{\mathbf{V} }$-holomorphic curve with a puncture that is asymptotic to $\mathbb{L}(\mathbf{v})$ along a geodesic in a class which is a multiple of the foliation class, i.e. of the form $(0,l) \in H_1^{\psi(\mathbf{v})}(\mathbb{L}(\mathbf{v});{\mathbb Z})$. Then $v$ must cover a plane or cylinder of a twice broken leaf of the foliation $\mathcal{F}(\mathbf{V} )$. \medskip \begin{proof} This follows from the asymptotic properties of holomorphic curves and the fact that $v$ lies in $\{P_1 \le 0\}$, as in Lemma 6.2 of \cite{hl}. \end{proof} We can now complete the proof of Lemma \ref{away}. Let $\mathbf{ H}_{\mathrm{ top}}$ denote the collection of top level curves of $\mathbf{ H}$, let $\mathbf{H}_{1,1}$ be the subbuilding consisting of the middle and bottom level curves of $\mathbf{ H}$ that map to the copies of ${\mathbb R} \times S^*\mathbb{T}^2$ and $T^*\mathbb{T}^2$ corresponding to $L_{1,1}$, and let $\mathbf{H}_{\mathbf{v}}$ be the subbuilding consisting of the middle and bottom level curves of $\mathbf{ H}$ that map to the copies of ${\mathbb R} \times S^*\mathbb{T}^2$ and $T^*\mathbb{T}^2$ corresponding to $\mathbb{L}(\mathbf{v})$. Now consider the classes $(k_1,l_1), \dots,(k_m,l_m) \in H_1(\mathbb{L}(\mathbf{v});{\mathbb Z})$ of the geodesics determined by all of the punctures of top level curves of $\mathbf{ H}$ that are asymptotic to $\mathbb{L}(\mathbf{v})$. These constitute the boundary of the cycle in $\mathbb{L}(\mathbf{v})$ that is obtained by gluing together the compactifications of the curves of $\mathbf{H}_{\mathbf{v}}$. 
Hence, the sum of the classes $(k_1,l_1), \dots,(k_m,l_m)$ must be $(0,0)$ and, by Claim 1, each $k_i$ must be zero. It then follows from Claim 2, that any curve of $\mathbf{ H}$ with an end on $\mathbb{L}(\mathbf{v})$ must cover a plane or cylinder of a broken leaf of $\mathcal{F}(\mathbf{v})$. Now partition the curves of $\mathbf{ H}_{\mathrm{ top}} \cup \mathbf{H}_{\mathbf{v}} =\mathbf{ H} \smallsetminus \mathbf{ H}_{1,1}$ into connected components based on the matching of their ends in the copies of ${\mathbb R} \times S^*\mathbb{T}^2$ and $T^*\mathbb{T}^2$ corresponding to $\mathbb{L}({\mathbf{v}})$. Denote these components by $\mathbf{ H}_1, \dots, \mathbf{ H}_k$. The compactification of each $\mathbf{ H}_j$ is a cycle representing a class in $\pi_2(S^2 \times S^2,L_{1,1})$. By monotonicity, the symplectic area of this cycle is a positive integer. Since the area of $u_{\mathbb{L}}$ is one, we must have $k=1$ and the area of the cycle determined by $\mathbf{ H}_1$ must be one. By our assumption, $\mathbf{ H}_1$ must contain a curve with an end on $\mathbb{L}(\mathbf{v})$. By the discussion above, this implies that all the curves of $\mathbf{ H}_1$ must cover a plane or cylinder of a broken leaf of $\mathcal{F}(\mathbf{V} )$ through $\mathbb{L}(\mathbf{v})$. None of these leaves intersect $T_0$, and neither do the curves of $\mathbf{ H}_{1,1}$. Hence, no curve of $\mathbf{ H} = \mathbf{ H}_1 \cup \mathbf{ H}_{1,1}$ intersects $T_0$, which is the desired contradiction. \end{proof} The $J_{\mathbf{V} }$-holomorphic curve $u_{\mathbb{L}}(1)$ is disjoint from the region $\{P_1>0\}$. By positivity of intersection, since it does not cover a leaf of the foliation, $u_{\mathbb{L}}(1)$ is disjoint from the hypersurface $\{P_1=0\}$. The closure of $p\circ u_{\mathbb{L}}(1)$ is equal to $A_0$ and $u_{\mathbb{L}}(1)$ intersects the leaves of $\mathcal{F}(\mathbf{V} )$, that pass through the annuli $\{P_1 =c<0, Q_1=\theta\}$, exactly once. 
Arguing as above, one sees that the statement of Lemma \ref{away} also holds for $$\mathbf{V} =((0,0),(0,b_1),(0,b_2)),$$ for any nonzero $b_1, b_2$ in $(-\epsilon, \epsilon)$. Translating these Lagrangian tori slightly in the $P_1$-direction, we then get the following, more general, version of our second deformation process. \begin{corollary} \label{up} Let $u_{\mathbb{L}}$ be the essential curve of $\mathbf{ F}$ which is mapped by $p$ onto $A_0$. Choose nonzero constants $b_1, b_2$ in $(-\epsilon, \epsilon)$. If $\delta>0$ is sufficiently small, then for any $a_1, a_2$ in $(-\delta, \delta)$ and $$\mathbf{V} =\{(0,0),(a_1,b_1),(a_2,b_2)\} $$ there is a $J_{\mathbf{V} }$-holomorphic curve $$u_{\mathbb{L}}^{\mathbf{V} }\colon {\mathbb C} \to S^2 \times S^2 \smallsetminus (L(\mathbf{V} ) \cup L_{1,1})$$ in the class of $u_{\mathbb{L}}$ such that $u_{\mathbb{L}}^{\mathbf{V} }$ is disjoint from the region $\{P_1>0\}$, the closure of the image of $p\circ u_{\mathbb{L}}^{\mathbf{V} }$ is $ A_0$, and $u_{\mathbb{L}}^{\mathbf{V} } $ intersects the leaves of $\mathcal{F}(\mathbf{V} )$, that pass through the annuli $\{P_1 =c<0, Q_1=\theta\}$, exactly once. \end{corollary} \subsubsection{Intersections near $\mathbb{L}$} We now use the deformation tools of Lemma \ref{fukaya} and Corollary \ref{up} to resolve some intersection patterns. Let $\mathbf{ F}$ be a building of Type 3 as in Proposition \ref{existence2} and consider translation data $$\mathbf{V} =\{\mathbf{ 0},\mathbf{ v}_1, \mathbf{ v}_2\} = \{ (0,0),(a_1,b_1), (a_2,b_2)\}.$$ In what follows we will always assume that $\mathbf{ v}_1$ and $\mathbf{ v}_2$ are distinct and nontrivial. 
The collection of top level curves of $\mathbf{ F}$ is of the form \begin{equation*} \label{ } \{u_{\mathbb{L}},\underline{u},u_{L_{1,1}}, u_1, \dots, u_{d-1},\frak{u}_1, \dots, \frak{u}_d\}, \end{equation*} where the $u_1, \dots, u_{\alpha_0}$ belong to $\mathcal{T}_0$, $u_{\alpha_0+1}, \dots, u_{d-1}$ belong to $\mathcal{T}_{\infty}$, and the $\frak{u}_j$ belong to $\frak{s}_0 \cup \frak{s}_{\infty}$. If $\|\mathbf{ v}_1\|$ is sufficiently small then, as described in Remark \ref{defF}, the deformed building $\mathbf{ F}(\mathbf{v}_1)$ is well-defined and its top level curves are \begin{equation*} \label{ } \{ u_{\mathbb{L}}(\mathbf{v}_1), \underline{u}(\mathbf{v}_1),u_{L_{1,1}},u_1(\mathbf{v}_1), \dots, u_{d-1}(\mathbf{v}_1),\frak{u}_1, \dots, \frak{u}_d\}. \end{equation*} Choosing $a_1$ to be smaller still, if necessary, we may assume that Corollary \ref{up} holds for $\mathbf{V}$ for $|a_2|$ sufficiently small. This yields a $J_{\mathbf{V} }$-holomorphic curve $u_{\mathbb{L}}^{\mathbf{V} }$ which is disjoint from the region $\{P_1>0\}$ and intersects the leaves of $\mathcal{F}(\mathbf{V} )$, that pass through the planes $\{P_1 =c<0, Q_1=\theta\}$, exactly once. The intersection number between each top level curve of $\mathbf{ F}(\mathbf{v}_1)$ and the curve $u_{\mathbb{L}}^{\mathbf{V} }$ is well defined since, as $\mathbf{ v}_1 \neq \mathbf{ 0}$, they are asymptotic to disjoint Lagrangian tori. Moreover all these intersections are positive. We denote the total of these intersection numbers by $\mathbf{ F}(\mathbf{v}_1)\bullet u_{\mathbb{L}}^{\mathbf{V} }$. Similarly, the intersection number of each top level curve of $\mathbf{ F}(\mathbf{v}_1)$ with any of the planes in either $\mathcal{T}_0$ or $\mathcal{T}_{\infty}$ is well-defined and all such intersections are positive. 
Since this number is the same for any plane in the family, we denote these numbers by $\mathbf{ F}(\mathbf{v}_1)\bullet\mathcal{T}_0$ and $\mathbf{ F}(\mathbf{v}_1)\bullet\mathcal{T}_\infty$, respectively. Let $\bar{\mathbf{ F}}(\mathbf{v}_1) \colon S^2 \to S^2 \times S^2$ be the compactification of $\mathbf{ F}(\mathbf{v}_1)$, let $\mathbb{E} \colon (D^2,S^1) \to (S^2 \times S^2, \mathbb{L})$ be the compactification of the curve $u_{\mathbb{L}}^{\mathbf{ V}}$, and let $\bar{\mathcal{T}_0}$ and $\bar{\mathcal{T}}_{\infty}$ be the solid tori obtained by compactifying the planes of $\mathcal{T}_0$ and $\mathcal{T}_{\infty}$. Deforming $\bar{\mathbf{ F}}(\mathbf{v}_1)$ arbitrarily close to $\mathbb{L}(\mathbf v_1)$, we obtain a smooth map $F=F(\mathbf{ v}_1) \colon S^2 \to S^2 \times S^2$ such that \begin{equation} \label{sameEs} F \bullet \mathbb{E} = \bar{\mathbf{ F}}(\mathbf{v}_1) \bullet \mathbb{E} = \mathbf{ F}(\mathbf{v}_1)\bullet u_{\mathbb{L}}^{\mathbf{V} } \end{equation} and \begin{equation} \label{sameT} F \bullet \bar{\mathcal{T}_*} = \bar{\mathbf{ F}}(\mathbf{v}_1) \bullet \bar{\mathcal{T}_*} = \mathbf{ F}(\mathbf{v}_1)\bullet {\mathcal{T}_*},\quad \text{ for $*=0,\infty$}. \end{equation} Moreover, the corresponding intersection points are identical. \begin{lemma}\label{FE+-} Consider $\mathbf{V} = \{\mathbf{ 0},\mathbf{ v}_1,\mathbf{ v}_2\}=\{(0,0),(a_1,b_1),(a_2,b_2)\}$ such that $\mathbf{ v}_1$ and $\mathbf{ v}_2$ are distinct, $a_1$ is negative, and $b_1$ and $b_2$ are nonzero. Suppose that $|a_1|$ is sufficiently small with respect to $|b_1|$.\\ \noindent If $b_1>0$, then $ F \bullet \bar{\mathcal{T}}_0 =0, $ $ F \bullet \bar{\mathcal{T}}_{\infty} =1, $ and $ F \bullet \mathbb{E} =\alpha_0. $ \\ \noindent If $b_1<0$, then $ F \bullet \bar{\mathcal{T}}_0 =1, $ $F \bullet \bar{\mathcal{T}}_{\infty} =0,$ and $ F \bullet \mathbb{E} =d-1-\alpha_0. $ \end{lemma} \begin{proof} Here we give the proof of the case when $b_1$ is positive. 
The proof for $b_1<0$ is identical and is left to the reader. The map $F$ represents the class $(1,d)$. For each disk in $\bar{\mathcal{T}}_0$ there is a companion disc in $\bar{\mathcal{T}}_{\infty}$ such that the pair can be glued together, along $\mathbb{L}$, to form a sphere in the class $(0,1)$. Hence, $$F \bullet \bar{\mathcal{T}}_0 + F \bullet \bar{\mathcal{T}}_{\infty} =1.$$ Since all intersections are positive, in order to prove that $ F \bullet \bar{\mathcal{T}}_0 =0, $ and $F \bullet \bar{\mathcal{T}}_{\infty} =1,$ it suffices to prove that $F \bullet \bar{\mathcal{T}}_{\infty} \geq 1.$ In particular, in view of \eqref{sameT}, it suffices to show that $\underline{u}(\mathbf{ v}_1) \bullet \mathcal{T}_{\infty} \geq1.$ The planes of $\mathcal{T}_{\infty}$ intersect $\mathcal{U}(\mathbb{L})$ in annuli of the form $\{P_1 = 0, Q_1 = \theta, P_2 > 0\}$. The curve $\underline{u}(\mathbf{v}_1)$ intersects $\mathcal{U}(\mathbb{L})$ in the region $\{P_1 >a_1\}$. Since it is essential, $\underline{u}(\mathbf{v}_1)$ must intersect every cylinder of the form $\{P_1=a, Q_1 = \theta \}$ with $a > a_1$. The curve $\underline{u}(\mathbf{v}_1)$ also has an end asymptotic to a circle in the torus $\mathbb{L}(\mathbf{v}_1)=\{P_1 = a_1, P_2 =b_1\}$. Since $b_1$ is positive, it follows that for all $a$ sufficiently close to $a_1$, $\underline{u}(\mathbf{v}_1)$ will intersect the annuli $\{P_1 = a, Q_1 = \theta, P_2 > 0\}$. Hence, if $|a_1|$ is sufficiently small with respect to $b_1$, then $\underline{u}(\mathbf{v}_1)$ must intersect the planes of $\mathcal{T}_{\infty}$ at least once, as desired. It remains to prove that $F \bullet \mathbb{E} =\alpha_0$ when $|a_1|$ is sufficiently small with respect to $|b_1|$. 
By \eqref{sameEs}, and the fact that the top level curves of $\mathbf{ F}(\mathbf{ v}_1)$ are \begin{equation*} \label{ } \{ u_{\mathbb{L}}(\mathbf{v}_1), \underline{u}(\mathbf{v}_1),u_{L_{1,1}},u_1(\mathbf{v}_1), \dots, u_{d-1}(\mathbf{v}_1),\frak{u}_1, \dots, \frak{u}_d\}, \end{equation*} it suffices to prove that for $|a_1|$ sufficiently small with respect to $|b_1|$, we have \begin{equation} \label{ones} u_{i}(\mathbf{v}_1) \bullet u_{\mathbb{L}}^{\mathbf{V} }= 1 \quad \text{ for } 1\leq i \leq \alpha_0, \end{equation} and $u_{\mathbb{L}}^{\mathbf{V} }$ is disjoint from all the other top level curves of $\mathbf{ F}(\mathbf{ v}_1)$. Recall that $u_{\mathbb{L}}^{\mathbf{V} }$ is an essential curve, and that the image of $p \circ u_{\mathbb{L}}^{\mathbf{V} }$ is $A_0$. So if $w$ is another curve in $S^2 \times S^2$ and $p \circ w$ is disjoint from $A_0$, then $u_{\mathbb{L}}^{\mathbf{V} }$ is disjoint from $w$. This observation implies that $u_{\mathbb{L}}^{\mathbf{V} }$ is disjoint from $ u_{L_{1,1}}$ and the $\frak{u}_j$ for $j =1,\dots, d$ since these curves all project into $A_{\infty}$. Another consequence of $u_{\mathbb{L}}^{\mathbf{V} }$ being essential with respect to $\mathcal{F}$, is that it intersects any fiber of $\mathcal{F}$ either once or not at all. The curve $u_{\mathbb{L}}^{\mathbf{V} }$ intersects $\mathcal{U}(\mathbb{L})$ in the region $\{P_1<0\}$ and has an end asymptotic to a circle in $\mathbb{L}=\{P_1 = P_2 =0\}$. Since $b_1 > 0$, this implies that for all $a_1<0$ such that $|a_1|$ is sufficiently small with respect to $b_1$, $u_{\mathbb{L}}^{\mathbf{V} }$ must intersect the annuli of the form $\{P_1 = a_1, Q_1 = \theta, P_2 < b_1\}$ exactly once. Now the planes $u_{i}(\mathbf{v}_1)$ all belong to broken fibers of $\mathcal{F}$ that intersect $\mathcal{U}(\mathbb{L})$. For $1 \le i \le \alpha_0$, the curves $u_{i}(\mathbf{v}_1)$ intersect $\mathcal{U}(\mathbb{L})$ in annuli of the form $\{P_1 = a_1, Q_1 = \theta, P_2 < b_1\}$. 
For $i > \alpha_0$, the $u_{i}(\mathbf{v}_1)$ intersect $\mathcal{U}(\mathbb{L})$ in annuli of the form $\{P_1 = a_1, Q_1 = \theta, P_2 > b_1\}$. Hence, for $1 \le i \le \alpha_0$, $u_{\mathbb{L}}^{\mathbf{V} }$ intersects the fiber of $\mathcal{F}$ containing $u_{i}(\mathbf{v}_1)$ at a point on $u_{i}(\mathbf{v}_1)$. This yields equation \eqref{ones}. On the other hand, for $i>\alpha_0$, $u_{\mathbb{L}}^{\mathbf{V} }$ intersects the fiber of $\mathcal{F}$ containing $u_{i}(\mathbf{v}_1)$ at a point in the complement of $u_{i}(\mathbf{v}_1)$. Hence, $u_{\mathbb{L}}^{\mathbf{V} }$ is disjoint from these curves. Next we show that, when $|a_1|$ is sufficiently small with respect to $|b_1|$, $u_{\mathbb{L}}^{\mathbf{V} }$ is disjoint from $\underline{u}(\mathbf{ v}_1)$. Considering projections, it is clear that the part of $\underline{u}(\mathbf{ v}_1)$ in the complement of $\mathcal{U}(\mathbb{L})$ is disjoint from $u_{\mathbb{L}}^{\mathbf{V} }$ since its projection is contained in the interior of $B \cup A_{\infty}$. Suppose that $a_1=0$. Then $\underline{u}((0,b_1)) \cap \mathcal{U}(\mathbb{L})$ is contained in $\{P_1>0\}$ and is asymptotic to $\mathbb{L}(0,b_1)$. This is disjoint from $u_{\mathbb{L}}^{\mathbf{V} }\cap \mathcal{U}(\mathbb{L})$ which is contained in $\{P_1<0\}$ and is asymptotic to $\mathbb{L} =\mathbb{L}(0,0)$. By continuity, $\underline{u}((a_1,b_1)) \cap \mathcal{U}(\mathbb{L})$ is then disjoint from $u_{\mathbb{L}}^{\mathbf{V} }\cap \mathcal{U}(\mathbb{L})$ for all $a_1<0$ with $|a_1|$ sufficiently small with respect to $|b_1|$. Lastly, we must prove that \begin{equation*} \label{ } u_{\mathbb{L}}(\mathbf{ v}_1) \bullet u_{\mathbb{L}}^{\mathbf{V} }=0. \end{equation*} when $|a_1|$ is sufficiently small with respect to $|b_1|$. 
Since the compactifications of $u_{\mathbb{L}}^{\mathbf{V} }$ and $u_{\mathbb{L}}$ are homotopic in the space of smooth maps $(D^2, S^1) \to (S^2 \times S^2, \mathbb{L})$, it suffices to show that $$ u_{\mathbb{L}}(\mathbf{ v}_1) \bullet u_{\mathbb{L}}=0.$$ Let $\bar{u}_{\mathbb{L}}(\mathbf{ v}_1)$ and $\bar{u}_{\mathbb{L}}$ be compactifications of $u_{\mathbb{L}}(\mathbf{ v}_1)$ and $u_{\mathbb{L}}$. We claim that $u_{\mathbb{L}}(\mathbf{ v}_1) \bullet u_{\mathbb{L}}=0$ is equivalent to the fact that the Maslov index of $\bar{u}_{\mathbb{L}}$ is equal to $2$. To see this we recall that \begin{equation} \label{relmas} \mu(\bar{u}_{\mathbb{L}}) =2 c_1(\bar{u}_{\mathbb{L}}) \end{equation} where $c_1(\bar{u}_{\mathbb{L}})$ is the relative Chern number of $\bar{u}_{\mathbb{L}}$ which is equal to the number of zeros of a generic section $\xi$ of $\bar{u}_{\mathbb{L}}^*(\Lambda^2 (T(S^2 \times S^2)))$ such that $\xi|_{S^1}$ is nonvanishing and is tangent to $\Lambda^2 (T \mathbb{L})$. Let $\nu(\bar{u}_{\mathbb{L}})$ be the normal bundle to the embedding $\bar{u}_{\mathbb{L}}$ and fix an identification of $\bar{u}_{\mathbb{L}}^*(T(S^2 \times S^2))$ with the Whitney sum $\nu(\bar{u}_{\mathbb{L}}) \oplus T(D^2)$. For polar coordinates $(r, \theta)$ on $D^2$ consider the section $r\frac{\partial}{\partial \theta}$ of $\bar{u}_{\mathbb{L}}^*(T(S^2 \times S^2))$. The restriction $r\frac{\partial}{\partial \theta}|_{S^1}$ is nonvanishing and tangent to $T\mathbb{L}$. Replacing $\mathbf{ v}_1$ by $t\mathbf{ v}_1$ for some small $t>0$, if necessary, we may assume that $\bar{u}_{\mathbb{L}}(\mathbf{ v}_1)$ is close enough to $\bar{u}_{\mathbb{L}}$, in the $C^1$-topology, to be identified with a section, $\sigma_{\mathbb{L}}(\mathbf{ v}_1)$, of $\nu(\bar{u}_{\mathbb{L}}) \subset \bar{u}_{\mathbb{L}}^*(T(S^2 \times S^2))$. The restriction $\sigma_{\mathbb{L}}(\mathbf{ v}_1)|_{S^1}$ is roughly parallel to the vector field $\frac{\partial}{\partial P_2}$. 
By rotating in the normal bundle this section is homotopic through nonvanishing sections to a section of $T\mathbb{L}$ along $\partial D^2$ which is orthogonal to $\frac{\partial}{\partial \theta}$. Set $\xi = r\frac{\partial}{\partial \theta} \wedge \sigma_{\mathbb{L}}(\mathbf{ v}_1)$. It follows from the discussion above that $\xi|_{S^1}$ is nonvanishing and is tangent to $\Lambda^2 (T \mathbb{L})$. Moreover, the zeroes of $\xi$ correspond to the union of the zeros of $r\frac{\partial}{\partial \theta}$ and $\sigma_{\mathbb{L}}(\mathbf{ v}_1)$. Since $\bar{u}_{\mathbb{L}}$ is embedded, the zeros of $\sigma_{\mathbb{L}}(\mathbf{ v}_1)$ exactly correspond to the intersections $u_{\mathbb{L}}(\mathbf{ v}_1) \bullet u_{\mathbb{L}}$. By \eqref{relmas}, we then have \begin{equation*} \label{relmas2} \mu(\bar{u}_{\mathbb{L}}) =2 (1 + u_{\mathbb{L}}(\mathbf{ v}_1) \bullet u_{\mathbb{L}}). \end{equation*} As $\mu(\bar{u}_{\mathbb{L}})=2$ (as it has area $1$ by Lemma \ref{area1}, and $\mathbb{L}$ is monotone) we have $u_{\mathbb{L}}(\mathbf{ v}_1) \bullet u_{\mathbb{L}}=0$, and are done. \end{proof} We can deform a building $\mathbf{ G}$ as in Proposition \ref{existence3} within this same framework. As $\mathbf{ G}$ is of Type 3, its collection of top level curves looks like \begin{equation*} \label{ } \{v_{\mathbb{L}},\underline{v},v_{L_{1,1}}, v_1, \dots, v_{d},\frak{v}_1, \dots, \frak{v}_{d-1}\}, \end{equation*} where $v_1, \dots, v_{\gamma_0}$ belong to $\mathcal{T}_0$, $v_{\gamma_0+1}, \dots, v_{d}$ belong to $\mathcal{T}_{\infty}$ and $\frak{v}_j$ belong to $\frak{s}_0 \cup \frak{s}_{\infty}$. Assuming that $\mathbf{ v}_2=(a_2,b_2)$ is sufficiently small we can deform $\mathbf{ G}$ to obtain a new building $\mathbf{ G}(\mathbf{v}_2)$ with top level curves \begin{equation*} \label{ } \{ v_{\mathbb{L}}(\mathbf{v}_2), \underline{v}(\mathbf{v}_2),u_{L_{1,1}},v_1(\mathbf{v}_2), \dots, v_{d}(\mathbf{v}_2),\frak{v}_1, \dots, \frak{v}_{d-1}\}. 
\end{equation*} Let $\bar{\mathbf{ G}}(\mathbf{v}_2) \colon S^2 \to S^2 \times S^2$ be the compactification of $\mathbf{ G}(\mathbf{v}_2)$. Again we can deform $\bar{\mathbf{G}}(\mathbf{v}_2)$, arbitrarily close to $\mathbb{L}(\mathbf v_2)$, to get a smooth map $G=G(\mathbf{ v}_2) \colon S^2 \to S^2 \times S^2$ such that \begin{equation*} \label{sameE} G \bullet \mathbb{E} = \bar{\mathbf{ G}}(\mathbf{v}_2) \bullet \mathbb{E} = \mathbf{ G}(\mathbf{v}_2)\bullet u_{\mathbb{L}}^{\mathbf{V} } \end{equation*} and \begin{equation*} \label{sameT} G \bullet \bar{\mathcal{T}_*} = \bar{\mathbf{ G}}(\mathbf{v}_2) \bullet \bar{\mathcal{T}_*} = \mathbf{ G}(\mathbf{v}_2)\bullet {\mathcal{T}_*},\quad \text{ for $*=0,\infty$}. \end{equation*} Arguing as in the proof of Lemma \ref{FE+-} we get the following. \begin{lemma}\label{GE+-} Consider $\mathbf{V} = \{\mathbf{ 0},\mathbf{ v}_1,\mathbf{ v}_2\}=\{(0,0),(a_1,b_1),(a_2,b_2)\}$ such that $a_2$ is negative, and $b_1$ and $b_2$ are nonzero. Suppose that $|a_2|$ is sufficiently small with respect to $|b_2|$.\\ \noindent If $b_2>0$, then $$ G \bullet \mathbb{E} = \gamma_0 + v_{\mathbb{L}}(\mathbf{ v}_2) \bullet u_{\mathbb{L}}^{\mathbf{V} }, $$ $ G \bullet \bar{\mathcal{T}}_0 =0, $ and $G \bullet \bar{\mathcal{T}}_{\infty} =1.$\\ \noindent If $b_2<0$, then $$ G \bullet \mathbb{E} = d-\gamma_0 +v_{\mathbb{L}}(\mathbf{ v}_2) \bullet u_{\mathbb{L}}^{\mathbf{V} }, $$ $ G \bullet \bar{\mathcal{T}}_0 =1, $ and $G \bullet \bar{\mathcal{T}}_{\infty} =0.$ \end{lemma} The term $v_{\mathbb{L}}(\mathbf{ v}_2) \bullet u_{\mathbb{L}}^{\mathbf{V} }$ is not necessarily equal to zero. Instead we have the following identity. 
\begin{lemma}\label{noleftg} For $\mathbf{V} = \{\mathbf{ 0},\mathbf{ v}_1,\mathbf{ v}_2\}=\{(0,0),(a_1,b_1),(a_2,b_2)\} $ where $b_1$ and $b_2$ have opposite sign, and $a_1$ and $a_2$ sufficiently small relative to $b_1$ and $b_2$ we have \begin{equation*} \label{ } v_{\mathbb{L}}(\mathbf{ v}_2) \bullet u_{\mathbb{L}}^{\mathbf{V} }=v_{\mathbb{L}}(\mathbf{ v}_2) \bullet u_{\mathbb{L}}(\mathbf{ v}_1). \end{equation*} \end{lemma} \begin{proof} First we consider the case when $a_1 = a_2 =0$. The image of the map $v_{\mathbb{L}}(\mathbf{ v}_2)$ projects to $A_0$ and its boundary lies in $\mathbb{L}(\mathbf{ v}_2)$. Hence, using our assumption on sign, the family of Lagrangians $\mathbb{L}(t \mathbf{ v}_1))$ for $0 \le t \le 1$ are disjoint from the compactification of $v_{\mathbb{L}}(\mathbf{ v}_2)$. It then follows from the proof of Lemma \ref{fukaya}, that the compactification of $u_{\mathbb{L}}$ is connected to that of $u_{\mathbb{L}}(\mathbf{ v}_1)$ by a path of smooth maps $u_t \colon (D^2, S^1) \to (S^2 \times S^2, \mathbb{L}(t \mathbf{ v}_1))$. Therefore we have $$v_{\mathbb{L}}(\mathbf{ v}_2) \bullet u_{\mathbb{L}}=v_{\mathbb{L}}(\mathbf{ v}_2) \bullet u_{\mathbb{L}}(\mathbf{ v}_1),$$ as required. For the general case we use the fact that the maps vary continuously with the parameters and so the intersection numbers remain unchanged for $a_1$ and $a_2$ sufficiently small. \end{proof} Since $\mathbf{ v}_1$ and $\mathbf{ v}_2$ are distinct, the intersection numbers of some of the top level curves of $\mathbf{ F}(\mathbf{v}_1)$ and $\mathbf{ G}(\mathbf{v}_2)$ are well-defined. The following results concerning these intersections, will be useful. 
\begin{lemma}\label{} For $\mathbf{ v}_1 =(a_1,b_1)$ and $\mathbf{ v}_2 =(a_2,b_2)$, suppose that $a_1< a_2<0$, $|a_1|$ is sufficiently small with respect to $|b_1|$, and $|a_2|$ is sufficiently small with respect to $|b_2|$.\\ \noindent If $b_1>b_2$, then \begin{equation*} \label{ } u_i(\mathbf{ v}_1) \bullet v_{\mathbb{L}}(\mathbf{ v}_2) =1 \text{ for } i=1, \dots, \alpha_0 \end{equation*} and \begin{equation*} \label{ } v_i(\mathbf{ v}_2) \bullet \underline{u}(\mathbf{ v}_1)=1 \text{ for } i=\gamma_0+1, \dots, d. \end{equation*} If $b_1<b_2$, then \begin{equation*} \label{ } u_i(\mathbf{ v}_1) \bullet v_{\mathbb{L}}(\mathbf{ v}_2) =1 \text{ for } i=\alpha_0+1, \dots, d-1 \end{equation*} and \begin{equation*} \label{ } v_i(\mathbf{ v}_2) \bullet \underline{u}(\mathbf{ v}_1)=1 \text{ for } i=1, \dots, \gamma_0. \end{equation*} Moreover, all the intersection points here project to $A_0$. \end{lemma} \begin{proof} Since the curves $\underline{u}(\mathbf{ v}_1)$ and $v_{\mathbb{L}}(\mathbf{ v}_2)$ are essential with respect to $\mathcal{F}$, they intersect a leaf of the foliation either once or not at all. Hence it suffices to detect a single intersection of the relevant pairs of curves listed. We detect an intersection for the first type of pair above and leave the other cases to the reader. For $1 \le i \le \alpha_0$ the planes $u_i(\mathbf{ v}_1)$ intersect $\mathcal{U}(\mathbb{L})$ in annuli $\{P_1 = a_1, Q_1 = \theta, P_2 < b_1\}$. As $v_{\mathbb{L}}((0,b_2))$ is asymptotic to $\mathbb{L}((0,b_2)) = \{P_1 = 0, P_2 = b_2\}$ it intersects $u_i(\mathbf{ v}_1)$ provided $a_1$ is sufficiently small (since the boundary of $v_{\mathbb{L}}((0,b_2))$ intersects all annuli $\{P_1 = 0, Q_1 = \theta, P_2 < b_1\}$). For $a_2$ sufficiently small, the plane $v_{\mathbb{L}}(\mathbf{ v}_2)$ is a deformation of $v_{\mathbb{L}}((0,b_2))$ and so the intersection persists. As $v_{\mathbb{L}}(\mathbf{ v}_2)$ intersects fibers at most once, the intersection number is equal to $1$. 
Since $a_1<0$, the intersection point projects to $A_0$. \end{proof} \begin{corollary}\label{FcapG} For $\mathbf{ v}_1 =(a_1,b_1)$ and $\mathbf{ v}_2 =(a_2,b_2)$, suppose that $a_1< a_2<0$, $|a_1|$ is sufficiently small with respect to $|b_1|$, and $|a_2|$ is sufficiently small with respect to $|b_2|$.\\ \noindent If $b_1>b_2$, then $F \cap G$ contains at least $\alpha_0 + d- \gamma_0$ points in $\mathcal{U}(\mathbb{L})$ that project to $A_0$.\\ If $b_1<b_2$, then $F \cap G$ contains at least $d-1-\alpha_0 + \gamma_0$ points in $\mathcal{U}(\mathbb{L})$ that project to $A_0$. \end{corollary} \begin{remark} It follows from Lemma \ref{noleftg} that any \emph{excess} intersection points between $F$ and $G$ in $\mathcal{U}(\mathbb{L})$ are in bijection with intersection points between $G$ and $\mathbb{E}$, at least if the $b_i$ have opposite sign and the $a_i$ are sufficiently small. \end{remark} \subsubsection{Adding deformations near $L_{1,1}$.} To completely resolve the intersections of $F$ and $G$ we must also apply deformations in the Weinstein neighborhood $$ \mathcal{U}(L_{1,1}) = \{|p_1|<\epsilon,\, |p_2|<\epsilon\}. $$ Here we consider nearby Lagrangian tori of the form $$L_{1,1}(\mathbf{w}) := \{p_1 =c, p_2 =d\},$$ for $\mathbf{w} =(c,d) \in (-\epsilon, \epsilon) \times (-\epsilon, \epsilon).$ The space of almost complex structures $\mathcal{J}_{\mathcal{U}(L_{1,1})}$ is defined following the definition of $\mathcal{J}_{\mathcal{U}(\mathbb{L})}$. Given collections $$ \mathbf{V} =\{\mathbf{v}_1, \dots, \mathbf{v}_k \} =\{(a_1,b_1), \dots, (a_k,b_k)\}, $$ and $$ \mathbf{W} =\{\mathbf{w}_1, \dots, \mathbf{w}_l \} =\{(c_1,d_1), \dots, (c_l,d_l)\} $$ set $\mathbf{X} =\{\mathbf{V} ,\mathbf{W} \}$. Denote the corresponding almost-complex structure in $\mathcal{J}_{\mathcal{U}(\mathbb{L})} \cap \mathcal{J}_{\mathcal{U}(L_{1,1})}$ by $J_{\mathbf{X}}$. Lemma \ref{fukaya} generalizes to this setting as follows. 
\begin{lemma}\label{fukaya2} Let $u$ be a regular $J$-holomorphic curve with $k\geq0$ ends on $\mathbb{L}$ and $l\geq 0$ ends on $L_{1,1}$. For all $\mathbf{x}=\{\mathbf{v}, \mathbf{w}\}=\{(a,b), (c,d)\}$ with $\|\mathbf{x}\|$ sufficiently small, there is a $J_{\mathbf{x}}$-holomorphic curve $u(\mathbf{x})$ that represents the class in $\pi_2(S^2 \times S^2, \mathbb{L}(\mathbf{v}) \cup L_{1,1}(\mathbf{w}) )$ that corresponds to the class $[u] \in \pi_2(S^2 \times S^2, \mathbb{L}\cup L_{1,1} )$ under the obvious identification. The curve $u(\mathbf{x})$ has $k$ ends on $\mathbb{L}(\mathbf{v})$ and these represent the identical classes in $H_1^{\psi(\mathbf{v})}(\mathbb{L};{\mathbb Z})$ as do those of $u$ in $H_1^{\psi}(\mathbb{L};{\mathbb Z})$. The curve also has $l$ ends on $L_{1,1}(\mathbf{w})$ which represent the identical classes in $H_1^{\psi_{1,1}(\mathbf{w})}(L_{1,1};{\mathbb Z})$ as do those of $u$ in $H_1^{\psi_{1,1}}(L_{1,1};{\mathbb Z})$. \end{lemma} Corollary \ref{up} generalizes as follows. \begin{lemma} \label{up2} Let $u_{\mathbb{L}}$ and $u_{L_{1,1}}$ be the essential curves of a building $\mathbf{ F}$ as in Proposition \ref{existence2}. 
Let $\mathbf{X} =\{\mathbf{V} ,\mathbf{W} \}$ where $$\mathbf{V} =\{(0,0),(a_1,b_1), (a_2,b_2)\}$$ and $$\mathbf{W} =\{(0,0),(c_1,d_1), (c_2,d_2)\}.$$ If $b_1,b_2, d_1, \, d_2$ are in $(-\epsilon, \epsilon)$ and $a_1,a_2, c_1, c_2$ are in $(-\delta, \delta)$, then for all sufficiently small $\delta$ there is a $J_{\mathbf{X} }$-holomorphic curve $$u_{\mathbb{L}}^{\mathbf{\mathbf{X} }}\colon {\mathbb C} \to S^2 \times S^2 \smallsetminus (\mathbb{L}(\mathbf{V} ) \cup L_{1,1}(\mathbf{W} ))$$ in the class of $u_{\mathbb{L}}$ such that $u_{\mathbb{L}}^{\mathbf{\mathbf{X} }}$ is disjoint from the region $\{P_1>0\}$, the closure of the image of $p\circ u_{\mathbb{L}}^{\mathbf{X} }$ is $ A_0$, and $u_{\mathbb{L}}^{\mathbf{X} }$ intersects the leaves of $\mathcal{F}(\mathbf{X} )$, that pass through the planes $\{P_1 =c<0, Q_1=\theta\}$, exactly once. There is also a $J_{\mathbf{X} }$-holomorphic curve $$u_{L_{1,1}}^{\mathbf{X} }\colon {\mathbb C} \to S^2 \times S^2 \smallsetminus (L(\mathbf{V} ) \cup L_{1,1}(\mathbf{W} ))$$ in the class of $u_{L_{1,1}}$ such that $u_{L_{1,1}}^{\mathbf{X} }$ is disjoint from the region $\{p_1<0\}$, the closure of the image of $p\circ u_{L_{1,1}}^{\mathbf{X} }$ is $ A_{\infty}$, and $u_{L_{1,1}}^{\mathbf{X} }$ intersects the leaves of $\mathcal{F}(\mathbf{X} )$, that pass through the planes $\{p_1 =c>0, q_1=\theta\}$, exactly once. \end{lemma} \medskip \subsubsection{Completion of the proof of Proposition \ref{intcount}} Let $\mathbf{ F}$ be a building as in Proposition \ref{existence2} and let $\mathbf{ G}$ be a building as in Proposition \ref{existence3}. 
Set $$\mathbf{x_1} =\{\mathbf{ v}_1,\mathbf{w}_1\}= \{(a_1,b_1),(c_1,d_1)\},$$ $$\mathbf{x_2} =\{\mathbf{ v}_2,\mathbf{w}_2\}= \{(a_2,b_2),(c_2,d_2)\},$$ $$\mathbf{V} =\{\mathbf{ 0},\mathbf{ v}_1,\mathbf{ v}_2\}= \{(0,0),(a_1,b_1),(a_2,b_2)\},$$ $$\mathbf{W} =\{\mathbf{ 0},\mathbf{ w}_1,\mathbf{ w}_2\}= \{(0,0),(c_1,d_1),(c_2,d_2)\},$$ and $$ \mathbf{ X} =\{\mathbf{ V},\mathbf{ W}\}.$$ We assume that $\|\mathbf{ x}_1\|$ and $\|\mathbf{x}_2\|$ are small enough for Lemma \ref{fukaya2} to yield the deformed buildings $\mathbf{ F}(\mathbf{ x}_1)$ and $\mathbf{ G}(\mathbf{ x}_2)$. We also assume that $|a_1|^2+ |a_2|^2+|c_1|^2 + |c_2|^2$ is small enough with respect to $|b_1|^2+ |b_2|^2+|d_1|^2 + |d_2|^2$ for Lemma \ref{up2} to yield the deformations $u_{\mathbb{L}}^{\mathbf{X} }$ and $u_{L_{1,1}}^{\mathbf{X} }$. Let $\mathbb{E} \colon (D^2,S^1) \to (S^2 \times S^2, \mathbb{L})$ be the compactification of $u_{\mathbb{L}}^{\mathbf{X} }$, and $E_{1,1}\colon (D^2,S^1) \to (S^2 \times S^2, L_{1,1})$ be the compactification of $u_{L_{1,1}}^{\mathbf{X} }$. Since the homology classes represented by the ends of $u_{\mathbb{L}}^{\mathbf{X} }$ and $u_{L_{1,1}}^{\mathbf{X} }$ are identical to those of the essential curves $u_{\mathbb{L}}$ and $u_{L_{1,1}}$, the maps $\mathbb{E}$ and $E_{1,1}$ satisfy conditions (2) and (3) of Proposition \ref{intcount}. Consider compactifications $\bar{\mathbf{ F}}(\mathbf{ x}_1) \colon S^2 \to S^2 \times S^2$ of $\mathbf{ F}(\mathbf{ x}_1)$, and $\bar{\mathbf{ G}}(\mathbf{ x}_2) \colon S^2 \to S^2 \times S^2$ of $\mathbf{ G}(\mathbf{ x}_2)$. Arguing as before, we can perturb these maps, arbitrarily close to the Lagrangians $\mathbb{L}(\mathbf{ v}_1)$, $L_{1,1}(\mathbf{ w}_1)$, $\mathbb{L}(\mathbf{ v}_2)$, and $L_{1,1}(\mathbf{ w}_2)$, to obtain smooth spheres $F$ and $G$ such that condition (1) of Proposition \ref{intcount} holds. It remains to verify the conditions (4) through (9) of Proposition \ref{intcount} that involve intersections.
In the current setting, Lemma \ref{FE+-} holds as stated and the proof is unchanged. \begin{lemma}\label{FE} Suppose $a_1$ is negative, and $b_1$ and $b_2$ are nonzero. Suppose that $|a_1|$ is sufficiently small with respect to $|b_1|$.\\ \noindent If $b_1>0$, then $ F \bullet \bar{\mathcal{T}}_0 =0, $ $F \bullet \bar{\mathcal{T}}_{\infty} =1,$ and $ F \bullet \mathbb{E} =\alpha_0.\\ $ \noindent If $b_1<0$, then $ F \bullet \bar{\mathcal{T}}_0 =1, $ $ F \bullet \bar{\mathcal{T}}_{\infty} =0, $ and $ F \bullet \mathbb{E} =d-1-\alpha_0. $ \end{lemma} Lemmas \ref{GE+-} and \ref{noleftg} and Corollary \ref{FcapG} change only in notation and yield the following. \begin{lemma}\label{GE} Suppose that $a_2$ is negative, $b_1$ and $b_2$ are nonzero, and $|a_2|$ is sufficiently small with respect to $|b_2|$.\\ \noindent If $b_2>0$, then $ G \bullet \bar{\mathcal{T}}_0 =0, $ $G \bullet \bar{\mathcal{T}}_{\infty} =1,$ and $$ G \bullet \mathbb{E} = \gamma_0 + v_{\mathbb{L}}(\mathbf{x}_2) \bullet u_{\mathbb{L}}^{\mathbf{X} }.\\ $$ \noindent If $b_2<0$, then $ G \bullet \bar{\mathcal{T}}_0 =1, $ and $G \bullet \bar{\mathcal{T}}_{\infty} =0,$ and $$ G \bullet \mathbb{E} = d-\gamma_0 +v_{\mathbb{L}}(\mathbf{x}_2) \bullet u_{\mathbb{L}}^{\mathbf{X} }. $$ \end{lemma} \begin{lemma}\label{nog} If $b_1$ and $b_2$ have opposite sign, and $a_1$ and $a_2$ are sufficiently small, then \begin{equation*} \label{ } v_{\mathbb{L}}(\mathbf{ x}_2) \bullet u_{\mathbb{L}}^{\mathbf{X} }=v_{\mathbb{L}}(\mathbf{ x}_2) \bullet u_{\mathbb{L}}(\mathbf{ x}_1). 
\end{equation*} \end{lemma} \begin{lemma}\label{FGLL} Suppose that $a_1< a_2<0$, $|a_1|$ is sufficiently small with respect to $|b_1|$, and $|a_2|$ is sufficiently small with respect to $|b_2|$.\\ \noindent If $b_1>b_2$, then $F \cap G$ contains at least $\alpha_0 + d- \gamma_0 $ points in $\mathcal{U}(\mathbb{L})$ that project to $A_0$.\\ \noindent If $b_1<b_2$, then $F \cap G \cap \mathcal{U}(\mathbb{L})$ contains at least $d-1-\alpha_0 + \gamma_0$ points in $\mathcal{U}(\mathbb{L})$ that project to $A_0$. \end{lemma} The following analogous results follow from similar arguments. \begin{lemma}\label{FE2} Suppose $c_1$ is positive, $d_1$ and $d_2$ are nonzero, and $|c_1|$ is sufficiently small with respect to $|d_1|$.\\ \noindent If $d_1>0$, then $ F \bullet \bar{\frak{s}}_0 =0, $ $ F \bullet \bar{\frak{s}}_{\infty} =1, $ and $ F \bullet E_{1,1} =\beta_0. $\\ \noindent If $d_1<0$, then $ F \bullet \bar{\frak{s}}_0 =1, $ and $F \bullet \bar{\frak{s}}_{\infty} =0,$ and $ F \bullet E_{1,1} =d-\beta_0. $ \end{lemma} \begin{lemma}\label{GE2} Suppose $c_2$ is positive, $d_1$ and $d_2$ are nonzero, and $|c_2|$ is sufficiently small with respect to $|d_2|$.\\ \noindent If $d_2>0$, then $ G \bullet \bar{\frak{s}}_0 =0, $ $ G \bullet \bar{\frak{s}}_{\infty} =1 $ and $$ G \bullet E_{1,1} = \delta_0 + v_{L_{1,1}}(\mathbf{x}_2) \bullet u_{L_{1,1}}^{\mathbf{X} }.\\ $$ \noindent If $d_2<0$, then $ G \bullet \bar{\frak{s}}_0 =1, $ $ G \bullet \bar{\frak{s}}_{\infty} =0 $ and $$ G \bullet E_{1,1} = d-1-\delta_0 + v_{L_{1,1}}(\mathbf{x}_2) \bullet u_{L_{1,1}}^{\mathbf{X} }.\\ $$ \end{lemma} \begin{lemma}\label{nog2} If $d_1$ and $d_2$ have opposite sign, and $c_1$ and $c_2$ are sufficiently small, then \begin{equation*} \label{ } v_{L_{1,1}}(\mathbf{x}_2) \bullet u_{L_{1,1}}^{\mathbf{X} }=v_{L_{1,1}}(\mathbf{ x}_2) \bullet u_{L_{1,1}}(\mathbf{ x}_1). 
\end{equation*} \end{lemma} \begin{lemma}\label{FGL11} Suppose that $c_1>c_2>0$, $|c_1|$ is sufficiently small with respect to $|d_1|$, and $|c_2|$ is sufficiently small with respect to $|d_2|$.\\ \noindent If $d_1>d_2$, then $F \cap G $ contains at least $\beta_0 + d -1- \delta_0$ points in $\mathcal{U}(L_{1,1})$ that project to $A_{\infty}$.\\ \noindent If $d_1<d_2$, then $F \cap G $ contains at least $d-\beta_0 + \delta_0$ points in $\mathcal{U}(L_{1,1})$ that project to $A_{\infty}$. \end{lemma} With $F$ and $G$ fixed as above, the remaining analysis can be organized using the following two alternatives.\\ \noindent Alternative 1: either $\alpha_0 \geq \gamma_0$ or $\gamma_0 \geq \alpha_0+1.$\\ \\ \noindent Alternative 2: either $\beta_0 \geq \delta_0+1$ or $\delta_0 \geq \beta_0.$\\ \noindent{\bf Case 1.} $\alpha_0 \geq \gamma_0$ and $\beta_0 \geq \delta_0+1$. \\ \noindent In this case, we choose our translations so that \begin{equation*} \label{case1} a_1<a_2<0,\, b_2<0<b_1,\, 0< c_2<c_1,\,\text{and }\, d_2<0 < d_1. \end{equation*} For these conditions on $b_1$ and $b_2$, Lemmas \ref{FE} and \ref{GE} yield $F \bullet \mathcal{T}_0 =0$, $F \bullet \mathcal{T}_{\infty} =1$, $G \bullet \mathcal{T}_0 =1$, and $G \bullet \mathcal{T}_{\infty} =0$. This implies condition (4) of Proposition \ref{intcount}. Similarly, for these conditions on $d_1$ and $d_2$, Lemmas \ref{FE2} and \ref{GE2} imply that $F \bullet \frak{s}_0 =0$, $F \bullet \frak{s}_{\infty} =1$, $G \bullet \frak{s}_0 =1$, and $G \bullet \frak{s}_{\infty} =0$. This gives condition (5) of Proposition \ref{intcount}. Since the maps $F$ and $G$ both represent the class $(1,d)$ in $H_2(S^2 \times S^2;{\mathbb Z})$, we have $F \bullet G = (1,d) \bullet (1,d) = 2d$. On the other hand, for the choices above, Lemmas \ref{FGLL} and \ref{FGL11} imply that \begin{equation*} \label{in3} F \bullet G \geq (\alpha_0 + d -\gamma_0) +(\beta_0 + d-1-\delta_0).
\end{equation*} In the current case, with $\alpha_0 \geq \gamma_0$ and $\beta_0 \geq \delta_0+1$, these two summands are each at least $d$, and so we must have \begin{equation} \label{e1} \alpha_0 = \gamma_0, \end{equation} and \begin{equation} \label{e2} \beta_0 =1+\delta_0. \end{equation} It follows that $F\cap G$ consists of exactly $2d$ points, $d$ of which are contained in $\mathcal{U}(\mathbb{L})$ and project to $A_0$ and $d$ of which are contained in $\mathcal{U}(L_{1,1})$ and project to $A_{\infty}$. This yields conditions (8) and (9) of Proposition \ref{intcount}. Since $F \bullet G = \mathbf{ F}(\mathbf{ x}_1) \bullet \mathbf{ G}(\mathbf{ x}_2)$, it follows from the equalities above that there can be no intersections between the essential curves of $\mathbf{ F}(\mathbf{ x}_1)$ and those of $\mathbf{ G}(\mathbf{ x}_2)$. In particular, we must have \begin{equation} \label{e3} v_{\mathbb{L}}(\mathbf{x}_2) \bullet u_{\mathbb{L}}(\mathbf{x}_1)=0, \end{equation} and \begin{equation} \label{e4} v_{L_{1,1}}(\mathbf{ x}_2) \bullet u_{L_{1,1}}(\mathbf{ x}_1)=0. \end{equation} Equation \eqref{e3} and Lemma \ref{nog} imply that \begin{equation*} \label{ } v_{\mathbb{L}}(\mathbf{ x}_2) \bullet u_{\mathbb{L}}^{\mathbf{X} }=0. \end{equation*} By Lemmas \ref{FE} and \ref{GE} and equation \eqref{e1}, we then have \begin{equation*} \label{ } F \bullet \mathbb{E} + G \bullet \mathbb{E}= \alpha_0 + d -\gamma_0 =d, \end{equation*} which yields condition (6) of Proposition \ref{intcount}. Similarly, Lemmas \ref{FE2}, \ref{GE2} and \ref{nog2}, together with equations \eqref{e2} and \eqref{e4}, imply that \begin{equation*} \label{} F \bullet E_{1,1} + G \bullet E_{1,1} = d \end{equation*} and hence condition (7) of Proposition \ref{intcount}. This completes the proof of Proposition \ref{intcount} in the present case. \medskip The proofs in the other cases follow along identical lines.
For the sake of completeness we list the inequalities for the components of the translations that lead to the desired intersection patterns of Proposition \ref{intcount}, in the remaining scenarios. For the case $\alpha_0 \geq \gamma_0$ and $\delta_0\geq \beta_0$, we choose \begin{equation*} a_1<a_2<0,\, b_2<0<b_1,\, 0<c_2<c_1,\, \text{ and } d_1<0< d_2. \end{equation*} For $\gamma_0 \geq \alpha_0+1$ and $\beta_0 \geq \delta_0+1$, we choose \begin{equation*} a_1<a_2<0,\, b_1<0<b_2,\, 0<c_2<c_1,\, \text{ and } d_2<0<d_1. \end{equation*} Finally for the case $\gamma_0 \geq \alpha_0+1$ and $\delta_0\geq \beta_0$, we choose \begin{equation*} a_1<a_2<0,\, b_1<0<b_2,\, 0<c_2<c_1,\, \text{ and } d_1<0<d_2. \end{equation*} To complete the proof of Proposition \ref{intcount} we remark that the smoothings $F$ and $G$ can be replaced by smooth symplectic spheres without changing the various intersection numbers. To do this, it is enough to replace $F$ and $G$ by symplectic spheres which coincide with $F$ and $G$ away from neighborhoods of $\mathbb{L}(\mathbf v_1)$ and $L_{1,1}(\mathbf w_1)$, respectively $\mathbb{L}(\mathbf v_2)$ and $L_{1,1}(\mathbf w_2)$, that is, the new spheres differ only away from all intersection points. Now, we know that the asymptotic ends of the top level curves of $\mathbf{ F}(\mathbf{ x}_1)$ and $\mathbf{ G}(\mathbf{ x}_2)$ are simply covered, either because the curves are essential, or for covers of leaves by applying Lemma \ref{area1}. Generically the asymptotic limits are distinct. Then, for small perturbations, we may assume that the top level curves restricted to a neighborhood of the Lagrangians are symplectically isotopic to the corresponding top level curves of our original buildings $\mathbf{F}$ and $\mathbf{G}$. (In the case of $\mathbf{ F}(\mathbf{ x}_1)$ the isotopy maps $\mathbb{L}(\mathbf v_1)$ and $L_{1,1}(\mathbf w_1)$ to $\mathbb{L}$ and $L_{1,1}$ respectively.) 
Finally recall that the buildings $\mathbf{F}$ and $\mathbf{G}$ are limits of sequences of smooth embedded holomorphic spheres as our almost complex structures are stretched along the Lagrangians. Therefore, after a small perturbation, we may assume the top level curves of these buildings restricted to a compact subset of the complement of $\mathbb{L} \cup L_{1,1}$ extend to smooth symplectic spheres in $S^2 \times S^2$. Combining the isotopies and these extensions gives our symplectic spheres as required. \subsection{Scene change} Consider $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ equipped with an almost complex structure $J$ adapted to parameterizations $\psi$ and $\psi_{1,1}$ of $L$ and $L_{1,1}$, respectively. Compactifying the broken leaves of the corresponding foliation $\mathcal{F}$, we get a foliation $\bar{\mathcal{F}}$ of $S^2 \times S^2$ by spheres. We denote a general sphere in $\bar{\mathcal{F}}$ by $H$. Let $F$, $G$, $E$ and $E_{1,1}$ be the spheres and disks from Proposition \ref{intcount}. We can work with an almost complex structure with respect to which these spheres and disks are holomorphic. In particular intersections with the leaves of the foliation are all positive. We start with the following intersection pattern and area profile. 
\bigskip \begin{tabular}{ cc } Initial intersection numbers & \quad Initial symplectic areas \\ \\ \begin{tabular}{l|l|l|l|l|l|} \cline{2-6} & $F$ & $G$ & $\mathbb{E}$ & $E_{1,1}$ & $H$ \\ \hline \multicolumn{1}{|l|}{$F$} & 2d & & & & \\ \hline \multicolumn{1}{|l|}{$G$} & $2d$ & 2d & & & \\ \hline \multicolumn{1}{|l|}{$\mathbb{E}$} & $k$ & $d-k$ & $*$ & & \\ \hline \multicolumn{1}{|l|}{$E_{1,1}$} & $l$ & $d-l$ & 0 & $*$ & \\ \hline \multicolumn{1}{|l|}{$H$} & 1 & 1 & $*$ & $*$ & 0 \\ \hline \end{tabular} & \quad \begin{tabular}{l|l|} \cline{2-2} & $\pi_1^*\omega + \pi_2^*\omega$--area \\ \hline \multicolumn{1}{|l|}{$F$} & $2+2d$ \\ \hline \multicolumn{1}{|l|}{$G$} & $2+2d$ \\ \hline \multicolumn{1}{|l|}{$\mathbb{E}$} & 1 \\ \hline \multicolumn{1}{|l|}{$E_{1,1}$} & 1 \\ \hline \multicolumn{1}{|l|}{$H$} & 2 \\ \hline \end{tabular}\\ \end{tabular} \bigskip In this section, we use $F$ and $G$ to alter $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$, away from $\mathbb{L}$ and $L_{1,1}$, to derive a scenario in which the disjointness of these Lagrangians is a contradiction. The spheres $F$ and $G$ intersect one another in $2d$ points, $\{p_1, \dots, p_{2d}\}$. By positivity of intersection, the homology classes involved imply that each of the $p_i$ in $F \cap G$ lies on a sphere, $H_i$, of $\bar{\mathcal{F}}$, and the $H_i$ are distinct. We may also assume that for some fixed $\epsilon>0$ there are disjoint Darboux balls $B_i$ of capacity $\epsilon$ around each $p_i$ on which $J$ is standard and the $F$, $G$ and $H_i$ restrict to planes through the origin. \medskip \noindent {\em Step 1:} Blow up the balls $B_i$ of capacity $\epsilon$ around each of the $p_i$. \medskip Denote the new manifold by $(W, \Omega_1)$. It follows from the analysis of the blowup procedure from \cite{mcp}, see also Proposition 9.3.3 of \cite{mcs}, that $(W, \Omega_1)$ contains $2d$ exceptional divisors $\mathcal{E}_i$ each of area $\epsilon$. 
Since the $H_i$ are $J$-holomorphic in each $B_i$, $(W, \Omega_1)$ also contains the proper transforms of the $H_i$. These are denoted here by $\hat{H}_i$ and are symplectic spheres of area $2-\epsilon$. By property (9) of Proposition \ref{intcount}, $d$ of the $\hat{H}_i$ intersect $\mathbb{E}$ once, and the other $d$ of the $\hat{H}_i$ intersect $E_{1,1}$ once. (Note that we may assume the families of planes $\mathcal{T}_0$ and $\mathcal{T}_{\infty}$, and also the families $\frak s_0$ and $\frak s_{\infty}$, are still $J$-holomorphic after our perturbation of $J$ since the smoothing of $F$ and $G$ occurs away from our broken planes. Therefore $p^{-1}(A_0)$ and $p^{-1}(A_{\infty})$ remain the same sets as in Proposition \ref{intcount} (9).) The proper transforms of $F$ and $G$, denoted by $\hat{F}$ and $\hat{G}$, are also well-defined. These are spheres of area $2d+2-2d\epsilon$ which are now disjoint. Every sphere $H$ of $\bar{\mathcal{F}}$, other than the $H_i$, has a proper transform $\hat{H}$ of area two. This information is collected in the following tables. 
\medskip \begin{tabular}{ cc } Intersection numbers after Step 1& \quad Areas after Step 1\\ \\ \begin{tabular}{l|l|l|l|l|l|l|l|} \cline{2-8} & $\hat{F}$ & $\hat{G}$ & $\mathbb{E}$ & $E_{1,1}$ & $\hat{H}$ & $\mathcal{E}_i$ & $\hat{H}_i$ \\ \hline \multicolumn{1}{|l|}{$\hat{F}$} & 0 & & & & & & \\ \hline \multicolumn{1}{|l|}{$\hat{G}$} & 0 & 0 & & & & & \\ \hline \multicolumn{1}{|l|}{$\mathbb{E}$} & $k$ & $d-k$ & $*$ & & & & \\ \hline \multicolumn{1}{|l|}{$E_{1,1}$} & $l$ & $d-l$ & 0 & $*$ & & & \\ \hline \multicolumn{1}{|l|}{$\hat{H}$} & $1$ & $1$ & $*$ & $*$ & 0 & & \\ \hline \multicolumn{1}{|l|}{$\{\mathcal{E}_i\}$} & $2d$ & $2d$ & 0 & 0 & 0 & -1 & \\ \hline \multicolumn{1}{|l|}{$\{\hat{H}_i\}$} & 0 & 0 & $ d$ & $d$ & 0 & 1 & -1 \\ \hline \end{tabular} & \quad \begin{tabular}{l|l|} \cline{2-2} & $\Omega_1$--Area \\ \hline \multicolumn{1}{|l|}{$\hat{F}$} & $2+2d -2d\epsilon$ \\ \hline \multicolumn{1}{|l|}{$\hat{G}$} & $2+2d-2d\epsilon$ \\ \hline \multicolumn{1}{|l|}{$\mathbb{E}$} & $1$ \\ \hline \multicolumn{1}{|l|}{$E_{1,1}$} & $1$ \\ \hline \multicolumn{1}{|l|}{$\hat{H}$} & $2$ \\ \hline \multicolumn{1}{|l|}{$\mathcal{E}_i$} & $\epsilon$ \\ \hline \multicolumn{1}{|l|}{$\hat{H}_i$} & $2-\epsilon$ \\ \hline \end{tabular}\\ \end{tabular} \bigskip \noindent {\em Step 2:} Inflate both $\hat{F}$ and $\hat{G}$ by adding a tubular neighborhood of capacity $d$. \medskip For the inflation result from \cite{lm96}, we may assume by Lemma 3.1 in \cite{mcd01} that the new form tames our original almost complex structure. In particular all of our holomorphic curves remain holomorphic and in particular symplectic. Denoting the resulting symplectic form on $W$ by $\Omega_2$ we get the following new symplectic area profile. 
\bigskip \begin{center} \begin{tabular}{l|l|} \cline{2-2} & $\Omega_2$--Area \\ \hline \multicolumn{1}{|l|}{$\hat{F}$} & $2+2d -2d\epsilon$ \\ \hline \multicolumn{1}{|l|}{$\hat{G}$} & $2+2d-2d\epsilon$ \\ \hline \multicolumn{1}{|l|}{$\mathbb{E}$} & $1+d^2$ \\ \hline \multicolumn{1}{|l|}{$E_{1,1}$} & $1+d^2$ \\ \hline \multicolumn{1}{|l|}{$\hat{H}$} & $2+2d$ \\ \hline \multicolumn{1}{|l|}{$\mathcal{E}_i$} & $\epsilon+2d$ \\ \hline \multicolumn{1}{|l|}{$\hat{H}_i$} & $2-\epsilon$ \\ \hline \end{tabular} \end{center} \bigskip \noindent {\em Step 3:} Apply the negative inflation procedure from \cite{bu}, of size $\epsilon$, to each $\mathcal{E}_i$. \medskip Denoting the resulting symplectic form on $W$ by $\Omega_3$, the area profile becomes \bigskip \begin{center} \begin{tabular}{l|l|} \cline{2-2} & $\Omega_3$--Area \\ \hline \multicolumn{1}{|l|}{$\hat{F}$} & $2+2d $ \\ \hline \multicolumn{1}{|l|}{$\hat{G}$} & $2+2d$ \\ \hline \multicolumn{1}{|l|}{$\mathbb{E}$} & $1+d^2$ \\ \hline \multicolumn{1}{|l|}{$E_{1,1}$} & $1+d^2$ \\ \hline \multicolumn{1}{|l|}{$\hat{H}$} & $2+2d$ \\ \hline \multicolumn{1}{|l|}{$\mathcal{E}_i$} & $2d$ \\ \hline \multicolumn{1}{|l|}{$\hat{H}_i$} & $2$ \\ \hline \end{tabular} \end{center} \bigskip \noindent {\em Step 4:} Blow down each $\hat{H}_i$. \medskip We denote the symplectic manifold resulting from this final step by $(X, \Omega)$. Each of the exceptional divisors $\mathcal{E}_i$ in $(W,\Omega_3)$ is transformed, by Step 4, into a sphere $\mathcal{H}_i$ in $X$ which has $\Omega$-area equal to $2d +2$ and now lies in the same class as the $\hat{H}$. The disks $\mathbb{E}$ and $E_{1,1}$ each intersect $d$ of the $\hat{H}_i$ and so are transformed by Step $4$ into disks $\mathbb{E}^X$ and $E_{1,1}^X$, whose symplectic areas have each been increased by $2d$. 
\bigskip \begin{tabular}{ cc } Intersection numbers after Step 4& \quad Areas after Step 4\\ \\ \begin{tabular}{l|l|l|l|l|l|l|l} \cline{2-7} & $\hat{F}$ & $\hat{G}$ & $\mathbb{E}^X$ & $E_{1,1}^X$ & $\hat{H}$ & $\mathcal{H}_i$ \\ \hline \multicolumn{1}{|l|}{$\hat{F}$} & 0 & & & & & \\ \hline \multicolumn{1}{|l|}{$\hat{G}$} & 0 & 0 & & & & \\ \hline \multicolumn{1}{|l|}{$\mathbb{E}^X$} & $k$ & $d-k$ & $*$ & & & \\ \hline \multicolumn{1}{|l|}{$E_{1,1}^X$} & $l$ & $d-l$ & 0 & $*$ & & \\ \hline \multicolumn{1}{|l|}{$\hat{H}$} & $1$ & $1$ & $*$ & $*$ & 0 & \\ \hline \multicolumn{1}{|l|}{$\{\mathcal{H}_i\}$} & $2d$ & $2d$ & $d$ & $d$ & 0 & 0 \\ \hline \end{tabular} & \quad \begin{tabular}{l|l|} \cline{2-2} & $\Omega$--Area \\ \hline \multicolumn{1}{|l|}{$\hat{F}$} & $2+2d$ \\ \hline \multicolumn{1}{|l|}{$\hat{G}$} & $2+2d$ \\ \hline \multicolumn{1}{|l|}{$\mathbb{E}^X$} & $1+d^2+2d$ \\ \hline \multicolumn{1}{|l|}{$E_{1,1}^X$} & $1+d^2+2d$ \\ \hline \multicolumn{1}{|l|}{$\hat{H}$} & $2+2d$ \\ \hline \multicolumn{1}{|l|}{$\mathcal{H}_i$} & $2+2d$ \\ \hline \end{tabular}\\ \end{tabular} \bigskip \begin{lemma}\label{s2} $(X, \Omega)$ is symplectomorphic to $$(S^2 \times S^2, (d+1)\omega \oplus (d+1)\omega).$$ \end{lemma} \begin{proof} The presence of the embedded symplectic spheres $\hat{F}$ and $\hat{H}$, which have the same $\Omega$-area and satisfy $$ \hat{F} \bullet \hat{F} = \hat{H} \bullet \hat{H} =0, \text{ and } \hat{F} \bullet \hat{H} =1, $$ implies that either $(X, \Omega)$ is symplectomorphic to $$(S^2 \times S^2, (d+1)\omega \oplus (d+1)\omega)$$ or there are finitely many symplectically embedded spheres with self-intersection number $-1$ in the complement of $\hat{F}$ and $\hat{H}$ in $X$, and $X$ can be blown down to a copy of $S^2 \times S^2$. This follows from the proof of Theorem 9.4.7 of \cite{mcs}. As a consequence, if $H_2(X;{\mathbb Z})$ has rank $2$ then $X$ is symplectomorphic to $S^2 \times S^2$.
A simple analysis of the construction of $(X,\Omega)$ from $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ allows us to compute this rank. The $2d$ blow ups in Step 1 imply that the rank of $H_2(W;{\mathbb Z})$ is $2+2d$. The subsequent $2d$ blow down operations in Step 4 imply that the rank of $H_2(X;{\mathbb Z})$ is $2$ as required. \end{proof} Henceforth, we may identify $(X, \Omega)$ with $(S^2 \times S^2, (d+1)\omega \oplus (d+1)\omega)$. The Lagrangian tori $\mathbb{L}$ and $L_{1,1}$ are untouched, as submanifolds, by the four steps above. They remain Lagrangian and disjoint in $(X,\Omega)$. Note that $L_{1,1}$ is not equal to the Clifford torus in $(X, \Omega)$ with respect to the identification above. In what follows we denote the Clifford torus in $(X, \Omega)$ by $L_X$. The manifold $(X,\Omega)$ also inherits an almost complex structure, denoted here by $\hat{J}$, which equals $J$ away from the collection $\{\hat{\mathcal{H}}_i\}$. In particular, $\hat{J}$ is adapted to the original parameterizations $\psi$ and $\psi_{1,1}$ of $L$ and $L_{1,1}$. As in Section \ref{double}, $\hat{J}$ determines a straightened foliation $\hat{\mathcal{F}}= \mathcal{F}(\mathbb{L},L_{1,1}, \psi,\psi_{1,1}, \hat{J})$ of $X \smallsetminus (\mathbb{L} \cup L_{1,1})$. The original collections of planes $\frak{s}_0$, $\frak{s}_{\infty}$, $\mathcal{T}_0$ and $\mathcal{T}_{\infty}$ still comprise the broken leaves of this new foliation. The symplectic spheres $\hat{F}$ and $\hat{G}$ now represent the class $(1,0) \in H_2(X;{\mathbb Z}) = H_2(S^2 \times S^2; {\mathbb Z})$. As in Proposition \ref{intcount}, it is still true that exactly one of $\hat{F}$ and $\hat{G}$ intersects the planes of $\frak{s}_{0}$ and the other intersects the planes of $\frak{s}_{\infty}$, and exactly one of $\hat{F}$ and $\hat{G}$ intersects the planes of $\mathcal{T}_{0}$ and the other intersects the planes of $\mathcal{T}_{\infty}$. 
\begin{lemma} The Lagrangian tori $\mathbb{L}$ and $L_{1,1}$ are both monotone in $(X, \Omega)$. \end{lemma} \begin{proof} Let $D_{\infty} \colon (D^2, S^1) \to (S^2 \times S^2, \mathbb{L})$ be a compactification of one of the planes of $\mathcal{T}_\infty$. The disk $D_{\infty}$ has Maslov index equal to $2$ and symplectic area equal to $1$ with respect to $\pi_1^*\omega + \pi_2^*\omega$. The map $D_{\infty}|_{S^1}$ represents the foliation class $\beta_{\mathbb{L}}$. The image of the map $D_{\infty}$ is unaffected by the four steps defining the passage from $(S^2 \times S^2, \pi_1^*\omega + \pi_2^*\omega)$ to $(X, \Omega)$. Viewed as a map from $(D^2, S^1)$ to $(X, \mathbb{L})$, $D_{\infty}$ still has Maslov index 2, and $D_{\infty}|_{S^1}$ still represents $\beta_{\mathbb{L}}$. The $\Omega$--area of $D_{\infty}$, as a map into $(X, \Omega)$, is $d+1$. This follows from the fact that exactly one of $F$ and $G$ intersects $D_{\infty}$ and so the inflations in Step 2 increase the symplectic area by $d$. By assertion (4) of Proposition \ref{intcount}, the boundary $\mathbb{E}|_{S^1}$ represents a class which together with $\beta_{\mathbb{L}}$ forms an integral basis of $H_1(\mathbb{L};{\mathbb Z}).$ The same holds for $\mathbb{E}^X|_{S^1}$. To prove that $\mathbb{L}$ is a monotone Lagrangian torus in $(X, \Omega)$ it then suffices, by Lemma \ref{pair}, to prove that the Maslov index of $\mathbb{E}^X \colon (D^2, S^1) \to (X, \mathbb{L})$ is equal to $$\frac{2}{d+1}(1+d^2 +2d) = 2d+2$$ where $1+d^2+2d$ is the area of $\mathbb{E}^X$. This follows from the fact that, in $(W, \Omega_3)$, $\mathbb{E}$ has Maslov index $2$, intersects exactly $d$ of the $\hat{H}_i$, and each of the corresponding intersection numbers is $1$. In blowing down the $\hat{H}_i$, and passing from $\mathbb{E}$ to $\mathbb{E}^X$, each of these intersection points yields an increase of $2$ in the Maslov index. The proof that $L_{1,1}$ is monotone in $(X, \Omega)$ is identical.
\end{proof} \begin{lemma} The Lagrangians $\mathbb{L}$ and $L_{1,1}$ are both Hamiltonian isotopic to the Clifford torus $L_X$ in $(X, \Omega)$. \end{lemma} \begin{proof} This follows from the main result of Cieliebak and Schwingenheuer in \cite{cisc}. In the language of that paper the compactification of the straightened foliation $\hat{\mathcal{F}}= \mathcal{F}(\mathbb{L},L_{1,1}, \psi,\psi_{1,1}, \hat{J})$ yields a fibering of $\mathbb{L}$ and a fibering of $L_{1,1}$. For the fibering of $\mathbb{L}$, the spheres $\hat{F}$ and $\hat{G}$ are disjoint sections in the class $(1,0)$ and exactly one of them intersects the (compactifications of the) planes of $\mathcal{T}_{0}$ and the other intersects those of $\mathcal{T}_{\infty}$. The main theorem of \cite{cisc} then implies that $\mathbb{L}$ is Hamiltonian isotopic to the Clifford torus $L_X$ in $(X, \Omega)$. An identical argument holds for $L_{1,1}.$ \end{proof} With this, the contradiction to Assumption 2 becomes apparent. The first fundamental intersection result implied by the Floer theory for monotone Lagrangian submanifolds implies that any two Lagrangian tori Hamiltonian isotopic to $L_X$ must intersect nontrivially, \cite{oh}. Hence, $\mathbb{L}$ and $L_{1,1}$ cannot be disjoint in $(X, \Omega)$. \begin{remark} The assumption that $\mathbb{L}$ and $L_{1,1}$ are disjoint is used twice in the proof of Theorem \ref{one}. At the very end, and in the proof of Refinement 3 in Section \ref{claim3}. \end{remark} \begin{remark} The fact that $L_{1,1}$ is the Clifford torus (and not just another monotone Lagrangian torus) is crucial (only) in the proof of the existence results Proposition \ref{existence2} and Proposition \ref{existence3}. \end{remark} \begin{remark} There is an alternative to the argument used at the end of the proof of Theorem \ref{one} that avoids appealing to Lagrangian Floer homology.
Instead one can use the fact that the symplectomorphism in Lemma \ref{s2} can be chosen to map $\hat{F}$, $\hat{G}$ and the transforms $\hat{T}_0$ and $\hat{T}_{\infty}$ to the axes $S^2 \times \{0\}$, $S^2 \times \{\infty\}$, $\{0\} \times S^2$ and $\{ \infty\} \times S^2$ respectively. The complement of these axes in $S^2 \times S^2$ can be identified with a domain in $T^* T^2$, in which the Clifford torus is identified with the zero section. We can check that $\mathbb{L}$ and $L_{1,1}$ are homologically nontrivial in this copy of $T^* T^2$ and so, by Theorem \ref{hom}, are Hamiltonian isotopic to constant sections. The monotonicity condition then implies the constant section must be the zero section. Finally Gromov's intersection theorem for exact Lagrangians in cotangent bundles, from Section $2.3.B''_4$ of \cite{gr}, implies that they must intersect. \end{remark} \section{Proof of Theorem \ref{two}.} It suffices to prove the following. \begin{theorem}\label{eps} For any $\epsilon>0$ there is a $\delta >0$ and a symplectic embedding of the polydisk $P(1+ \delta,1 + \delta)$ into $P(2+ \epsilon, 2+ \epsilon)$ whose image is disjoint from the product Lagrangians $L_{k,l}$ for $k,l \in \{1,2\}$. \end{theorem} The desired additional integral Lagrangian torus $L^+$ is the one on (the image of) the boundary of $P(1,1) \subset P(1+ \delta, 1+ \delta)$. \subsection{Proof of Theorem \ref{eps}} We will use rescaled polar coordinates $\theta_i, R_i$ on ${\mathbb R}^4={\mathbb C}^2$ where $R_i = \pi |z_i|^2$ and $\theta_i \in {\mathbb R} \slash {\mathbb Z}$. In these coordinates the standard symplectic form is $$\omega = \sum_{i=1}^2 dR_i \wedge d \theta_i$$ and $L_{k,l} = \{(\theta_1,k,\theta_2,l)\}$. \subsubsection{A polydisk.} For $\epsilon>0$ fixed, choose positive numbers $\ell$, $w$ such that \begin{itemize} \item $2< \ell < 2+\epsilon$ \item $ w < 2$ \item $\frac{1}{\ell} + \frac{1}{w} <1$. 
\end{itemize} Then choose positive constants $\sigma$ and $\delta$ such that \begin{itemize} \item $\ell + \sigma < 2+\epsilon$ \item $w +\sigma <2$ \item $\frac{1+ \delta}{\ell} + \frac{1+ \delta}{w} <1$ \end{itemize} Set $$S = \{ \sigma < R_1 < \ell + \sigma, \sigma < R_2 < w + \sigma\}$$ and $$T=\left\{ 0 < \theta_1 < \frac{1+ \delta}{\ell}, 0 < \theta_2 < \frac{1+ \delta}{w}\right\}.$$ Note that $S \times T$ is a subset of $P(2+ \epsilon, 2+ \epsilon)$ and is symplectomorphic to $P(1+\delta, 1+ \delta)$. Both $L_{1,1}$ and $L_{2,1}$ intersect $S \times T$, while $L_{1,2}$ and $L_{2,2}$ do not. \subsubsection{The plan} To prove Theorem \ref{eps} it suffices to find a Hamiltonian diffeomorphism of $P(2+ \epsilon, 2+ \epsilon)$ that displaces $S\times T$ from the $L_{k,l}$. Equivalently, we construct a Hamiltonian diffeomorphism $\Psi$ of $P(2+ \epsilon, 2+ \epsilon)$ such that each of the images $\Psi(L_{k,l})$ is disjoint from $S \times T$. To construct $\Psi$ we use Hamiltonian functions which are of the form $F(\theta_1, \theta_2)$. The Hamiltonian flow, $\phi^t_F$, of such a function preserves $\theta_1$ and $\theta_2$ and generates a Hamiltonian vector field parallel to the $R_1R_2$--plane. In particular, the only points of $\phi^t_F(L_{k,l})$ which could possibly intersect $S \times T$ are those whose $(\theta_1, \theta_2)$ coordinates lie in $T$. Since we only need to control the images of the $L_{k,l}$, we can cut off autonomous functions like $F$ in (moving) neighborhoods of $\phi^t_F(L_{k,l})$ for specific values of $k$ and $l$. After this cutting off, the new Hamiltonian will depend on all variables and be time dependent. In general, for a closed subset $V$, we denote the function obtained by cutting off $F$ along $\phi^t_F(V)$ by $F_{[V]}$.
Note that $$\phi^t_{F_{[V]}}(v) =\phi^t_F(v),\,\,\text{for all }\,\, v \in V \,\,\text{and}\,\, t \in [0,1].$$ As well, each map $\phi^t_{F_{[V]}}$ is equal to the identity away from an arbitrarily small neighborhood of $$\bigcup_{t \in [0,1]}\phi^t_F(V).$$ \subsubsection{A diagonal move} Let $g \colon {\mathbb R}/{\mathbb Z} \to {\mathbb R}$ be a smooth function such that for some positive real number $c(g)>0$ we have $$g'(s) =c(g),\, \text{for} \,\,s \in \left[0, \frac{1+ \delta}{\ell} + \frac{1+ \delta}{w}\right],$$ $\max (g') =c(g)$, and $\min (g')$ is less than and arbitrarily close to $$-c(g)\left(\frac{\frac{1+ \delta}{\ell} + \frac{1+ \delta}{w}}{1- \frac{1+ \delta}{\ell} -\frac{1+ \delta}{w}}\right).$$ Letting $G(\theta_1, \theta_2) =g(\theta_1+\theta_2)$, we have \begin{equation} \label{gflow} \phi^t_G(\theta_1,R_1, \theta_2, R_2) = (\theta_1,R_1+t g'(\theta_1+\theta_2), \theta_2 , R_2+t g'(\theta_1+\theta_2)) \end{equation} The image $\phi^1_G(L_{1,2})$ is well defined as long as \begin{equation} \label{constraint} c(g) < \frac{1- \frac{1+ \delta}{\ell} -\frac{1+ \delta}{w}}{\frac{1+ \delta}{\ell} + \frac{1+ \delta}{w}} \end{equation} and is contained in $P(2+ \epsilon, 2+ \epsilon)$ as long as $c(g) < \epsilon$. Henceforth, we will assume that $\ell$, $w$ and $\delta$ have been chosen such that the first constraint on $c(g)$, implies the second. It follows from \eqref{gflow} and \eqref{constraint} that $\phi^t_G(L_{1,2})$ is contained in $$\{R_1 \leq 1+ c(g)\} \cap \{R_2 > 1\}$$ for all $t \in [0,1]$. Hence, each image $\phi^t_G(L_{1,2})$ is disjoint from the other $L_{k,l}$. Since $g' =c(g)>0$ on $T$, each $\phi^t_G(L_{1,2})$ is also disjoint from $S \times T$. 
\subsubsection{A vertical move.} Let $h \colon {\mathbb R}/{\mathbb Z} \to {\mathbb R}$ be a smooth function such that for some positive real number $0< c(h) < \sigma$ we have $$h'(s) =-(1-c(h)),\, \text{for} \,\,s \in \left[0, \frac{1+ \delta}{w}\right],$$ $\min(h') =-(1-c(h))$, and $\max(h')$ is greater than and arbitrarily close to $$\frac{(1-c(h))\left(\frac{1+ \delta}{w}\right)}{1 -\frac{1+ \delta}{w}} =\frac{1-c(h)}{\frac{w}{1+ \delta}-1}$$ which is greater than one since $w+\sigma<2$ and $c(h)< \sigma$. Letting $H(\theta_1, \theta_2) =h(\theta_2)$, we have \begin{equation} \label{hflow} \phi^t_H(\theta_1,R_1, \theta_2, R_2) = (\theta_1,R_1, \theta_2 , R_2+th'(\theta_2)). \end{equation} Clearly, $L_{2,1}$ and $L_{2,2}$ are disjoint from $\phi^t_H(L_{1,1})$ for all $t \in [0,1]$. Moreover, for $\theta_2$ in $\left[0, \frac{1+ \delta}{w}\right]$ we have \begin{equation*} \label{ } \phi^1_H(\theta_1,1, \theta_2, 1) = (\theta_1,1, \theta_2 ,c(h)). \end{equation*} So $\phi^1_H(L_{1,1})$ is disjoint from $S \times T$ by our choice of $c(h)$. Some points of $L_{1,1}$, with values of $\theta_2$ in $\left(\frac{1+ \delta}{w},1\right)$, are mapped by $\phi^1_H$ to points having $R_2$ coordinate greater than and arbitrarily close to $$1 + \frac{1-c(h)}{\frac{w}{1+ \delta}-1}>2.$$ Choosing $w$ sufficiently close to $2$, and $\delta$ sufficiently small we can ensure that $\phi^1_H(L_{1,1})$ lies in $P(2+ \epsilon, 2+ \epsilon)$. \subsubsection{A time delay} The Hamiltonian diffeomorphism $\phi^{1}_{H_{[L_{1,1}]}}$ cannot be used to move $L_{1,1}$ off of $S \times T$ while leaving $L_{1,2}$ undisturbed. For, as described in the discussion above, $\phi^{1}_{H_{[L_{1,1}]}}(L_{1,2})$ will intersect $S \times T$. The Hamiltonian diffeomorphism $$\phi^{1}_{H_{[L_{1,1}]}} \circ \phi^1_{G_{[L_{1,2}]}}$$ has the same problem. 
By \eqref{gflow} and \eqref{hflow}, the image of $(\theta_1,1, \theta_2, 1) \in L_{1,1}$ under $\phi^t_H$ belongs to $\phi^1_G(L_{1,2})$ if and only if $g'(\theta_1+\theta_2)=0$ and $th'(\theta_2)=1$. Since $\max(h')>1$, these intersections occur and so the map above will again push $L_{1,2}$ into $S \times T.$ We can fix this by adding a time delay. The first intersection between $\phi^t_H(L_{1,1})$ and $\phi^1_G(L_{1,2})$ occurs at $t = (\max(h') )^{-1}$. Let $\tau$ be less than and arbitrarily close to $(\max(h') )^{-1}$. Hence, $\tau$ is also less than and arbitrarily close to \begin{equation*} \label{ } \frac{\frac{w}{1+ \delta}-1}{1-c(h)}. \end{equation*} Consider the Hamiltonian diffeomorphism $$\tilde{\Psi}= \phi^{1-\tau}_{H_{\left[\phi^{\tau}_{H}(L_{1,1}) \cup \phi^1_{G}(L_{1,2})\right]} }\circ \phi^{\tau}_{H_{[L_{1,1}]}} \circ \phi^1_{G_{[L_{1,2}]}}.$$ It follows from the analysis above, that the map $\tilde{\Psi}$ is compactly supported in $P(2+ \epsilon, 2+ \epsilon)$. In fact, it is supported in an arbitrarily small neighborhood of the subset $\{R_1 \leq 1+ c(g)\}.$ Hence, $\tilde{\Psi}(L_{2,1}) = L_{2,1}$ and $\tilde{\Psi}(L_{2,2}) = L_{2,2}$. By the definitions of $\tau$ and the cut-off operation we have $\tilde{\Psi}(L_{1,1}) = \phi^1_H(L_{1,1})$ and thus $\tilde{\Psi}(L_{1,1})$ is disjoint from $S \times T$. In addition, we now have the following. \begin{lemma}\label{off} The image $\tilde{\Psi}(L_{1,2})$ is disjoint from $S \times T$ when $c(h)$ is sufficiently close to $\sigma$ and $\delta$ is sufficiently small. \end{lemma} \begin{proof} By construction, for $(\theta_1,\theta_2) \in T$ we have \begin{eqnarray*} \tilde{\Psi}(\theta_1,1, \theta_2, 2) & = &(\theta_1,1+ g'(\theta_1+\theta_2), \theta_2 , 2+ g'(\theta_1+\theta_2)+ (1-\tau)h'(\theta_2)) \\ {} & = & (\theta_1,1+ c(g), \theta_2 , 2+ c(g)-(1-\tau)(1-c(h))). 
\end{eqnarray*} It suffices to show that we can choose $c(g)$ and $c(h)$ so that \begin{equation} \label{need} 2+ c(g)-(1-\tau)(1-c(h)) > w + \sigma. \end{equation} Since $\tau$ is less than and arbitrarily close to \begin{equation*} \label{ } \frac{\frac{w}{1+ \delta}-1}{1-c(h)}, \end{equation*} it also suffices to show that we can choose $c(g)$ and $c(h)$ so that \begin{equation*} \label{ } c(g) > w\left(1- \frac{1}{1+\delta}\right) +(\sigma-c(h)). \end{equation*} The right-hand side can be made arbitrarily small by taking $c(h)$ to be close to $\sigma$ and $\delta$ to be small. Since the choice of $c(g)$ is independent of the choice of $c(h)$ and the constraint \eqref{constraint} on $c(g)$ relaxes as $\delta$ goes to zero, we are done. \end{proof} Henceforth, we will assume that the conditions of Lemma \ref{off} hold. \subsubsection{A final (horizontal) adjustment.} The images $\tilde{\Psi}(L_{1,1})$, $\tilde{\Psi}(L_{1,2})$ and $\tilde{\Psi} (L_{2,2})$ are disjoint from $S \times T$ but $\tilde{\Psi}$ still fixes $L_{2,1}$ which intersects $S \times T$. Since $L_{2,1}$ is close to the boundary of $S \times T$, we can make a simple adjustment to obtain the desired map $\Psi$ which moves $L_{2,1}$ off of $S \times T$ as well. Let $f \colon {\mathbb R}/{\mathbb Z} \to {\mathbb R}$ be a smooth function such that for some positive real number $c(f)$ greater than and arbitrarily close to $\ell+\sigma -2$ we have $$f'(s) = c(f),\, \text{for} \,\,s \in \left[0, \frac{1+ \delta}{\ell}\right],$$ $\max(f') = c(f)$, and $\min(f')$ is less than and arbitrarily close to $$-\frac{c(f)}{\frac{\ell}{1+ \delta}-1}.$$ Setting $F(\theta_1, \theta_2) =f(\theta_1)$, we have \begin{equation*} \label{fflow} \phi^t_F(\theta_1,R_1, \theta_2, R_2) = (\theta_1,R_1+tf'(\theta_1), \theta_2 , R_2). \end{equation*} Our lower bound for $c(f)$ implies that $\phi^1_F(L_{2,1})$ is disjoint from $S \times T$. 
Looking at the $R_2$-component, it is clear that $\phi^1_F(L_{2,1})$ is disjoint from $L_{2,2}= \tilde{\Psi}(L_{2,2})$. To prove that $\phi^1_F(L_{2,1})$ is also disjoint from $\tilde{\Psi}(L_{1,1})$ and $\tilde{\Psi}(L_{1,2})$ it suffices to prove the following. \begin{lemma} The sets $\{R_1 \leq 1+ c(g)\}$ and $\phi^1_F(L_{2,1})$ are disjoint. \end{lemma} \begin{proof} It suffices to prove that \begin{equation*} \label{ } 2-\frac{c(f)}{\frac{\ell}{1+ \delta}-1} > 1+ c(g) \end{equation*} or, even more, that \begin{equation*} \label{ } 1> c(g) + \frac{\ell+\sigma -2}{\frac{\ell}{1+ \delta}-1}. \end{equation*} The latter inequality clearly holds for all sufficiently small values of $c(g)$ and $\ell+\sigma -2$. \end{proof} The Hamiltonian diffeomorphism $$\Psi= \phi^1_{F_{[L_{2,1}]}} \circ \phi^{1-\tau}_{H_{\left[\phi^{\tau}_{H}(L_{1,1}) \cup \phi^1_{G}(L_{1,2})\right]} }\circ \phi^{\tau}_{H_{[L_{1,1}]}} \circ \phi^1_{G_{[L_{1,2}]}}$$ now has all the desired properties. With its construction, the proof of Theorem \ref{eps} is complete. \begin{question} Can $\Psi$, or any other Hamiltonian diffeomorphism which displaces the $L_{k,l}$ from $S \times T$, be generated by an autonomous Hamiltonian? \end{question}
1,108,101,565,882
arxiv
\section{Runway Image Analysis} \label{sec:analysis} Before introducing the design of our system framework, we first conduct a brief analysis on the runway show images we collected to show the rich information of fashion designs we can infer from the images using deep learning neural network models. We used 32 years of runway data, with 952 unique fashion designers, 8965 fashion shows, and 256,907 unique looks in fashion shows. Note that for this paper, we leverage only the fall and spring ready-to-wear collections since they reflect more directly the apparel worn by consumers and timely fashion trends. To convert runway images to more quantifiable information that we can analyze, we pass all the collected runway images through a DenseNet that is pre-trained on ImageNet with 1000 classes \cite{DBLP:conf/cvpr/HuangLMW17,deng2009imagenet}. We retrieve the top 10 predicted classes of each image based on the classified probabilities and view them as the possible objects appearing in the images. For each class, we plot their occurrences normalized by the number of designers that had fashion shows in that year. Figure \ref{fig:imagenet_trends} shows the trend comparisons between different fashion designs detected by ImageNet.\footnote{We only plot trends from 2000 and onwards since the years before that include very few designers.} As shown, we can observe drastic changes in trends in the detected fashion designs. For example, as shown in Figure \ref{fig:kimono_vs_trenchcoat}, the trend of stable pieces such as trench coats does not change as much as trendy pieces such as kimonos, similar to the trends of loafers (stable piece) and clogs (trendy piece) in Figure \ref{fig:loafer_vs_clog}. Besides differentiating trendy pieces from stable pieces, Figures \ref{fig:hoopskirt_vs_jean} and \ref{fig:cowboyhat_vs_jersey} also show the changes of different fashion designs' trends. 
In particular, Figure \ref{fig:cowboyhat_vs_jersey} shows the declines of both cowboy hats and jersey, which were both considered extremely trendy in the 2000s.\footnote{https://www.wmagazine.com/gallery/paris-hilton-best-2000s-style/}\footnote{https://www.buzzfeed.com/hnigatu/iconic-fashion-trends-from-the-early-2000s/} Through the analysis of runway images using DenseNet pre-trained on ImageNet without any fine-tuning, we may confirm that pre-trained CNN models are capable of assisting us to extract meaningful visual information from the fashion runway images, even though ImageNet is not a fashion-focused dataset. Based on such confirmation, we develop our runway design prediction system relying on the visual information extracted by pre-trained CNN models. \section{Fashion Trend Research} \label{sec:background} The origin of fashion shows goes back to the 1800s in Paris, while it prevailed in the 1920s in the US among the major department stores. It was not until the 1970s that fashion designers started to hold fashion shows outside of the department stores to showcase their newly released collections.\footnote{\url{https://en.wikipedia.org/wiki/Fashion_show}} Buyers from retail stores attend the fashion shows to decide what collections to stock in their stores for the next seasons, and make the order after the shows. Fashion journalists also attend the shows to report on the newly released collections. The designers also, through the fashion shows, learn about what other designers produce in the season. Such events are viewed as the most important factors in deciding the new fashion trends \cite{skov2006role,jackson2007process}. Nowadays, the fashion shows take place in every fashion season (spring, resort, and fall) in the major fashion cities: New York, Paris, and Milan. For the fashion seasons, spring shows are held around September, fall shows are held around February, and resort shows are usually held during summer, in between fall and spring shows. 
For spring and fall shows, they can be further divided into \emph{ready-to-wear} and \emph{couture} shows. Ready-to-wear shows display the collections that will be in stores, ready for the consumers to purchase, while couture shows display the collections that are for custom-made only. The study of fashion trends can go back to several decades ago. In the 1960s, Blumer studied the trends of fashion from a theoretical point of view, which was a popular approach to study the fashion industry back then \cite{blumer1969fashion,reynolds1968cars,bronfenbrenner1966trends}. In the 1970s, the idea of \emph{fashion leaders} and \emph{design diffusion} also started to receive attention from the fashion researchers \cite{brett1975perceptual,schrank1973correlates,summers1970identity}. Afterward, the studies of the fashion industry and trends have moved toward a more quantitative direction. Tigert et al. studied fashion evolvement using fashion buyer data \cite{tigert1976fashion}. Belleau studied the cycles of dress length changes over the decades using measurements obtained from paintings since 1860 \cite{belleau1987cyclical}. Nowadays, with the immense amount of data accessible, researchers can study fashion through an empirical approach leveraging the rich data of fashion, including online social networks, fashion magazine archives, and fashion runway images. On the consumer end, Sanchis-Ojeda et al. studied the fashion trends through consumer click rates \cite{sanchis2016detection}, and He et al. studied the fashion trends using Amazon's recommendation dataset \cite{he2016ups}. On the designer end, Vittayakorn et al. and Furukawa et al. both explored using visuals to detect fashion trends on the runway \cite{vittayakorn2015runway,furukawa2019visualisation}. We later present the strong signals visuals can convey from the fashion runway images in Section \ref{sec:analysis}. 
\section{Conclusion}\label{sec:conclusion} In this paper, we propose a framework that leverages three decades of fashion runway image data to predict next season's fashion designs. Our framework consists of two neural networks: a runway embedding learning model and a next-season design RNN/LSTM model. We show that, when compared with a random guess, our framework can well predict which design will be released. \section{Evaluation} \label{sec:evaluation} In this section, we introduce the dataset we collected and used for evaluation. We then describe our experiment setting. We finally discuss the prediction task formulation and report the results. \subsection{Experiment Setting} We implemented all of our models in Tensorflow. The images are passed through DenseNet to obtain image features. The image features generated by DenseNet are of dimension 50176. The look embedding $h_l$ and collection embedding $h_c$ are of dimension 256. The weights in the models are initialized using Xavier initializer and biases are initialized as zeroes. As a preliminary experiment, due to the cold-start problem, we focus our evaluation on the 202 designers with most fashion shows reported in our dataset. The training of the system consists of two stages: we first train the runway show embedding model, then use the trained model to generate runway show embedding, which in turn is fed as input to the next-season prediction RNN model. We explain the training process below and summarize it in Algorithm \ref{alg:training}. \begin{algorithm}[t!] 
\caption{Model training process of runway fashion design prediction.}\label{alg:training} \begin{algorithmic}[1] \Procedure{TrainRunwayShowEmbedding}{} \State $epoch \gets 0$ \While{not converged or $epoch < $ \texttt{MAX\_EPOCH}} \State \text{Pass batch of $(\lbrace x \rbrace, y_d, y_s)$ to }\texttt{RunwayShowEmbedding} \If{epoch \texttt{mod} $2 == 0$ } \State Optimize $\mathcal{L}_{season}$ \Else \State Optimize $\mathcal{L}_{designer}$ \EndIf \State $epoch \gets epoch + 1$ \Return Learned parameters $\Theta_{runway}$ \EndWhile \EndProcedure \Procedure{TrainNextSeasonPredictionRNN/LSTM}{} \State $\Theta_{runway} \gets$ \texttt{TrainRunwayShowEmbedding} \State $h_c \gets$ Generate all collection embedding with $\Theta_{runway}$ \State $h_{tr} \gets$ Generate all trend embedding with $\Theta_{runway}$ \For{designer $d \in \mathcal{D}$} \State $epoch \gets 0$ \While{not converged or $epoch <$ \texttt{MAX\_EPOCH}} \State Pass one batch of $(\Theta_{runway}, d)$ to \texttt{NextSeasonRNN} \EndWhile \State Add $\Theta_{rnn}^d$ to $\Theta_{rnn}$ \EndFor \Return Learned parameters $\Theta_{rnn}$ \EndProcedure \end{algorithmic} \end{algorithm} \subsubsection{Runway Show Embedding Model} The runway show embedding model is a joint-task neural network with two objective functions to minimize. Since $\mathcal{L}_{designer}$ is a multi-class classification loss (202 classes in the experiment) and $\mathcal{L}_{season}$ is a binary classification loss, the scales of the two losses are very different. Minimizing the two by linearly adding them together will dilute the value of one of the losses and affect the optimization. To prevent this, we train on the two objectives in alternating epochs. Each input for the runway show embedding model follows the format of $(\lbrace x \rbrace, y_d, y_s)$, where the first element is a set of images. We use 70\% of the data for training, 20\% for validation and 10\% for testing. 
We set the batch size to 16, and use AdamDelta optimizer for backpropagation, and terminate training once both $\mathcal{L}_{designer}$ and $\mathcal{L}_{season}$ converge. For interpretability, we evaluate the model's performance on embedding learning using accuracy (i.e., the number of instances classified correctly) instead of cross entropy. At this preliminary stage, we find that given a set of runway images, the model can better distinguish the designers who create the collection (2.5\% for a 202-way classification [baseline: 0.4\%]) than the season the collection is released (50.25\% for a binary classification [baseline: 50\%]). \subsubsection{Next-Season Prediction RNN/LSTM Model} After training of the runway show embedding model is done, we use the trained model to create each fashion show's \emph{collection embedding}, $h_c$, and each season's \emph{trend embedding}, $h_{tr}$. Each input for the next-season prediction RNN model for a designer $d$ follows the format of $[(h_c^1, h_{tr}^1), (h_c^2, h_{tr}^2), ..., (h_c^{T_d}, h_{tr}^{T_d})]$, where $T_d$ is the maximum timestamp for designer $d$. We train a next-season prediction RNN/LSTM model for each designer with batch size 16. We use Adam optimizer with learning rate 0.0001 for backpropagation. The training stops when $\mathcal{L}_{rnn}$ converges or when the maximum number of epochs (500) is reached. \begin{figure}[t!] \centering \includegraphics[width=\linewidth]{figures/evaluation_plot.pdf} \caption{Individual performance on next-season prediction.} \label{fig:evaluation_plot} \end{figure} \begin{table}[t!] \caption{Performance summary on next-season prediction.} \label{table:evaluation_results} \begin{tabular}{@{}llll@{}} \toprule \textbf{Cell type} & \textbf{Min. AUC} & \textbf{Avg. AUC} & \textbf{Max. 
AUC} \\ \midrule \textbf{RNN} & 53.85\% & 78.40\% & 93.02\% \\ \textbf{LSTM} & 64.17\% & 78.42\% & 95.00\% \\ \textbf{Random} & 50.00\% & 50.00\% & 50.00\% \\ \bottomrule \end{tabular} \end{table} \subsection{Next-Season Prediction Task} To evaluate the performance of our proposed framework, we conduct a next-season design prediction task. We formulate the prediction task as follows. Given a designer $d$, her collections from time $1$ to $t-1$, her collection at time $t$ as positive collection $X_i$ and a random collection not designed by $d$ as negative collection $X_j$, the objective is to predict next-season scores so that $z^{(t-1)}_{d,i} > z^{(t-1)}_{d,j}$, where $z$ is computed as follows. \begin{align} z^{(t-1)}_{d,i} = \texttt{cosine}([c_1, ..., c_{t-1}], X_i) \end{align} For each designer $d$ in each season, we randomly select a collection from any designer $d'$ ($d' \neq d$) in any season $s'$ as negative collection $X_j$ to form evaluation samples $\mathcal{E}_d$. We evaluate the prediction task by calculating the Area Under Curve (AUC) as follows. \begin{align} AUC = \frac{1}{|\mathcal{D}|}\sum_{d\in\mathcal{D}}\frac{1}{|\mathcal{E}_d|} \sum_{(i,j) \in \mathcal{E}_d} \mathbb{I} (z^{(t-1)}_{d,i} > z^{(t-1)}_{d,j}) \end{align} \noindent where $\mathbb{I}(\cdot)$ is an indicator function counting the number of times $z^{(t-1)}_{d,i} > z^{(t-1)}_{d,j}$ is true. We experimented with both cell alternatives, RNN and LSTM. The results are shown in Figure \ref{fig:evaluation_plot} and Table \ref{table:evaluation_results}. At this preliminary stage, without much parameter tuning, for RNN, we achieve an average AUC of 78.40\% for all the 202 designers (solid green line), which is superior to a baseline of 50\% (red dashed line) for binary classification problems. The highest performance we get for an individual designer is 93.02\% and the worst performance is 53.85\%. 
As for LSTM, we achieve an average of 78.42\% AUC, with the best performance of 95\% and worst being 64.17\%. \section{Introduction} Fashion is a fast-paced and dynamic industry. While the majority of fashion consumers obtain their fashion-related products from the mass market, the trends are often driven by high-fashion designers. In the fashion industry, the high-fashion designers (e.g., Chanel and Christian Dior) are the innovators that propose new fashion design ideas, while the mass market (e.g., Nike and GAP) and fast fashion brands (e.g., Zara and H\&M) are the followers. Fashion trends have been long-perceived as a product of a subjective process. However, researchers have shown that fashion trends often follow specific patterns \cite{he2016ups,crane2012fashion,sproles1981analyzing}. Fashion trend forecasting is not a new problem. WGSN has been the long-standing brand that provides fashion forecast reports every season since 1998, with clients including Coach, Nike, Adidas, and Levis'.\footnote{\url{https://www.wgsn.com/}} However, to the best of the authors' knowledge, the reports they produce are more based on qualitative analysis and consist of suggestive insights. While these are incredibly valuable resources, we believe a more precise, large-scale, and quantitative prediction is also necessary. Google once released a report on fashion trends based on search queries in 2016.\footnote{https://www.thinkwithgoogle.com/advertising-channels/search/fashion-trends-2016-google-data-consumer-insights/} However, a further pursuit of these insightful results has yet to be continued since then. Being able to foresee the upcoming trends in fashion has various benefits. First of all, from the designers' perspective, knowing the competitors' potential next design can help adjust their in-house designs for the next-season collection. Secondly, from the retailers' perspective, knowing ahead what trends will take off soon helps them plan what inventory to stock up on. 
Thirdly, from the consumers' perspective, for those that are highly trend-aware (such as online fashion influencers), knowing the popular trends in advance helps them stay on the top of the fashion game. While the change of fashion trends may seem extremely volatile and irregular, the core that drives the change of trends in the fashion industry is heavily organized and periodic: fashion shows. Fashion shows are viewed as one of the most critical events in the fashion industry that drive the fashion trends forward every fashion season for decades. We hence focus on the fashion show information to construct our fashion trend prediction system. But even with the fashion show information, capturing the fashion trends and being able to predict the next fashion design is challenging. First of all, extracting meaningful information from the visual data of fashion shows can be difficult. Although there has been an abundance of deep learning models built to learn high-quality embedding of images, the datasets the models are trained on are very general and not fashion focused. Secondly, fashion designers' styles change over time. How to account for such evolution is a critical question. Thirdly, besides the changing styles of designers, fashion designs are also influenced by the overall industry trend. A mechanism that can capture the concept of the overall trend in the fashion industry at a specific time is needed. In this paper, we propose to leverage the fashion show data to do \emph{next-season design prediction}. We collected fashion show data of three decades, which consists of the images of each designer's collections in each fashion show. We design a prediction framework that can achieve the following three goals. (1) It takes in the visuals of fashion show images and learns the embedding of each fashion show. 
(2) It trains an RNN/LSTM model \cite{rumelhart1988learning,hochreiter1997long} for each fashion designer based on their fashion designs, along with other designs in the industry. (3) It uses the learned fashion show embedding and trained RNN/LSTM models to predict, given a fashion designer and its past designs, the design they will put out in the next season. For predicting the next season's design through a Bayesian Personalized Ranking formulation, we achieve an average area under the curve (AUC) of 78.42\% using LSTM, and up to 95\% AUC for an individual designer. The rest of this paper is organized as follows. In Section \ref{sec:background}, we introduce the background and literature related to fashion shows and fashion research. We then conduct data analysis on the runway image data in Section \ref{sec:analysis}. Our proposed framework is presented in Section \ref{sec:proposed} and evaluated in Section \ref{sec:evaluation}. We finally conclude this work in Section \ref{sec:conclusion}. \section{Methodology}\label{sec:proposed} The system framework we propose in this paper consists of two main modules: (1) runway show embedding learning model (Figure \ref{fig:runway_show_embedding_network}) and (2) next-season prediction RNN model (Figure \ref{fig:next_season_lstm}). Overview of the system design is shown in Figure \ref{fig:system_framework}. For clarity, we summarize the notations used in this paper in Table \ref{table:symbol_definition}. \begin{table}[t!] 
\caption{Symbol definition} \label{table:symbol_definition} \begin{tabular}{@{}cl@{}} \toprule \textbf{Symbol} & \textbf{Definition} \\ \midrule $\mathcal{D}$ & Designer set \\ $\mathcal{S}$ & Season set \\ $\mathcal{X}$ & Look set \\ \midrule $x$ & Look image input \\ $y_d$ & Designer output \\ $y_s$ & Season output \\ \midrule $\Phi(\cdot)$ & CNN model \\ $f(\cdot)$ & Fully-connected layer \\ \midrule $h_v$ & Visual embedding \\ $h_l$ & Look embedding \\ $h_c$ & Collection embedding \\ $h_{ds}$ & Designer style embedding \\ $h_{tr}$ & Trend embedding \\ \midrule $W, U$ & RNN/LSTM transitional matrices \\ \bottomrule \end{tabular} \end{table} \subsection{Problem Formulation} Each designer $d \in \mathcal{D}$ puts out a collection of fashion designs in season $t$. Each collection consists of a set of looks $X=\lbrace x_1, ..., x_k \rbrace \subset \mathcal{X}$, where $k$ is the number of looks in the collection and $\mathcal{X}$ is the universal set of looks. The goal is, given a designer $d$ and its collections $[c_1,...c_{t-1}]$, to predict its design at season $t$. \begin{figure*}[t!] \centering \includegraphics[width=.9\linewidth]{figures/runway_show_embedding_network_v2.pdf} \caption{Runway show embedding learning model} \label{fig:runway_show_embedding_network} \end{figure*} \begin{figure}[t!] \centering \includegraphics[width=.95\linewidth]{figures/next_season_lstm_v2.pdf} \caption{Next-season prediction RNN/LSTM model} \label{fig:next_season_lstm} \end{figure} \subsection{Runway Show Embedding Learning Model} Each collection by each designer consists of multiple images that capture the outfit on the runway models, which we call \emph{looks}. The number of looks in each collection varies. To generate an embedding for a given collection, we first pass all the looks' images $\bold{x}$ through a pre-trained CNN model of choice $\Phi(\bold{x})$ (e.g., DenseNet \cite{DBLP:conf/cvpr/HuangLMW17}) to generate visual embedding, $\bold{h_v}$. 
We then pass the visual embedding through a fully-connected layer $f(\bold{h_v})$ to reduce its dimension, and generate \emph{look embedding}, $\bold{h_l}$. We then do a max pooling across all the look embedding to generate a \emph{collection embedding}, $h_c$. The above process can be summarized as follows. \begin{align} \bold{h_v} &= \Phi(\bold{x}) \\ \bold{h_l} & = f(\bold{h_v}) \\ h_c &= \texttt{maxpool}(\bold{h_l}) \end{align} A good collection embedding should be able to capture its designer and the season it belongs to. We therefore design the model to be multi-task. Firstly, with the collection embedding $h_c$, the model predicts which designer designed this collection. $h_c$ is passed through a fully-connected layer $f_d(h_c)$, then a \texttt{softmax} layer, which further outputs $\hat{y}_d$. $\hat{y}_d$ is a $|\mathcal{D}|$-dimensional vector, where the $i$\textsuperscript{th} value in $\hat{y}_d$ represents the probability of the collection being designed by the $i$\textsuperscript{th} designer in $\mathcal{D}$. The above process is summarized as follows. \begin{align} \hat{y}_d = \texttt{softmax}(f_d(h_c)) \end{align} Secondly, also with the collection embedding $h_c$, the model predicts which season\footnote{The season here we refer to is spring, resort, fall, couture, etc, regardless of the year.} this collection is released. $h_c$ is passed through a fully-connected layer $f_s(h_c)$, then a \texttt{softmax} layer, which further outputs $\hat{y}_s$. $\hat{y}_s$ is a $|\mathcal{S}|$-dimensional vector, where the $j$\textsuperscript{th} value in $\hat{y}_s$ represents the probability of the collection being released in the $j$\textsuperscript{th} season in $\mathcal{S}$. The above process is summarized as follows. \begin{align} \hat{y}_s = \texttt{softmax}(f_s(h_c)) \end{align} The objective of the model is to minimize the following two loss functions. 
\begin{align} \mathcal{L}_{designer} &= \frac{1}{|\mathcal{D}|}\sum H(\hat{y}_d, y_d) \\ \mathcal{L}_{season} &= \frac{1}{|\mathcal{D}|}\sum H(\hat{y}_s, y_s) \end{align} \noindent where $H$ is the cross entropy. \subsection{Next-Season Prediction Model} Abundant factors influence what a designer will design for the next season. We believe the two most important parts are what the designers have designed in the past that define their styles, and what the industry has put out as a whole in the previous seasons. We call the first component \emph{designer style} and the second component \emph{trend}. Such a concept is illustrated in Figure \ref{fig:next_season_lstm}. The trend embedding of the whole industry at a given time $t$ can be obtained through an aggregation over all the designers' collection embedding at time $t$, which is expressed as follows. \begin{align} h_{tr}^t = \texttt{maxpool} (\lbrace h_c^{t(1)},...,h_c^{t(|\mathcal{D}|)} \rbrace) \end{align} \noindent where $h_c^{t(i)}$ denotes designer $i$'s collection embedding at time $t$. The designer style of a designer $i$ at a given time $t$ can be generated using all of their collection embedding $\bold{h_c}$ from the past ($1, ..., t-1$). We design two alternatives to capture such sequential evolution. The first alternative is the recurrent neural network (RNN): at time $t$, designer $i$'s designer style embedding $h_{ds}$ is obtained as follows. \begin{align} h_{ds}^t = \texttt{tanh} \big( W [h_c^t || h_{tr}^{t-1}] + U h_{ds}^{t-1} +b \big) \end{align} \noindent where $W$ and $U$ are fully-connected layers: $W$ transforms the collection embedding $h_c$ and trend embedding $h_{tr}$ to smaller hidden embedding in RNN and $U$ transforms the design style embedding at time $t-1$ to at time $t$. By using RNN, we capture the designer's \emph{evolvement of styles} throughout the time, rather than just looking at a single snapshot. This alternative is shown in Figure \ref{fig:rnn_cell}. \begin{figure}[t!] 
\centering \begin{subfigure}[t]{0.5\textwidth} \centering \includegraphics[width=.75\linewidth]{figures/rnn_cell.pdf} \caption{RNN cell.} \label{fig:rnn_cell} \end{subfigure}\vspace{5pt} \begin{subfigure}[t]{0.5\textwidth} \centering \includegraphics[width=.75\linewidth]{figures/lstm_cell.pdf} \caption{LSTM cell.} \label{fig:lstm_cell} \end{subfigure} \caption{Cell alternatives for next-season prediction model.} \end{figure} Another option is to leverage long short-term memory (LSTM) design, where the evolution of designers' styles can be modeled as below. \begin{align} f_t &= \sigma_g \big( W_f [h_c^t || h_{tr}^{t-1}] + U_f h_{ds}^{t-1} + b_f \big) \\ i_t &= \sigma_g \big( W_i [h_c^t || h_{tr}^{t-1}] + U_i h_{ds}^{t-1} + b_i \big) \\ o_t &= \sigma_g \big( W_o [h_c^t || h_{tr}^{t-1}] + U_o h_{ds}^{t-1} + b_o \big) \\ c_t &= f_t \odot c_{t-1} + i_t \odot \texttt{tanh} \big( W_c [h_c^t || h_{tr}^{t-1}] + U_c h_{ds}^{t-1} +b_c \big) \\ h_t &= o_t \odot \texttt{tanh}(c_t) \end{align} \noindent where the purposes of $W$ and $U$ are similar to those in the RNN. Also, forget gates are included to enable the model's ability to capture designers' short-term and long-term dependencies on the designs and trends in the past. This alternative is shown in Figure \ref{fig:lstm_cell}. Given a designer, a sequence of collection embedding from time $1$ to $t-1$, the objective of the model is to minimize the following loss. \begin{align} \mathcal{L}_{rnn} = \frac{1}{|\mathcal{D}|}\sum\texttt{cosine} (h_c^t, \hat{h}_c^t) \end{align} \noindent where $\texttt{cosine}$ is the cosine distance. \section{Introduction} ACM's consolidated article template, introduced in 2017, provides a consistent \LaTeX\ style for use across ACM publications, and incorporates accessibility and metadata-extraction functionality necessary for future Digital Library endeavors. Numerous ACM and SIG-specific \LaTeX\ templates have been examined, and their unique features incorporated into this single new template. 
If you are new to publishing with ACM, this document is a valuable guide to the process of preparing your work for publication. If you have published with ACM before, this document provides insight and instruction into more recent changes to the article template. The ``\verb|acmart|'' document class can be used to prepare articles for any ACM publication --- conference or journal, and for any stage of publication, from review to final ``camera-ready'' copy, to the author's own version, with {\it very} few changes to the source. \section{Template Overview} As noted in the introduction, the ``\verb|acmart|'' document class can be used to prepare many different kinds of documentation --- a double-blind initial submission of a full-length technical paper, a two-page SIGGRAPH Emerging Technologies abstract, a ``camera-ready'' journal article, a SIGCHI Extended Abstract, and more --- all by selecting the appropriate {\it template style} and {\it template parameters}. This document will explain the major features of the document class. For further information, the {\it \LaTeX\ User's Guide} is available from \url{https://www.acm.org/publications/proceedings-template}. \subsection{Template Styles} The primary parameter given to the ``\verb|acmart|'' document class is the {\it template style} which corresponds to the kind of publication or SIG publishing the work. This parameter is enclosed in square brackets and is a part of the {\verb|documentclass|} command: \begin{verbatim} \documentclass[STYLE]{acmart} \end{verbatim} Journals use one of three template styles. All but three ACM journals use the {\verb|acmsmall|} template style: \begin{itemize} \item {\verb|acmsmall|}: The default journal template style. \item {\verb|acmlarge|}: Used by JOCCH and TAP. \item {\verb|acmtog|}: Used by TOG. \end{itemize} The majority of conference proceedings documentation will use the {\verb|acmconf|} template style. 
\begin{itemize} \item {\verb|acmconf|}: The default proceedings template style. \item{\verb|sigchi|}: Used for SIGCHI conference articles. \item{\verb|sigchi-a|}: Used for SIGCHI ``Extended Abstract'' articles. \item{\verb|sigplan|}: Used for SIGPLAN conference articles. \end{itemize} \subsection{Template Parameters} In addition to specifying the {\it template style} to be used in formatting your work, there are a number of {\it template parameters} which modify some part of the applied template style. A complete list of these parameters can be found in the {\it \LaTeX\ User's Guide.} Frequently-used parameters, or combinations of parameters, include: \begin{itemize} \item {\verb|anonymous,review|}: Suitable for a ``double-blind'' conference submission. Anonymizes the work and includes line numbers. Use with the \verb|\acmSubmissionID| command to print the submission's unique ID on each page of the work. \item{\verb|authorversion|}: Produces a version of the work suitable for posting by the author. \item{\verb|screen|}: Produces colored hyperlinks. \end{itemize} This document uses the following string as the first command in the source file: \verb|\documentclass[sigconf,screen]{acmart}|. \section{Modifications} Modifying the template --- including but not limited to: adjusting margins, typeface sizes, line spacing, paragraph and list definitions, and the use of the \verb|\vspace| command to manually adjust the vertical spacing between elements of your work --- is not allowed. {\bf Your document will be returned to you for revision if modifications are discovered.} \section{Typefaces} The ``\verb|acmart|'' document class requires the use of the ``Libertine'' typeface family. Your \TeX\ installation should include this set of packages. Please do not substitute other typefaces. The ``\verb|lmodern|'' and ``\verb|ltimes|'' packages should not be used, as they will override the built-in typeface families. 
\section{Title Information} The title of your work should use capital letters appropriately - \url{https://capitalizemytitle.com/} has useful rules for capitalization. Use the {\verb|title|} command to define the title of your work. If your work has a subtitle, define it with the {\verb|subtitle|} command. Do not insert line breaks in your title. If your title is lengthy, you must define a short version to be used in the page headers, to prevent overlapping text. The \verb|title| command has a ``short title'' parameter: \begin{verbatim} \title[short title]{full title} \end{verbatim} \section{Authors and Affiliations} Each author must be defined separately for accurate metadata identification. Multiple authors may share one affiliation. Authors' names should not be abbreviated; use full first names wherever possible. Include authors' e-mail addresses whenever possible. Grouping authors' names or e-mail addresses, or providing an ``e-mail alias,'' as shown below, is not acceptable: \begin{verbatim} \author{Brooke Aster, David Mehldau} \email{dave,judy,[email protected]} \email{[email protected]} \end{verbatim} The \verb|authornote| and \verb|authornotemark| commands allow a note to apply to multiple authors --- for example, if the first two authors of an article contributed equally to the work. If your author list is lengthy, you must define a shortened version of the list of authors to be used in the page headers, to prevent overlapping text. The following command should be placed just after the last \verb|\author{}| definition: \begin{verbatim} \renewcommand{\shortauthors}{McCartney, et al.} \end{verbatim} Omitting this command will force the use of a concatenated list of all of the authors' names, which may result in overlapping text in the page headers. The article template's documentation, available at \url{https://www.acm.org/publications/proceedings-template}, has a complete explanation of these commands and tips for their effective use. 
\section{Rights Information} Authors of any work published by ACM will need to complete a rights form. Depending on the kind of work, and the rights management choice made by the author, this may be copyright transfer, permission, license, or an OA (open access) agreement. Regardless of the rights management choice, the author will receive a copy of the completed rights form once it has been submitted. This form contains \LaTeX\ commands that must be copied into the source document. When the document source is compiled, these commands and their parameters add formatted text to several areas of the final document: \begin{itemize} \item the ``ACM Reference Format'' text on the first page. \item the ``rights management'' text on the first page. \item the conference information in the page header(s). \end{itemize} Rights information is unique to the work; if you are preparing several works for an event, make sure to use the correct set of commands with each of the works. \section{CCS Concepts and User-Defined Keywords} Two elements of the ``acmart'' document class provide powerful taxonomic tools for you to help readers find your work in an online search. The ACM Computing Classification System --- \url{https://www.acm.org/publications/class-2012} --- is a set of classifiers and concepts that describe the computing discipline. Authors can select entries from this classification system, via \url{https://dl.acm.org/ccs/ccs.cfm}, and generate the commands to be included in the \LaTeX\ source. User-defined keywords are a comma-separated list of words and phrases of the authors' choosing, providing a more flexible way of describing the research being presented. CCS concepts and user-defined keywords are required for all short- and full-length articles, and optional for two-page abstracts. \section{Sectioning Commands} Your work should use standard \LaTeX\ sectioning commands: \verb|section|, \verb|subsection|, \verb|subsubsection|, and \verb|paragraph|. 
They should be numbered; do not remove the numbering from the commands. Simulating a sectioning command by setting the first word or words of a paragraph in boldface or italicized text is {\bf not allowed.} \section{Tables} The ``\verb|acmart|'' document class includes the ``\verb|booktabs|'' package --- \url{https://ctan.org/pkg/booktabs} --- for preparing high-quality tables. Table captions are placed {\it above} the table. Because tables cannot be split across pages, the best placement for them is typically the top of the page nearest their initial cite. To ensure this proper ``floating'' placement of tables, use the environment \textbf{table} to enclose the table's contents and the table caption. The contents of the table itself must go in the \textbf{tabular} environment, to be aligned properly in rows and columns, with the desired horizontal and vertical rules. Again, detailed instructions on \textbf{tabular} material are found in the \textit{\LaTeX\ User's Guide}. Immediately following this sentence is the point at which Table~\ref{tab:freq} is included in the input file; compare the placement of the table here with the table in the printed output of this document. \begin{table} \caption{Frequency of Special Characters} \label{tab:freq} \begin{tabular}{ccl} \toprule Non-English or Math&Frequency&Comments\\ \midrule \O & 1 in 1,000& For Swedish names\\ $\pi$ & 1 in 5& Common in math\\ \$ & 4 in 5 & Used in business\\ $\Psi^2_1$ & 1 in 40,000& Unexplained usage\\ \bottomrule \end{tabular} \end{table} To set a wider table, which takes up the whole width of the page's live area, use the environment \textbf{table*} to enclose the table's contents and the table caption. As with a single-column table, this wide table will ``float'' to a location deemed more desirable. 
Immediately following this sentence is the point at which Table~\ref{tab:commands} is included in the input file; again, it is instructive to compare the placement of the table here with the table in the printed output of this document. \begin{table*} \caption{Some Typical Commands} \label{tab:commands} \begin{tabular}{ccl} \toprule Command &A Number & Comments\\ \midrule \texttt{{\char'134}author} & 100& Author \\ \texttt{{\char'134}table}& 300 & For tables\\ \texttt{{\char'134}table*}& 400& For wider tables\\ \bottomrule \end{tabular} \end{table*} \section{Math Equations} You may want to display math equations in three distinct styles: inline, numbered or non-numbered display. Each of the three are discussed in the next sections. \subsection{Inline (In-text) Equations} A formula that appears in the running text is called an inline or in-text formula. It is produced by the \textbf{math} environment, which can be invoked with the usual \texttt{{\char'134}begin\,\ldots{\char'134}end} construction or with the short form \texttt{\$\,\ldots\$}. You can use any of the symbols and structures, from $\alpha$ to $\omega$, available in \LaTeX~\cite{Lamport:LaTeX}; this section will simply show a few examples of in-text equations in context. Notice how this equation: \begin{math} \lim_{n\rightarrow \infty}x=0 \end{math}, set here in in-line math style, looks slightly different when set in display style. (See next section). \subsection{Display Equations} A numbered display equation---one set off by vertical space from the text and centered horizontally---is produced by the \textbf{equation} environment. An unnumbered display equation is produced by the \textbf{displaymath} environment. Again, in either environment, you can use any of the symbols and structures available in \LaTeX\@; this section will just give a couple of examples of display equations in context. 
First, consider the equation, shown as an inline equation above: \begin{equation} \lim_{n\rightarrow \infty}x=0 \end{equation} Notice how it is formatted somewhat differently in the \textbf{displaymath} environment. Now, we'll enter an unnumbered equation: \begin{displaymath} \sum_{i=0}^{\infty} x + 1 \end{displaymath} and follow it with another numbered equation: \begin{equation} \sum_{i=0}^{\infty}x_i=\int_{0}^{\pi+2} f \end{equation} just to demonstrate \LaTeX's able handling of numbering. \section{Figures} The ``\verb|figure|'' environment should be used for figures. One or more images can be placed within a figure. If your figure contains third-party material, you must clearly identify it as such, as shown in the example below. \begin{figure}[h] \centering \includegraphics[width=\linewidth]{sample-franklin} \caption{1907 Franklin Model D roadster. Photograph by Harris \& Ewing, Inc. [Public domain], via Wikimedia Commons. (\url{https://goo.gl/VLCRBB}).} \Description{The 1907 Franklin Model D roadster.} \end{figure} Your figures should contain a caption which describes the figure to the reader. Figure captions go below the figure. Your figures should {\bf also} include a description suitable for screen readers, to assist the visually-challenged to better understand your work. Figure captions are placed {\it below} the figure. \subsection{The ``Teaser Figure''} A ``teaser figure'' is an image, or set of images in one figure, that are placed after all author and affiliation information, and before the body of the article, spanning the page. 
If you wish to have such a figure in your article, place the command immediately before the \verb|\maketitle| command: \begin{verbatim} \begin{teaserfigure} \includegraphics[width=\textwidth]{sampleteaser} \caption{figure caption} \Description{figure description} \end{teaserfigure} \end{verbatim} \section{Citations and Bibliographies} The use of {\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08emT\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}\ for the preparation and formatting of one's references is strongly recommended. Authors' names should be complete --- use full first names (``Donald E. Knuth'') not initials (``D. E. Knuth'') --- and the salient identifying features of a reference should be included: title, year, volume, number, pages, article DOI, etc. The bibliography is included in your source document with these two commands, placed just before the \verb|\end{document}| command: \begin{verbatim} \bibliographystyle{ACM-Reference-Format}
% (removed non-document extraction artifacts)
\section{Introduction} The need to enlarge or revise General Relativity (GR) arises from some open problems in modern physics at both ultraviolet and infrared scales. For instance, at astrophysical and cosmological scales the predictions of GR agree with observations only after the theoretical assumption regarding the existence of the so-called dark matter and dark energy. But at fundamental level, up to now there are no experimental evidences that such unknown forms of matter and energy really exist. This fact together with other shortcomings of Einstein's theory represents signals of a possible breakdown in our understanding of gravity, and the opportunity of developing extended or alternative theories of gravity is to be seriously considered. \\ Actually, several extensions of GR have been formulated in the last thirty years; among these, $f(R)$-gravity is one of the most direct and simplest: it consists in relaxing the hypothesis that the gravitational Lagrangian be a linear function of the Ricci scalar $R$, thus allowing a dependence on $R$ more general than that of the Einstein-Hilbert action. In the last years, $f(R)$-gravity has received great attention due to the fact that it seems to account for both cosmic speed-up and missing matter at cosmological and astrophysical scales, respectively \cite{n-o,o,m/1}. \\ At the same time, another way to extend GR consists in considering torsion, including it among the geometrical properties of space-time. Torsion was first introduced in the geometrical background by Cartan; then Sciama and Kibble embodied it into the framework of Einstein gravity following the idea that energy is the source of curvature in the same way in which spin is the source of torsion. The resulting Einstein-Cartan-Sciama-Kibble (ECSK) theory has been the first generalization of GR trying to take the spin of elementary fields into account and it is still one of the most serious attempts in this direction \cite{hehl,m/2}. 
\\ According to this line of thinking, $f(R)$-gravity with torsion represents one of the simplest extensions of the ECSK theory, just as purely metric $f(R)$-gravity is with respect to GR. Indeed, the basic idea is again that of replacing the Einstein-Hilbert Lagrangian with a non-linear function. An important consequence of the non-linearity of the gravitational Lagrangian is that we have non-vanishing torsion even without spin, as long as the stress-energy trace is not constant \cite{CCSV1,CCSV2,CCSV3,CV4}. It is known that torsion may give rise to singularity-free and accelerated cosmological models \cite{SIV}; just arising from the non-linearity of the gravitational Lagrangian function $f(R)$, a propagating torsion could amplify this effect and this point certainly deserves further attention. \\ Besides, in a recent paper \cite{FV1} we have studied in detail $f(R)$-gravity with torsion in presence of Dirac fields, in order to better exploit the coupling between torsion and spin within the framework of $f(R)$-theories. \\ In the present work we explore possible cosmological applications of the theory proposed in \cite{FV1}. More precisely, we study Dirac fields in Bianchi type-I (BI) $f(R)$-cosmological models. BI is the simplest anisotropic space-time that generalizes the spatially flat Friedmann-Lema\"{\i}tre-Robertson-Walker (FLRW) universe. The difference with respect to the FLRW flat model is that in a BI universe the scale factors of spatial directions can differ from each other, thus yielding anisotropy. \\ Though FLRW models provide very accurate descriptions of the universe observed today, the latter could have undergone an anisotropic phase in its early epoch; for instance, the detected anisotropies in the cosmic microwave background could represent evidences in this direction. 
On the other hand, non-trivial Dirac fields are seen to generate spin-torsion which is incompatible with spatial isotropy \cite{t}: when dealing with Dirac fields in presence of torsion, anisotropic models are then a forced choice. Of course, for consistency with the observed data at present day, the initial anisotropic phase has to undergo a subsequent isotropization process as the Dirac fields' energy and spin decrease and the universe expands. This dynamical behavior can represent a selection rule for viable functions $f(R)$. \\ The layout of the paper is the following. In Section 2, we review some generalities about $f(R)$-gravity with torsion in presence of Dirac fields. In Section 3, we introduce BI cosmological models in torsional $f(R)$-gravity still in presence of Dirac fields and, for completeness, a perfect fluid. Following the line traced in previous papers \cite{Saha1,Saha3,Saha2}, we derive a solution of the generalized Dirac equations, holding independently of the assumed model $f(R)$; we integrate the Einstein-like equations in the simplest case $f(R)=R$ and discuss an approximate solution for $f(R)=R+\alpha R^2$. Section 4 is devoted to discussing the field equations in the Einstein frame; we show that, through a conformal transformation from the Jordan to the Einstein frame, we can put the Einstein-like equations in a quadratically integrable form; this result turns out to be useful for a qualitative analysis of the solutions or numerical integration even when explicit calculations for analytical integration are difficult; two examples are given.
Finally, in Section 5, we face a crucial aspect in dealing with Einstein-like equations, namely the preservation in time of the Hamiltonian constraint (momentum constraints are here trivial identities); in GR this result is generally ensured by the Bianchi identities and the matter field equations, but here it is not {\it a priori} expected and it has to be proved; anyway, by working out the general conservation laws proved in \cite{FV1}, we show that classical results by Bruhat \cite{yvonne4} apply also in the present case, thus ensuring the required time-preservation of the Hamiltonian constraint. \section{Dirac fields in $f(R)$-gravity with torsion} In previous papers \cite{CCSV1,CCSV2,CV4,Rubilar}, $f(R)$-gravity with torsion has been formulated within both the metric-affine and the tetrad-affine framework. According to the basic paradigm of $f(R)$-gravity, the gravitational Lagrangian of the theory is assumed to be a real function $f(R)$, where $R$ is here the Ricci curvature scalar written in terms of a metric $g$ and a $g$-compatible connection $\Gamma$, or equivalently a tetrad field $e$ and a spin-connection $\omega$. The pairs $(g,\Gamma)$ and $(e,\omega)$ represent the gravitational dynamical fields of the theory in the metric-affine and tetrad-affine approaches respectively. The field equations turn out to be \begin{subequations} \label{2.1} \begin{equation} \label{2.1a} f'\/(R)R_{ij} -\frac{1}{2}f\/(R)g_{ij}=\Sigma_{ij} \end{equation} \begin{equation} \label{2.1b} T_{ij}^{\;\;\;h} =\frac{1}{f'(R)} \left[\frac{1}{2}\left(\de{f'(R)}/de{x^{p}}+S_{pq}^{\;\;\;q}\right) \left(\delta^{p}_{j}\delta^{h}_{i}-\delta^{p}_{i}\delta^{h}_{j}\right) +S_{ij}^{\;\;\;h}\right] \end{equation} \end{subequations} where $R_{ij}$ and $T_{ij}^{\;\;\;h}$ are the Ricci and Torsion tensors, while $\Sigma_{ij}$ and $S_{ij}^{\;\;\;h}$ are the stress-energy and spin density tensors of the matter fields. 
From \eqref{2.1b}, it is seen that we can have non-vanishing torsion even in absence of spin density. \\ Making use of the Bianchi identities, it is possible to derive the conservation laws \begin{subequations} \label{2.2} \begin{equation} \label{2.2a} \nabla_{i}\Sigma^{ij}+T_{i}\Sigma^{ij}-\Sigma_{pi}T^{jpi}-\frac{1}{2}S_{sti}R^{stij}=0 \end{equation} \begin{equation} \label{2.2b} \nabla_{h}S^{ijh}+T_{h}S^{ijh}+\Sigma^{ij}-\Sigma^{ji}=0 \end{equation} \end{subequations} which the stress-energy and spin density tensors of the matter fields must satisfy once the matter field equations are assigned \cite{FV1}. In equations \eqref{2.2} the symbols $\nabla_i$ denote covariant derivative with respect to the dynamical connection $\Gamma$ while $R^{stij}$ is the curvature tensor of $\Gamma$. Indices are lowered and raised by the metric $g_{ij}$. \\ In this paper we deal with $f(R)$-gravity coupled with Dirac fields and possibly a perfect fluid (without spin). Denoting by $\gamma^\mu$ ($\mu=0,1,2,3$) Dirac matrices, we introduce the notation $\Gamma^i = e^i_\mu\gamma^\mu$ where $e^\mu_i$ ($e^\mu_{i}e^i_\nu=\delta^\mu_\nu$ and $e^i_{\mu}e^\mu_j=\delta^i_j$) indicate a tetrad field associated with a metric $g_{ij}$. Moreover, setting $S_{\mu\nu}:= \frac{1}{8}[\gamma_\mu,\gamma_\nu]$, we denote the covariant derivative of the Dirac field $\psi$ by $D_i\psi = \de\psi/de{x^i} + \omega_i^{\;\;\mu\nu}S_{\mu\nu}\psi\/$ and $D_i\bar\psi = \de{\bar\psi}/de{x^i} - \bar\psi\omega_i^{\;\;\mu\nu}S_{\mu\nu}\/$, where $\omega_i^{\;\;\mu\nu}$ is a spin connection. Equivalently, we can put $D_i\psi = \de\psi/de{x^i} - \Omega_i\psi$ and $D_i\bar\psi = \de{\bar\psi}/de{x^i} + \bar{\psi}\Omega_i$ where \begin{equation}\label{2.3} \Omega_i := - \frac{1}{4}g_{jh}\left(\Gamma_{ik}^{\;\;\;j} - e^j_\mu\partial_i\/e^\mu_k \right)\Gamma^h\Gamma^k \end{equation} $\Gamma_{ik}^{\;\;\;j}$ being the coefficients of a linear connection $\Gamma$.
The relation between linear and spin connection is given by \begin{equation}\label{2.4} \Gamma_{ij}^{\;\;\;h} = \omega_{i\;\;\;\nu}^{\;\;\mu}e_\mu^h\/e^\nu_j + e^{h}_{\mu}\partial_{i}e^{\mu}_{j} \end{equation} The stress-energy tensors of the matter fields are then described as \begin{subequations}\label{2.5} \begin{equation}\label{2.5a} \Sigma^D_{ij} := \frac{i}{4}\/\left( \bar\psi\Gamma_{i}{D}_{j}\psi - {D}_{j}\bar{\psi}\Gamma_{i}\psi \right) \end{equation} and \begin{equation}\label{2.5b} \Sigma^F_{ij}:= (\rho +p)\/U_iU_j -pg_{ij} \end{equation} while the spin density tensor is expressed as \begin{equation}\label{2.5c} S_{ij}^{\;\;\;h}=\frac{i}{2}\bar\psi\left\{\Gamma^{h},S_{ij}\right\}\psi \equiv-\frac{1}{4}\eta^{\mu\sigma}\epsilon_{\sigma\nu\lambda\tau} \left(\bar{\psi}\gamma_{5}\gamma^{\tau}\psi\right)e^{h}_{\mu}e^{\nu}_{i}e^{\lambda}_{j} \end{equation} \end{subequations} In equations \eqref{2.5}, $\rho$, $p$ and $U_i$ denote respectively the matter-energy density, the pressure and the four velocity of the fluid, $S_{ij}:= \frac{1}{8}[\Gamma_i,\Gamma_j]$ and $\eta^{\mu\sigma}$ is the Minkowskian metric with signature $(1,-1,-1,-1)$. \\ Following previous works \cite{CCSV1,CCSV2,CCSV3,CV4,FV1}, we suppose that the trace of equations \eqref{2.1a} \begin{equation}\label{2.6} f'(R)R -2f(R)=\Sigma \end{equation} gives rise to an invertible relation between the Ricci scalar curvature $R$ and the trace $\Sigma$ of the stress-energy tensor. Also, we assume that $f(R)\not = kR^2$ (the case $f(R)=kR^2$ is only compatible with the condition $\Sigma=0$). Under the assumed conditions, from equation \eqref{2.6} it is possible to derive the expression of $R$ as function of $\Sigma$, namely $R=F(\Sigma)$. 
After that, introducing the scalar field \begin{equation}\label{2.7} \varphi := f'\/(F\/(\Sigma)) \end{equation} and the effective potential \begin{equation}\label{2.8} V\/(\varphi):= \frac{1}{4}\left[ \varphi F^{-1}\/((f')^{-1}\/(\varphi))+ \varphi^2\/(f')^{-1}\/(\varphi)\right] \end{equation} as well as separating the Levi--Civita contributions from the torsional ones, we can express the field equations \eqref{2.1a} in the Einstein-like form \begin{equation}\label{2.9} \begin{split} \tilde{R}_{ij} -\frac{1}{2}\tilde{R}g_{ij}= \frac{1}{\varphi}\Sigma^F_{ij} + \frac{1}{\varphi}\Sigma^D_{ij} + \frac{1}{\varphi^2}\left( - \frac{3}{2}\de\varphi/de{x^i}\de\varphi/de{x^j} + \varphi\tilde{\nabla}_{j}\de\varphi/de{x^i} + \frac{3}{4}\de\varphi/de{x^h}\de\varphi/de{x^k}g^{hk}g_{ij} \right. \\ \left. - \varphi\tilde{\nabla}^h\de\varphi/de{x^h}g_{ij} - V\/(\varphi)g_{ij} \right) + \tilde{\nabla}_h\hat{S}_{ji}^{\;\;\;h} + \hat{S}_{hi}^{\;\;\;p}\hat{S}_{jp}^{\;\;\;h} - \frac{1}{2}\hat{S}_{hq}^{\;\;\;p}\hat{S}_{\;\;p}^{q\;\;\;h}g_{ij} \end{split} \end{equation} where $\tilde{R}_{ij}$, $\tilde R$ and $\tilde{\nabla}_i$ denote respectively the Ricci tensor, the Ricci scalar curvature and the covariant derivative of the Levi--Civita connection, and $\hat{S}_{ij}^{\;\;\;h}:=-\frac{1}{2\varphi}S_{ij}^{\;\;\;h}\/$. \\ In addition to this, the generalized Dirac equations for the spinor field are \begin{equation}\label{2.10} i\Gamma^{h}D_{h}\psi + \frac{i}{2}T_h\Gamma^h\psi- m\psi=0 \end{equation} where $T_h :=T_{hj}^{\;\;\;j}$ is the torsion vector. 
As it has been proved in \cite{FV1}, equations \eqref{2.10} imply automatically the validity of the conservation laws \begin{subequations}\label{2.11} \begin{equation}\label{2.11a} \nabla_{i}\Sigma^{D\/{ij}}+T_{i}\Sigma^{D\/{ij}}-\Sigma^D_{\/pi}T^{jpi}-\frac{1}{2}S_{sti}R^{stij}=0 \end{equation} \begin{equation}\label{2.11b} \nabla_{h}S^{ijh}+T_{h}S^{ijh}+\Sigma^{D\/{ij}}-\Sigma^{D\/{ji}}=0 \end{equation} \end{subequations} Moreover, still in \cite{FV1}, it has been shown that equations \eqref{2.11b} are equivalent to the antisymmetric part of the Einstein-like equations \eqref{2.9}. From this, we get two results: on one hand, we have that the significant part of the Einstein-like field equations \eqref{2.9} is the symmetric one; on the other hand, comparing equations \eqref{2.11a} with equations \eqref{2.2a}, we derive the conservation law for the perfect fluid. Indeed, introducing the contorsion tensor \begin{equation}\label{2.12} K_{ij}^{\;\;\;h} =\frac{1}{2}\left(-T_{ij}^{\;\;\;h}+T_{j\;\;\;i}^{\;\;h}-T^{h}_{\;\;ij}\right) \end{equation} and expressing the dynamical connection as the sum \begin{equation}\label{2.13} \Gamma_{ij}^{\;\;\;h}=\tilde{\Gamma}_{ij}^{\;\;\;h}-K_{ij}^{\;\;\;h} \end{equation} where $\tilde{\Gamma}_{ij}^{\;\;\;h}$ are the coefficients of the Levi--Civita connection, a direct comparison between equations \eqref{2.11a} with equations \eqref{2.2a} yields the identity \begin{equation}\label{2.14} \begin{split} \nabla_{i}\Sigma^{F\/{ij}}+T_{i}\Sigma^{F\/{ij}}-\Sigma^F_{\/pi}T^{jpi}= \tilde{\nabla}_i\Sigma^{F\/{ij}} - K_{ih}^{\;\;\;i}\Sigma^{F\/{hj}} - K_{ih}^{\;\;\;j}\Sigma^{F\/{ih}}\\ +T_{h}\Sigma^{F\/{hj}}-T^{j}_{\;\;ih}\Sigma^{F\/{ih}} = \tilde{\nabla}_i\Sigma^{F\/{ij}} =0 \end{split} \end{equation} which is the same conservation laws holding in general relativity. 
\\ To conclude this preliminary section, we notice that the symmetrized part of the Einstein-like equations \eqref{2.9} as well as the Dirac equations \eqref{2.10} can be worked out as in \cite{FV1} assuming the final form \begin{equation}\label{2.15} \begin{split} \tilde{R}_{ij} -\frac{1}{2}\tilde{R}g_{ij}= \frac{1}{\varphi}\Sigma^F_{ij} + \frac{1}{\varphi}\tilde{\Sigma}^D_{ij} + \frac{1}{\varphi^2}\left( - \frac{3}{2}\de\varphi/de{x^i}\de\varphi/de{x^j} + \varphi\tilde{\nabla}_{j}\de\varphi/de{x^i} + \frac{3}{4}\de\varphi/de{x^h}\de\varphi/de{x^k}g^{hk}g_{ij} \right. \\ \left. - \varphi\tilde{\nabla}^h\de\varphi/de{x^h}g_{ij} - V\/(\varphi)g_{ij} \right) + \frac{3}{64\varphi^2}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi)g_{ij} \end{split} \end{equation} and \begin{equation}\label{2.16} i\Gamma^{h}\tilde{D}_{h}\psi -\frac{1}{\varphi}\frac{3}{16}\left[(\bar{\psi}\psi) +i(i\bar{\psi}\gamma_5\psi)\gamma_5\right]\psi-m\psi=0 \end{equation} where \begin{equation}\label{2.17} \tilde{\Sigma}^D_{ij} := \frac{i}{4}\/\left[ \bar\psi\Gamma_{(i}\tilde{D}_{j)}\psi - \left(\tilde{D}_{(j}\bar\psi\right)\Gamma_{i)}\psi \right] \end{equation} $\tilde{D}_i$ denoting covariant derivative with respect to the Levi--Civita connection. 
\section{Bianchi-I cosmological models} Let us consider a Bianchi type I metric of the form \begin{equation}\label{3.1} ds^2 = dt^2 - a^2(t)\,dx^2 - b^2(t)\,dy^2 - c^2(t)\,dz^2 \end{equation} The tetrad field associated with the metric \eqref{3.1} is given by \begin{equation}\label{3.2} e^\mu_0=\delta^\mu_0, \quad e^\mu_1 = a(t)\/\delta^\mu_1, \quad e^\mu_2 = b(t)\/\delta^\mu_2, \quad e^\mu_3 = c(t)\/\delta^\mu_3 \qquad \mu =0,1,2,3 \end{equation} The inverse relations of \eqref{3.2} are expressed as \begin{equation}\label{3.3} e^0_\mu = \delta^0_\mu, \quad e^1_\mu = \frac{1}{a(t)}\delta^1_\mu, \quad e^2_\mu = \frac{1}{b(t)}\delta^2_\mu, \quad e^3_\mu = \frac{1}{c(t)}\delta^3_\mu \qquad \mu =0,1,2,3 \end{equation} The non-trivial Christoffel symbols associated to the metric \eqref{3.1} are \begin{equation}\label{3.4} \begin{split} \tilde{\Gamma}_{10}^{\;\;\;1}= \frac{\dot a}{a}, \quad \tilde{\Gamma}_{20}^{\;\;\;2}= \frac{\dot b}{b}, \quad \tilde{\Gamma}_{30}^{\;\;\;3}= \frac{\dot c}{c}\\ \tilde{\Gamma}_{11}^{\;\;\;0}= a{\dot a}, \quad \tilde{\Gamma}_{22}^{\;\;\;0}= b{\dot b}, \quad \tilde{\Gamma}_{33}^{\;\;\;0}= c{\dot c} \end{split} \end{equation} In this case, the matrices $\Gamma^i = e^i_\mu\gamma^\mu$ assume the explicit form \begin{equation}\label{3.5} \Gamma^0 = \gamma^0,\quad \Gamma^1 = \frac{1}{a(t)}\gamma^1, \quad \Gamma^2 = \frac{1}{b(t)}\gamma^2, \quad \Gamma^3 = \frac{1}{c(t)}\gamma^3 \end{equation} The spinorial covariant derivative induced by the Levi--Civita connection is \begin{equation}\label{3.7} \tilde{D}_i\psi = \partial_i\psi - \tilde{\Omega}_i\psi, \qquad \tilde{D}_i\bar\psi = \partial_i\bar\psi + \bar{\psi}\tilde{\Omega}_i \end{equation} where the spinor connection coefficients $\tilde{\Omega}_i$ are given by \begin{equation}\label{3.8} \tilde{\Omega}_0=0, \quad \tilde{\Omega}_1=\frac{1}{2}{\dot a}\gamma^1\gamma^0, \quad \tilde{\Omega}_2=\frac{1}{2}{\dot b}\gamma^2\gamma^0, \quad \tilde{\Omega}_3=\frac{1}{2}{\dot c}\gamma^3\gamma^0
\end{equation} Taking equations \eqref{3.7} and \eqref{3.8} into account, it is easily seen that the Dirac equations \eqref{2.16} assume the form \begin{subequations}\label{3.9} \begin{equation}\label{3.9a} \dot\psi + \frac{\dot\tau}{2\tau}\psi + im\gamma^0\psi - \frac{3i}{16\varphi}\/\left[ (\bar\psi\psi)\gamma^0 +i\/(i\bar\psi\gamma^5\psi)\gamma^0\gamma^5 \right]\psi =0 \end{equation} \begin{equation}\label{3.9b} \dot{\bar\psi} + \frac{\dot\tau}{2\tau}\bar\psi - im\bar{\psi}\gamma^0 + \frac{3i}{16\varphi}\bar\psi\/\left[ (\bar\psi\psi)\gamma^0 +i\/(i\bar\psi\gamma^5\psi)\gamma^5\gamma^0 \right] =0 \end{equation} \end{subequations} where, borrowing from \cite{Saha1,Saha2}, we have defined $\tau := abc$. Analogously, evaluating the Einstein-like equations \eqref{2.15} for the metric \eqref{3.1}, we get \begin{subequations}\label{3.10} \begin{equation}\label{3.10a} \begin{split} \frac{\dot a}{a}\frac{\dot b}{b} + \frac{\dot b}{b}\frac{\dot c}{c} + \frac{\dot a}{a}\frac{\dot c}{c} = \frac{\rho}{\varphi} + \frac{1}{2\varphi}m\bar\psi\psi - \frac{3}{64\varphi^2}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi)+\\ +\frac{1}{\varphi^2}\left[- \frac{3}{4}{\dot\varphi}^2 - \varphi\dot\varphi\frac{\dot\tau}{\tau} - V(\varphi)\right] \end{split} \end{equation} \begin{equation}\label{3.10b} \begin{split} \frac{\ddot b}{b} + \frac{\ddot c}{c} + \frac{\dot b}{b}\frac{\dot c}{c} = - \frac{p}{\varphi} + \frac{1}{\varphi^2}\left[\varphi\dot\varphi\frac{\dot a}{a}+\frac{3}{4}{\dot\varphi}^2 -\varphi\left( \ddot\varphi + \frac{\dot\tau}{\tau}\dot\varphi \right) - V(\varphi)\right]+\\ +\frac{3}{64\varphi^2}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi) \end{split} \end{equation} \begin{equation}\label{3.10c} \begin{split} \frac{\ddot a}{a} + \frac{\ddot c}{c} + \frac{\dot a}{a}\frac{\dot c}{c} = - \frac{p}{\varphi} + \frac{1}{\varphi^2}\left[\varphi\dot\varphi\frac{\dot b}{b} + \frac{3}{4}{\dot\varphi}^2 -\varphi\left( \ddot\varphi + 
\frac{\dot\tau}{\tau}\dot\varphi \right) - V(\varphi)\right]+\\ +\frac{3}{64\varphi^2}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi) \end{split} \end{equation} \begin{equation}\label{3.10d} \begin{split} \frac{\ddot a}{a} + \frac{\ddot b}{b} + \frac{\dot a}{a}\frac{\dot b}{b} = - \frac{p}{\varphi} + \frac{1}{\varphi^2}\left[\varphi\dot\varphi\frac{\dot c}{c} + \frac{3}{4}{\dot\varphi}^2 -\varphi\left( \ddot\varphi + \frac{\dot\tau}{\tau}\dot\varphi \right) - V(\varphi)\right]+\\ +\frac{3}{64\varphi^2}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi) \end{split} \end{equation} \end{subequations} together with the conditions \begin{subequations} \label{3.11} \begin{equation}\label{3.11a} \tilde{\Sigma}^D_{12}=0\quad \Rightarrow \quad a\/\dot{b} - b\/\dot{a}=0 \quad \cup \quad \bar\psi\gamma^5\gamma^3\psi =0 \end{equation} \begin{equation}\label{3.11b} \tilde{\Sigma}^D_{23}=0\quad \Rightarrow \quad c\/\dot{b} - b\/\dot{c}=0 \quad \cup \quad \bar\psi\gamma^5\gamma^1\psi =0 \end{equation} \begin{equation}\label{3.11c} \tilde{\Sigma}^D_{13}=0\quad \Rightarrow \quad a\/\dot{c} - c\/\dot{a}=0 \quad \cup \quad \bar\psi\gamma^5\gamma^2\psi =0 \end{equation} \end{subequations} The equations $\tilde{\Sigma}^D_{0A}=0$ ($A=1,2,3$) are automatically satisfied identities. Finally, the conservation law \eqref{2.14} together with an equation of state of the kind $p=\lambda\rho$ ($\lambda\in [0,1[$) yield the last equation \begin{equation}\label{3.12} \dot\rho + \frac{\dot\tau}{\tau}(1+\lambda)\rho =0 \end{equation} which completes the whole set of field equations. The general solution of \eqref{3.12} is given by \begin{equation}\label{3.12bis} \rho = \rho_0\tau^{-(1+\lambda)} \qquad \rho_0 = {\rm constant} \end{equation} \\ Conditions \eqref{3.11} are constraints imposed on the metric or on the Dirac field. 
We see that there are in general three ways of satisfying these conditions: one is to impose constraints of purely geometrical origin by insisting that $a\dot{b}-b\dot{a}=0$, $a\dot{c}-c\dot{a}=0$, $c\dot{b}-b\dot{c}=0$ giving an isotropic universe filled with fermionic matter fields, which is problematic due to the fact that it is known that Dirac fields do not undergo the cosmological principle \cite{t}; another is to impose constraints of purely material origin by insisting that $\bar\psi\gamma^5\gamma^1\psi=0$, $\bar\psi\gamma^5\gamma^2\psi=0$, $\bar\psi\gamma^5\gamma^3\psi=0$ giving an anisotropic universe without fermionic torsional interactions, which we regard as unsatisfactory because if Dirac fields are absent then it is not clear what may then justify anisotropies; the last situation would be of both geometrical and material origin by insisting that for instance $a\dot{b}-b\dot{a}=0$ with $\bar\psi\gamma^5\gamma^1\psi=0$, $\bar\psi\gamma^5\gamma^2\psi=0$ giving a partial isotropy for only two axes with the corresponding two components of the spin vector vanishing, describing a universe shaped as an ellipsoid of rotation about the only axis along which the spin vector does not vanish. Notice that by insisting on the proportionality between two couples of axes we inevitably get the total isotropy of the $3$-dimensional space. Therefore, the situation in which we have $a=b$ with $\bar\psi\gamma^5\gamma^1\psi=\bar\psi\gamma^5\gamma^2\psi=0$ is the only one that we believe to be entirely satisfactory, and from now on we shall work in this situation. 
\\ Here, the Dirac and Einstein-like equations \eqref{3.9} and \eqref{3.10} can be worked out as in \cite{Saha1,Saha2}: for instance, through suitable combinations of \eqref{3.10} we obtain the equations \begin{subequations}\label{3.13} \begin{equation}\label{3.13a} \frac{d}{dt}(\psi^{\dagger}\psi\tau)=0 \end{equation} \begin{equation}\label{3.13b} \frac{d}{dt}(\bar\psi\psi\tau) -\frac{3}{8\varphi}(i\bar\psi\gamma^5\psi)(\psi^{\dagger}\gamma^5\psi\tau)=0 \end{equation} \begin{equation}\label{3.13c} \frac{d}{dt}(i\bar\psi\gamma^5\psi\tau) +\left[2m+\frac{3}{8\varphi}(\bar\psi\psi)\right](\psi^{\dagger}\gamma^5\psi\tau)=0 \end{equation} \begin{equation}\label{3.13d} \frac{d}{dt}(\psi^{\dagger}\gamma^5\psi\tau)-2m(i\bar\psi\gamma^5\psi\tau)=0 \end{equation} \end{subequations} From equations \eqref{3.13} it is easy to deduce that \begin{subequations} \label{3.13bis} \begin{equation}\label{3.13abis} (\bar\psi\gamma^5\gamma^3\psi)^2=(\bar\psi\psi)^2 + (i\bar\psi\gamma^5\psi)^2 + (\bar\psi\gamma^5\gamma^0\psi)^2 = \frac{C^2}{\tau^2} \end{equation} \begin{equation}\label{3.13bbis} (\psi^{\dagger}\psi)^2=\frac{K^2}{\tau^2} \end{equation} \end{subequations} with $C$ and $K$ constants. We notice that in this special case the theory has an additional discrete symmetry given by the fact that under the discrete transformation $\psi \rightarrow \gamma^5\gamma^0\gamma^1\psi$ all field equations are invariant; this implies that in the Dirac equation the total number of $4$ components is in this case reduced to $2$ components alone; however $2$ components with complex values are equivalent to $4$ components of real values: so the $4$ equations for real fields given in \eqref{3.13} are the system of field equations we will have to solve. 
The compatibility with all constraints allows only three classes of spinors, each of which has a general member written in the following form \begin{eqnarray} \nonumber \label{generalspinor} &\psi=\frac{1}{\sqrt{2\tau}}\left(\begin{tabular}{c} $\sqrt{K-C}\cos{\zeta_{1}}e^{i\theta_{1}}$\\ $\sqrt{K+C}\cos{\zeta_{2}}e^{i\vartheta_{1}}$\\ $\sqrt{K-C}\sin{\zeta_{1}}e^{i\vartheta_{2}}$\\ $\sqrt{K+C}\sin{\zeta_{2}}e^{i\theta_{2}}$ \end{tabular}\right) \end{eqnarray} with constraints $\tan{\zeta_{1}}\tan{\zeta_{2}}=(-1)^{n+1}$ and $\theta_{1}+\theta_{2}-\vartheta_{1}-\vartheta_{2}=\pi n$ for any $n$ integer, and also \begin{eqnarray} \label{restrictedspinor1} &\psi=\frac{1}{\sqrt{2\tau}}\left(\begin{tabular}{c} $\sqrt{K-C}\cos{\zeta_{1}}e^{i\theta_{1}}$\\ $0$\\ $0$\\ $\sqrt{K+C}\sin{\zeta_{2}}e^{i\theta_{2}}$ \end{tabular}\right) \end{eqnarray} and \begin{eqnarray} \label{restrictedspinor2} &\psi=\frac{1}{\sqrt{2\tau}}\left(\begin{tabular}{c} $0$\\ $\sqrt{K+C}\cos{\zeta_{1}}e^{i\vartheta_{1}}$\\ $\sqrt{K-C}\sin{\zeta_{2}}e^{i\vartheta_{2}}$\\ $0$ \end{tabular}\right) \end{eqnarray} where all angular functions $\zeta_{1}$, $\zeta_{2}$ and $\theta_{1}$, $\theta_{2}$, $\vartheta_{1}$, $\vartheta_{2}$ have only temporal dependence, that has to be determined by plugging the spinor back into the Dirac equations. 
\\ For the gravitational field, borrowing again from \cite{Saha2}, we subtract equation \eqref{3.10b} from equation \eqref{3.10d} obtaining \begin{equation}\label{3.14.0} \varphi\tau\frac{d}{dt}\left(\frac{\dot a}{a} - \frac{\dot c}{c}\right) + \varphi\dot\tau\left(\frac{\dot a}{a} - \frac{\dot c}{c}\right) + \dot{\varphi}\tau\left(\frac{\dot a}{a} - \frac{\dot c}{c}\right)=0 \end{equation} from which we derive \begin{equation}\label{3.14} \frac{a}{c}=De^{\left(X\int{\frac{dt}{\varphi\tau}}\right)} \end{equation} $D$ and $X$ being suitable constants, which gives the evolution equation for the ratio of the two axes that are not constantly proportional, and therefore it can be considered as the evolution equation for the shape of the universe; also, multiplying \eqref{3.10a} by $3$ and adding the result to the summation of equations \eqref{3.10b}, \eqref{3.10c} and \eqref{3.10d}, we get the final equation for $\tau$ \begin{equation}\label{3.15} 2\frac{\ddot\tau}{\tau} + 3\frac{\ddot\varphi}{\varphi} = 3\frac{\rho}{\varphi} - 3\frac{p}{\varphi} + \frac{3}{2\varphi}m\bar\psi\psi - 5\frac{\dot\tau}{\tau}\frac{\dot\varphi}{\varphi} - \frac{6}{\varphi^2}V\/(\varphi) \end{equation} which can be considered as the evolution equation of the volume of the universe. Here, it is worth noticing that equation \eqref{3.10a} plays the role of a constraint on the initial data: thus for consistency we have to check that, if satisfied initially, this constraint is preserved in time. 
To see this point, we first observe that the Einstein-like equations \eqref{2.15}, and thus also \eqref{3.10}, can be written in the equivalent form \begin{equation}\label{3.14.1} \tilde{R}_{ij}= \tilde{T}_{ij} -\frac{1}{2}\tilde{T}g_{ij} \end{equation} where \begin{equation}\label{3.14.2} \begin{split} \tilde{T}_{ij}:= \frac{1}{\varphi}\Sigma^F_{ij} + \frac{1}{\varphi}\tilde{\Sigma}^D_{ij} + \frac{1}{\varphi^2}\left( - \frac{3}{2}\de\varphi/de{x^i}\de\varphi/de{x^j} + \varphi\tilde{\nabla}_{j}\de\varphi/de{x^i} + \frac{3}{4}\de\varphi/de{x^h}\de\varphi/de{x^k}g^{hk}g_{ij} \right. \\ \left. - \varphi\tilde{\nabla}^h\de\varphi/de{x^h}g_{ij} - V\/(\varphi)g_{ij} \right) + \frac{3}{64\varphi^2}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi)g_{ij} \end{split} \end{equation} denotes the effective stress-energy tensor appearing on the right hand side of equations \eqref{2.15}, while $\tilde T$ is its trace. It is then a straightforward matter to verify that equations \eqref{3.14.0} and \eqref{3.15} can be equivalently obtained by suitably combining the space-space equations of the set \eqref{3.14.1}; more in detail, equation \eqref{3.14.0} is obtained by subtracting from each other the two distinct space-space equations of \eqref{3.14.1}, while equation \eqref{3.15} is obtained by adding together all the space-space equations of \eqref{3.14.1}. As a consequence, we have that solving equations \eqref{3.14.0} and \eqref{3.15} amounts to solving all the space-space equations of the set \eqref{3.14.1}. In addition to this, as it is proved in Section 5, the conservation laws \eqref{2.2} automatically imply the vanishing of the four-divergence of the Einstein-like equations \eqref{2.15}. The two just mentioned facts allow us to apply to the present case a result by Bruhat (see \cite{yvonne4}, Theorem 4.1, pag. 150) which ensures that the constraint \eqref{3.10a} is actually satisfied for all time. 
\paragraph{Examples: $f(R)\equiv R$ and $f(R)\equiv R+\delta\alpha R^{2}$ models.} In the following, we will consider the situation given by $f(R)\equiv R+\delta\alpha R^{2}$ as a correction in $\delta\alpha$ with respect to the $f(R)\equiv R$ case, and we are going to take solutions for the Einstein-like field equations that are corrections in $\delta\alpha$ of the exact solutions of the Einstein field equations: thus the first step is to find the exact solutions of the $f(R)\equiv R$ case. \\ To this extent, consider for simplicity the simplest case without fluid. Then, we choose for the Dirac field the solution in the form given as in \eqref{restrictedspinor2} by \begin{eqnarray} \label{spinorsolution} &\psi=\frac{1}{\sqrt{2\tau}}\left(\begin{tabular}{c} $0$\\ $\sqrt{K+C}e^{i\left(-mt-\frac{3C}{16}\int{\frac{dt}{\tau}}\right)}$\\ $\sqrt{K-C}e^{-i\left(-mt-\frac{3C}{16}\int{\frac{dt}{\tau}}\right)}$\\ $0$ \end{tabular}\right) \end{eqnarray} with constraints $\bar\psi\psi=\frac{C}{\tau}$, $\psi^{\dagger}\psi=\frac{K}{\tau}$ and $\psi^{\dagger}\gamma^5\psi=0$, $i\bar\psi\gamma^5\psi=0$, and with the solution defined up to a global unitary phase which is then irrelevant: this solution albeit simple is nevertheless not trivial, and it will actually lead to a correspondingly simple form of the gravitational evolution equations for the shape and volume of the universe as \begin{equation} \label{3.18} \frac{a}{c}=De^{\left(X\int{\frac{1}{\tau}dt}\right)} \end{equation} and also \begin{equation} \label{3.19} \ddot\tau-\frac{3mC}{4}=0 \end{equation} in terms of the constants $m$, $C$, $X$, $D$ alone. 
As it is clear, it is possible to integrate the equation \eqref{3.19} to give the evolution of the volume of the universe as \begin{equation} \label{3.20} \tau=\frac{3mC}{8}(b+2\beta t +t^{2}) \end{equation} where $b$ is the integration constant that contains the information about the initial volume of the universe, which has to be positive thus giving rise to an initial state without singularity, and $\beta$ is the integration constant that contains the information about the initial velocity of expansion of the universe; then by recalling that $\tau=a^{2}c$ we can use equation \eqref{3.18} to obtain the evolution of the shape of the universe as in the following \begin{equation} \label{3.21} \frac{a}{c}=D\left(\frac{t+\beta+\sqrt{\beta^{2}-b}}{t+\beta-\sqrt{\beta^{2}-b}}\right)^{-\frac{8X}{6mC\sqrt{\beta^{2}-b}}} \end{equation} in which we see that as $t$ tends to infinity the two factors tend to become constantly proportional, giving isotropization: by setting then $D=1$ and choosing for simplicity $8X=-3mC\sqrt{\beta^{2}-b}$ the evolution of the single factors can be given as in the following \begin{subequations} \label{3.22} \begin{equation} \label{3.22a} a=\sqrt[3]{\frac{3mC}{8}}\left(t+\beta-\sqrt{\beta^{2}-b}\right)^{-\frac{1}{3}} \left(b+2\beta t +t^{2}\right)^{\frac{1}{2}} \end{equation} \begin{equation} \label{3.22c} c=\sqrt[3]{\frac{3mC}{8}}\left(t+\beta-\sqrt{\beta^{2}-b}\right)^{\frac{2}{3}} \end{equation} \end{subequations} with constraint given by $3m^{2}(\beta^{2}-b)=1$ for consistency with the fact that the time-time gravitational field equation is not actually a field equation but a constraint. 
Finally the spinor evolves as \begin{eqnarray} &\psi=\frac{2}{\sqrt{3m(b+2\beta t +t^{2})}}\left(\begin{tabular}{c} $0$\\ $\sqrt{\left(\frac{K}{C}+1\right)}e^{i\left(-mt +\sqrt{\frac{3}{16}}\ln{\left(\frac{m\sqrt{3}(t+\beta)+1}{m\sqrt{3}(t+\beta)-1}\right)}\right)}$\\ $\sqrt{\left(\frac{K}{C}-1\right)}e^{-i\left(-mt +\sqrt{\frac{3}{16}}\ln{\left(\frac{m\sqrt{3}(t+\beta)+1}{m\sqrt{3}(t+\beta)-1}\right)}\right)}$\\ $0$ \end{tabular}\right) \end{eqnarray} with the spinor bilinears diluting as the inverse of the volume, correspondingly to the fact that they represent densities. By reading this solution we have that: at the first instant of the evolution the universe has no singularities as it occupies a finite volume and it is shaped as a disc whose shorter axis is along the third coordinate described by the $c$ factor, and it is filled with spinor fields with torsional interactions responsible for the anisotropy of the universe; as the time goes by the spinor field dilutes down, allowing isotropization of the universe without relenting its expansion; eventually for very late epochs the scale factors have the $t^{\frac{2}{3}}$ scaling we know for the standard FLRW cosmic evolution. \\ Now that we have the exact solutions of the $f(R)\equiv R$ case, we can employ them as the basis upon which to build the correction in $\delta\alpha$ for the more general solutions of the $f(R)\equiv R+\delta\alpha R^{2}$. 
\\ The form of the Dirac field will be unchanged: its form is going to give rise to a correspondingly simple form of the $\varphi$ function given by \begin{equation} \varphi\equiv 1-\delta\alpha\frac{ m C}{\tau} \end{equation} with potential \begin{equation} V(\varphi)\equiv \delta\alpha \frac{m^{2} C^{2}}{8\tau^{2}} \end{equation} and the gravitational evolution equation for the volume of the universe is \begin{equation} \left(\ddot\tau-\frac{3mC}{4}\right) -\delta\alpha\frac{ mC}{2}\left(\frac{\dot\tau^{2}}{\tau^{2}}-\frac{\ddot\tau}{\tau} -\frac{3mC}{4\tau}\right)=0 \end{equation} up to higher-order powers of $\delta\alpha$. As before, it is possible to integrate it to give the evolution of the volume of the universe as \begin{eqnarray} &\tau=\frac{3mC}{8}(b\!+\!2\beta t\!+\!t^{2}) +\delta\alpha \frac{mC}{2}\left(\varsigma\!\left(\xi\!+\!t\right)\! +\!m\sqrt{3}\left(\beta\!+\!t\right) \ln{\left(\frac{m\sqrt{3}(t+\beta)+1}{m\sqrt{3}(t+\beta)-1}\right)}\right) \end{eqnarray} where $\varsigma$ and $\xi$ are two new integration constants for which the initial volume of the universe is now different, but of course there is still no singularity: again these two new integration constants are linked by a relationship coming from the fact that the time-time gravitational field equation is a constraint. We remark that at later cosmological times in the evolution of the universe we still have isotropization, but more in general we have that this solution tends to be approximated to the same we would have had in the Einstein theory in what could be called Einsteinization. 
\section{From the Jordan to the Einstein frame} In this section we shall show that, by passing from the Jordan ($g_{ij}$) to the Einstein frame ($\bar{g}_{ij}$) through a conformal transformation of the kind $\bar{g}_{ij}=\varphi\/g_{ij}$ (if $\varphi > 0$) or $\bar{g}_{ij}=-\varphi\/g_{ij}$ (if $\varphi< 0$), it is possible to quadratically integrate the Einstein-like equations \eqref{3.10} also in the general case $f(R)\not = R$. \\ To start with, supposing for simplicity $\varphi >0$, we note that the relations between the components of the tetrad fields associated with the metric tensors $\bar{g}_{ij}$ and $g_{ij}$ are expressed as \begin{equation}\label{4.1} \bar{e}^\mu_i =\sqrt{\varphi}e^\mu_i, \qquad \bar{e}^i_\mu = \frac{1}{\sqrt{\varphi}}e^i_\mu \end{equation} while the relation between the Levi--Civita connections induced by the two different frames is given by \begin{equation}\label{4.2} \bar{\Gamma}_{ij}^{\;\;\;h}= \tilde{\Gamma}_{ij}^{\;\;\;h} + \frac{1}{2\varphi}\de\varphi/de{x^j}\delta^h_i - \frac{1}{2\varphi}\de\varphi/de{x^p}g^{ph}g_{ij} + \frac{1}{2\varphi}\de\varphi/de{x^i}\delta^h_j\,. \end{equation} Moreover, we have also the identities \begin{equation}\label{4.3} \Gamma^h = e^h_\mu\/\gamma^\mu = \sqrt{\varphi}\bar{e}^h_\mu\/\gamma^\mu = \sqrt{\varphi}\bar{\Gamma}^h, \qquad \Gamma_h = \frac{1}{\sqrt{\varphi}}\bar{\Gamma}_h \end{equation} between the matrices $\Gamma$ and $\bar\Gamma$ associated with the tetrads $e$ and $\bar e$ respectively. \\ Our aim is now to express the Dirac and Einstein-like equations in terms of the conformally transformed metric $\bar{g}_{ij}$ and its tetrad field $\bar{e}^\mu_i$. The covariant derivatives of the spinor fields are defined as in \eqref{3.7}; in order to correctly evaluate them in the Einstein frame, we need to express the coefficients $\tilde{\Omega}_i$ using the metric $\bar{g}_{ij}$ and its related quantities. 
With respect to this point, taking equations \eqref{2.3}, \eqref{4.1}, \eqref{4.2} and \eqref{4.3} as well as the identity \begin{equation}\label{4.4} - e^j_\mu\partial_i\/e^\mu_k = - \bar{e}^j_\mu\partial_i\/\bar{e}^\mu_k + \frac{1}{2\varphi}\partial_i\varphi\delta^j_k \end{equation} into account, we get the representation \begin{equation}\label{4.5} \tilde{\Omega}_i = - \frac{1}{4}\bar{g}_{jh}\left(\bar{\Gamma}_{ik}^{\;\;\;j} - \bar{e}^j_\mu\partial_i\/\bar{e}^\mu_k - \frac{1}{2\varphi}\partial_k\varphi\delta^j_i + \frac{1}{2\varphi}\partial_p\varphi\/\bar{g}^{pj}\bar{g}_{ik} \right)\bar{\Gamma}^h\bar{\Gamma}^k \end{equation} Now, supposing that the metric $\bar{g}_{ij}$ is of the Bianchi type-I form (the metric $g_{ij}$ is then of the Bianchi type-I only up to a time reparametrization $d\bar{t}=\frac{1}{\sqrt{\varphi}}dt$) \begin{equation}\label{4.6} ds^2 = dt^2 - \bar{a}^2(t)\,dx^2 - \bar{b}^2\,(t)dy^2 - \bar{c}^2(t)\,dz^2 \end{equation} the quantities \eqref{4.5} assume the explicit expression \begin{equation}\label{4.7} \begin{split} \tilde{\Omega}_0=0,\qquad \tilde{\Omega}_1= \frac{1}{2}\dot{\bar a}\gamma^1\gamma^0 - \frac{\dot{\varphi}}{4\varphi}\bar{a}\gamma^1\gamma^0 := \bar{\Omega}_1 - \frac{\dot{\varphi}}{4\varphi}\bar{a}\gamma^1\gamma^0, \\ \tilde{\Omega}_2= \frac{1}{2}\dot{\bar b}\gamma^2\gamma^0 - \frac{\dot{\varphi}}{4\varphi}\bar{b}\gamma^2\gamma^0 := \bar{\Omega}_2 - \frac{\dot{\varphi}}{4\varphi}\bar{b}\gamma^2\gamma^0, \\ \tilde{\Omega}_3= \frac{1}{2}\dot{\bar c}\gamma^3\gamma^0 - \frac{\dot{\varphi}}{4\varphi}\bar{c}\gamma^3\gamma^0 := \bar{\Omega}_3 - \frac{\dot{\varphi}}{4\varphi}\bar{c}\gamma^3\gamma^0 \end{split} \end{equation} Inserting equations \eqref{4.3} and \eqref{4.7} into equations \eqref{2.16}, we obtain the following representation of the Dirac equations \begin{subequations}\label{4.8} \begin{equation}\label{4.8a} \dot\psi + \frac{\dot{\bar\tau}}{2\bar\tau}\psi - \frac{3\dot\varphi}{4\varphi}\psi + 
\frac{im}{\sqrt{\varphi}}\gamma^0\psi + \frac{3i}{16\varphi\sqrt{\varphi}}\left[ (\bar\psi\psi)\gamma^0 + i(i\bar\psi\gamma^5\psi)\gamma^0\gamma^5 \right]\psi =0 \end{equation} \begin{equation}\label{4.8b} \dot{\bar\psi} + \frac{\dot{\bar\tau}}{2\bar\tau}\bar\psi - \frac{3\dot\varphi}{4\varphi}\bar\psi - \frac{im}{\sqrt{\varphi}}\bar{\psi}\gamma^0 - \frac{3i}{16\varphi\sqrt{\varphi}}\bar{\psi}\/\left[ (\bar\psi\psi)\gamma^0 + i(i\bar\psi\gamma^5\psi)\gamma^5\gamma^0 \right] =0 \end{equation} \end{subequations} where we have defined $\bar{\tau}:=\bar{a}\bar{b}\bar{c}$. Proceeding as in the Jordan frame, from equations \eqref{4.8} we derive the differential equations \begin{subequations}\label{4.9} \begin{equation}\label{4.9a} \frac{d}{dt}[\bar{\tau}\Theta(\bar\psi\psi)] + \frac{3}{8\varphi}\bar{\tau}(i\bar\psi\gamma^5\psi)(\bar\psi\gamma^5\gamma^0\psi) =0 \end{equation} \begin{equation}\label{4.9b} \frac{d}{dt}[\bar{\tau}\Theta(i\bar\psi\gamma^5\psi)] -\frac{2m\bar{\tau}\Theta}{\sqrt{\varphi}}\/(\bar\psi\gamma^5\gamma^0\psi) - \frac{3}{8\varphi}\bar{\tau}(\bar\psi\psi)(\bar\psi\gamma^5\gamma^0\psi) =0 \end{equation} \begin{equation}\label{4.9c} \frac{d}{dt}[\bar{\tau}\Theta(\bar\psi\gamma^5\gamma^0\psi)] + \frac{2m\bar{\tau}\Theta}{\sqrt{\varphi}}\/(i\bar\psi\gamma^5\psi) =0 \end{equation} \end{subequations} where $\Theta:= \varphi^{-\frac{3}{2}}$. Finally, equations \eqref{4.9} yield the relation (analogous of equation \eqref{3.13bis}) \begin{equation}\label{4.10} (\bar\psi\psi)^2 + (i\bar\psi\gamma^5\psi)^2 + (\bar\psi\gamma^5\gamma^0\psi)^2 = \frac{C^2}{\bar{\tau}^2\Theta^2} \end{equation} $C$ being a constant. \\ Now, we deal with the Einstein-like equations \eqref{2.15}. 
As it has been shown in \cite{CCSV1,CV4,CV1,CV2,CV3}, equations \eqref{2.15} can be expressed in terms of the metric $\bar{g}_{ij}$, assuming the form \begin{equation}\label{4.11} \bar{R}_{ij} -\frac{1}{2}\bar{R}\bar{g}_{ij}= \frac{1}{\varphi}\tilde{\Sigma}^D_{ij} + \frac{1}{\varphi}\Sigma^F_{ij} - \frac{1}{\varphi^3}V\/(\varphi)\bar{g}_{ij} + \frac{3}{64\varphi^3}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi)\bar{g}_{ij} \end{equation} where $\bar{R}_{ij}$ and $\bar{R}$ denote the Ricci tensor and the Ricci scalar curvature induced by the metric $\bar{g}_{ij}$. Of course, also the tensors $\tilde{\Sigma}^D_{ij}$ and $\Sigma^F_{ij}$ have to be represented making use of the metric $\bar{g}_{ij}$ and the associated tetrad $\bar{e}^\mu_i$. In this regard, we have \begin{equation}\label{4.12} \Sigma^F_{ij}:= \frac{1}{\varphi}(\rho +p)\/\bar{U}_i\bar{U}_j -\frac{p}{\varphi}\bar{g}_{ij} \end{equation} with $\bar{U}_i\bar{U}_j\/\bar{g}^{ij}=1$. Concerning the tensor $\tilde{\Sigma}^D_{ij}$, from a direct calculation we obtain the identities \begin{equation}\label{4.13} \tilde{\Sigma}^D_{00}= \frac{1}{2\varphi}\left[ m\bar\psi\psi - \frac{3}{16\varphi}(\bar{\psi}\gamma_5\gamma^\nu\psi)(\bar{\psi}\gamma_5\gamma_\nu\psi)\right], \qquad \tilde{\Sigma}^D_{AA}=0 \quad A=1,2,3 \end{equation} while, due to equations \eqref{4.11}, the vanishing of the non diagonal part of $\tilde{\Sigma}^D_{ij}$ gives rise to constraints analogous to those existing in the Jordan frame, namely \begin{subequations}\label{4.14} \begin{equation}\label{4.14a} \tilde{\Sigma}^D_{12}=0\quad \Rightarrow \quad \bar{a}\dot{\bar{b}} - \bar{b}\dot{\bar{a}}=0 \quad \cup \quad \bar\psi\gamma^5\gamma^3\psi =0 \end{equation} \begin{equation}\label{4.14b} \tilde{\Sigma}^D_{23}=0\quad \Rightarrow \quad \bar{c}\dot{\bar{b}} - \bar{b}\dot{\bar{c}}=0 \quad \cup \quad \bar\psi\gamma^5\gamma^1\psi =0 \end{equation} \begin{equation}\label{4.14c} \tilde{\Sigma}^D_{13}=0\quad \Rightarrow \quad 
\bar{a}\dot{\bar{c}} - \bar{c}\dot{\bar{a}}=0 \quad \cup \quad \bar\psi\gamma^5\gamma^2\psi =0 \end{equation} \end{subequations} As in the Jordan frame, it is also seen that $\tilde{\Sigma}^D_{0A}=0$ ($A=1,2,3$) are trivial identities, yielding no restrictions. \\ Collecting all the obtained results, we conclude that in the Einstein frame \eqref{4.6} the Einstein-like equations \eqref{2.15} are expressed as \begin{subequations}\label{4.15} \begin{equation}\label{4.15a} \frac{\dot{\bar a}}{\bar a}\frac{\dot{\bar b}}{\bar b} + \frac{\dot{\bar b}}{\bar b}\frac{\dot{\bar c}}{\bar c} + \frac{\dot{\bar a}}{\bar a}\frac{\dot{\bar c}}{\bar c} = \frac{\rho}{\varphi^2} + \frac{1}{2\varphi^2}m\bar\psi\psi - \frac{3}{64\varphi^3}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi) - \frac{1}{\varphi^3}V(\varphi) \end{equation} \begin{equation}\label{4.15b} \frac{\ddot{\bar b}}{\bar b} + \frac{\ddot{\bar c}}{\bar c} + \frac{\dot{\bar b}}{\bar b}\frac{\dot{\bar c}}{\bar c} = - \frac{p}{\varphi^2} - \frac{1}{\varphi^3}V(\varphi) + \frac{3}{64\varphi^3}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi) \end{equation} \begin{equation}\label{4.15c} \frac{\ddot{\bar a}}{\bar a} + \frac{\ddot{\bar c}}{\bar c} + \frac{\dot{\bar a}}{\bar a}\frac{\dot{\bar c}}{\bar c} = - \frac{p}{\varphi^2} - \frac{1}{\varphi^3}V(\varphi) + \frac{3}{64\varphi^3}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi) \end{equation} \begin{equation}\label{4.15d} \frac{\ddot{\bar a}}{\bar a} + \frac{\ddot{\bar b}}{\bar b} + \frac{\dot{\bar a}}{\bar a}\frac{\dot{\bar b}}{\bar b} = - \frac{p}{\varphi^2} - \frac{1}{\varphi^3}V(\varphi) + \frac{3}{64\varphi^3}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi) \end{equation} \end{subequations} together with the constraints \eqref{4.14}. Again, the only significant case is when two components among $\bar{a}(t)$, $\bar{b}(t)$ and $\bar{c}(t)$ are equal. 
For instance, we suppose $\bar{a}(t)=\bar{b}(t)$ and thus $\bar\psi\gamma^5\gamma^1\psi = \bar\psi\gamma^5\gamma^2\psi =0$. We can now proceed as in the Jordan frame (see Section 3), so obtaining from equations \eqref{4.15} the relations \begin{equation}\label{4.16} \frac{\bar a}{\bar c} = D_1\exp\left(X_1\int{\frac{1}{\bar\tau}dt}\right) \end{equation} \begin{equation}\label{4.17} 2\frac{\ddot{\bar\tau}}{\bar\tau} = 3\frac{\rho}{\varphi^2} - 3\frac{p}{\varphi^2} + \frac{3}{2\varphi^2}m\bar\psi\psi - \frac{6}{\varphi^3}V\/(\varphi) \end{equation} The last equations we have to express in terms of the metric $\bar{g}_{ij}$ are those concerning the conservation law for the perfect fluid \begin{equation} \label{4.18} \tilde{\nabla}_i\Sigma_F^{ij} =0 \end{equation} with \begin{equation} \label{4.18bis} \Sigma_F^{ij}= \varphi(\rho +p)\bar{U}^i\bar{U}^j - \varphi\/p\/\bar{g}^{ij} \end{equation} Taking equations \eqref{4.2} and \eqref{4.6} into account, it is easily seen that equations \eqref{4.18} and \eqref{4.18bis} reduce to \begin{equation}\label{4.19} \frac{\dot\rho}{\rho} + (1+\lambda)\frac{\dot{\bar\tau}}{\bar\tau} - \frac{3}{2}\frac{\dot\varphi}{\varphi}(1+\lambda)=0 \end{equation} Setting $\Theta:= \varphi^{-\frac{3}{2}}$, one has $- \frac{3}{2}\frac{\dot\varphi}{\varphi}=\frac{\dot\Theta}{\Theta}$ and we can integrate equation \eqref{4.19} as \begin{equation}\label{4.20} \rho =\rho_0\/(\bar{\tau}\Theta)^{-(1+\lambda)} \qquad \rho_0= {\rm constant} \end{equation} At this point, we notice that if we could express the quantities $\rho$, $\bar\psi\psi$ and $\varphi$ (or, equivalently, $\Theta=\varphi^{-\frac{3}{2}}$) as functions of $\bar\tau$ and insert the so obtained results into equation \eqref{4.17}, we would get a final equation for $\bar\tau$ of the kind \begin{equation}\label{4.21} \ddot{\bar\tau}= f(\bar\tau) \end{equation} which is quadratically integrable. 
On the other hand, there exists a special though not too restrictive case for which the above mentioned conditions are satisfied. Indeed, setting $i\bar\psi\gamma^5\psi=\bar\psi\gamma^5\gamma^0\psi=0$ in \eqref{4.10}, we obtain \begin{equation}\label{4.22} \bar\psi\psi = \frac{C}{\bar{\tau}\Theta} \end{equation} Equation \eqref{4.22}, together with \eqref{4.20} and $\Theta^{-\frac{2}{3}}=\varphi=f'(R(\bar\psi\psi,\rho))$, constitutes a set of three relations involving the four variables $\bar\tau$, $\bar\psi\psi$, $\rho$ and $\Theta$. In principle, it is then possible to express the last three variables as suitable functions of $\bar\tau$ alone, thus obtaining from \eqref{4.17} a final quadratically integrable equation of the kind \eqref{4.21}. \\ As a last remark, we note that equation \eqref{4.15a} has the nature of a constraint on the initial data. As done in the Jordan frame, we have again to check that such a constraint is satisfied for all time after the initial instant. In connection with this, in Section 5 it is proved that the conservation laws \eqref{2.2} ensure that the four-divergence (with respect to the Levi--Civita connection associated with the conformal metric $\bar{g}_{ij}$) of the effective stress-energy tensor \begin{equation}\label{4.23} T_{ij}:= \frac{1}{\varphi}\tilde{\Sigma}^D_{ij} + \frac{1}{\varphi}\Sigma^F_{ij} - \frac{1}{\varphi^3}V\/(\varphi)\bar{g}_{ij} + \frac{3}{64\varphi^3}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi)\bar{g}_{ij} \end{equation} vanishes. Repeating the arguments developed in Section 3, we can apply again Bruhat's result \cite{yvonne4} and thus verify that the Hamiltonian constraint \eqref{4.15a} is preserved in time. \\ We now illustrate two explicit examples where the above procedure actually works: \paragraph{Example 1: $f(R)\equiv R^{4}$ model.} To begin with, let us consider the model $f(R)=R^4$ coupled with a Dirac field (without perfect fluid for simplicity). 
In such a case, the corresponding potential \eqref{2.8} assumes the form \begin{equation}\label{6.1} V\/(\varphi)= \left(\frac{1}{4}\right)^{\frac{1}{3}}\frac{3}{8}\varphi^{\frac{7}{3}} \end{equation} with the scalar field \eqref{2.7} given by \begin{equation}\label{6.2} \varphi = 4\left(\frac{m}{4}\bar\psi\psi\right)^{\frac{3}{4}} \end{equation} Through equation \eqref{4.22} and the definition of $\Theta=\varphi^{-\frac{3}{2}}$, we get the relation \begin{equation} \label{6.3} \Theta = A\/\left(\frac{C}{\bar\tau}\right)^9 \end{equation} with \begin{equation} \label{6.3bis} A=\left[4\/\left(\frac{m}{4}\right)^{\frac{3}{4}}\right]^{12} \end{equation} from which, using again \eqref{4.22}, we have \begin{equation}\label{6.4} \frac{3}{2\varphi^2}m\bar\psi\psi= \frac{3m}{32C^8\/\left(\frac{m}{4AC^8}\right)^{\frac{3}{4}}}\bar{\tau}^{-4} \end{equation} and \begin{equation}\label{6.5} -\frac{6}{\varphi^3}V\/(\varphi) = -\frac{9}{4}\left(\frac{1}{4}\right)^{\frac{5}{6}}\left(\frac{m}{4AC^8}\right)^{-\frac{1}{2}}\bar{\tau}^{-4} \end{equation} Inserting equations \eqref{6.4} and \eqref{6.5} into \eqref{4.17} and choosing the constant $C$ sufficiently small, we obtain the final differential equation \begin{equation}\label{6.6} \ddot{\bar\tau}= B\bar{\tau}^{-3} \end{equation} with $B>0$. The latter can be easily integrated as \begin{equation}\label{6.7} \bar\tau = \sqrt{D\/(t+E)^2 + \frac{B}{D}} \end{equation} where the constants $D>0$ and $E$ have to be determined consistently with equation \eqref{4.15a}. From equations \eqref{6.6} and \eqref{6.7} we have also $\bar{\tau}^2 \geq \frac{B}{D}$. We see that, at least in the Einstein frame, the model $f(R)=R^4$ coupled with just one Dirac Field gives rise to a universe with no initial singularity but undergoing a slow expansion without isotropization. 
\paragraph{Example 2: $f(R)\equiv R+\alpha R^{2}$ model.} The second example we consider is given by the model $f(R)=R+\alpha\/R^2$ with $\alpha < 0$, still coupled with a Dirac field alone. The associated potential \eqref{2.8} is now of the form \begin{equation}\label{6.8} V(\varphi)= \frac{1}{8\alpha}\varphi(\varphi -1)^2 \end{equation} while the scalar field \eqref{2.7} is \begin{equation}\label{6.9} \varphi=1 - \alpha\/m\/\bar\psi\psi \end{equation} From equation \eqref{4.22} and the definition of $\Theta$ we get the algebraic equation of third order (for the unknown $\Theta\bar\tau$) \begin{equation}\label{6.10} \left(\bar\tau\/\Theta - \alpha\/mC \right) ^{3} - {\bar\tau}^{3}\/\Theta=0 \end{equation} linking $\Theta$ to the volume-scale $\bar\tau$. We recall that we are working under the assumption $\varphi >0$. We are then forced to require $\Theta >0$. In order to satisfy this condition, we have to impose that equation \eqref{6.10} has three real solutions (indeed, if equation \eqref{6.10} had only one real solution, the latter would be negative as it is easily seen solving \eqref{6.10} by a graph). The imposed requirement yields the condition $\bar\tau \geq \frac{3\sqrt{3}}{2}|\alpha|\/mC$ which represents a lower bound for the volume-scale $\bar\tau$. Therefore, again we can avoid initial singularity. 
Anyway, a solution remaining positive in time is \begin{equation}\label{6.11} \Theta\bar\tau=\alpha\/mC+\frac{2}{3}\sqrt {3}\bar\tau\/\cos\left(\frac{1}{3}\phi \right) \end{equation} where \begin{equation} \phi = \arccos \left( \frac{3}{2}{\frac{\sqrt{3}\left| \alpha \right|\/mC}{\bar\tau}} \right) \end{equation} At this point, choosing parameters such that $\alpha= - \frac{1}{2}$ and $mC=1$ for simplicity, and inserting equations \eqref{6.10} and \eqref{6.11} into \eqref{4.17}, after some calculation we end up with the final differential equation \begin{equation}\label{6.12} \ddot{\bar\tau} =\frac{\sqrt {3}}{4}\cos \left( \frac{1}{3}\,\arccos \left( \frac {3\sqrt {3}}{4\bar\tau} \right) \right) + \frac{3\sqrt {3}}{16}\left( \cos \left( \frac{1}{3} \arccos \left( \frac{3\sqrt{3}}{4\bar\tau} \right) \right) \right) ^{-1} \end{equation} Equation \eqref{6.12} is quadratically integrable, but explicit calculations are not so easy. We can then proceed to integrate it numerically. For instance, if initial data $\bar\tau(0)=\frac{3\sqrt{3}}{4}$ and $\dot{\bar\tau}(0)= 1$ are assigned, the corresponding behavior of the volume-scale $\bar\tau(t)$ in time is given by the graph in Figure~\ref{fig:1} \begin{figure}[htb] \includegraphics[height=7.5cm]{./pic1.jpg} \caption{volume-scale $\bar\tau$ vs time t} \label{fig:1} \end{figure} showing an (at least initially) accelerated expansion of the volume-scale $\bar\tau$. Moreover, by plotting the function $\frac{\bar\tau}{t}$, it is seen that the volume-scale $\bar\tau$ is infinite of order $n>1$. Due to equation \eqref{4.16}, this fact implies that the universe isotropizes at large $t$. Also, we notice that at large $t$ one has $\varphi \approx 1$ and the Einstein and Jordan frames tend to coincide. \section{Conservation laws} We clarify the relationship between the conservation laws \eqref{2.2} and the Einstein-like equations \eqref{2.15}. 
More in particular, we show that the validity of the conservation laws \eqref{2.2} implies that the four-divergence of the Einstein-like equations \eqref{2.15}, with respect to the Levi--Civita connection, vanishes and vice-versa. Also, we prove that the same result holds in the Einstein-frame, namely the four-divergence of the conformally transformed Einstein-like equations \eqref{4.11}, with respect to the Levi--Civita connection associated with the conformal metric $\bar{g}_{ij}=\varphi\/g_{ij}$, is zero. \\ To start with, for convenience of the reader, we rewrite equations \eqref{2.2} \begin{subequations}\label{A.1} \begin{equation}\label{A.1a} \nabla_{i}\Sigma^{ij}+T_{i}\Sigma^{ij}-\Sigma_{ih}T^{jih}-\frac{1}{2}S_{hiq}R^{hiqj}=0 \end{equation} \begin{equation}\label{A.1b} \nabla_{h}S^{ijh}+T_{h}S^{ijh}+\Sigma^{ij}-\Sigma^{ji}=0 \end{equation} \end{subequations} where $\Sigma^{ij}$ denotes the sum of the stress-energy tensors of the Dirac field and the perfect fluid. The curvature tensor can be decomposed in the form \begin{equation}\label{A.2} R^h_{\;\;iqj}= \tilde{R}^h_{\;\;iqj} + \tilde{\nabla}_jK_{qi}^{\;\;\;h} - \tilde{\nabla}_qK_{ji}^{\;\;\;h} + K_{ji}^{\;\;\;p}K_{qp}^{\;\;\;h} - K_{qi}^{\;\;\;p}K_{jp}^{\;\;\;h} \end{equation} where the contorsion tensor $K_{ij}^{\;\;\;h}$ is expressed as the sum \cite{CCSV2,FV1} \begin{equation}\label{A.3} K_{ij}^{\;\;\;h}= \hat{K}_{ij}^{\;\;\;h} + \hat{S}_{ij}^{\;\;\;h} \end{equation} with \begin{subequations}\label{A.4} \begin{equation}\label{A.4a} \hat{S}_{ij}^{\;\;\;h}:=-\frac{1}{2\varphi}\/S_{ij}^{\;\;\;h} \end{equation} \begin{equation}\label{A.4b} \hat{K}_{ij}^{\;\;\;h} := -\hat{T}_j\delta^h_i + \hat{T}_pg^{ph}g_{ij} \end{equation} \begin{equation}\label{A.4c} \hat{T}_j:=\frac{1}{2\varphi}\/\de{\varphi}/de{x^j} \end{equation} \end{subequations} Taking equations \eqref{A.2}, \eqref{A.3} and \eqref{A.4} into account, a direct calculation shows that \begin{eqnarray}\label{A.5} &\nonumber -\frac{1}{2}S_{hiq}R^{hiqj} = - 
S_h^{\;\;ip}\hat{S}^j_{\;\;ip}\hat{T}^h - \frac{1}{2}\varphi\tilde\nabla^j\/\left(\hat{S}_{qih}\hat{S}^{qih}\right)-\\ &-\varphi\tilde\nabla_i\/\left(\hat{S}^{hqi}\hat{S}^j_{\;\;qh}\right) +\varphi\tilde\nabla_i\/\left(\hat{S}^{hqi}\right)\/\hat{S}^j_{\;\;qh} \end{eqnarray} Moreover, making use of equations \eqref{2.12} and \eqref{2.13}, it is easily seen that \begin{equation}\label{A.6} \nabla_{i}\Sigma^{ij}+T_{i}\Sigma^{ij}-\Sigma_{ih}T^{jih} = \tilde\nabla_{i}\Sigma^{(ij)} + \tilde\nabla_{i}\Sigma^{[ij]} - K_{ih}^{\;\;\;j}\Sigma^{[ih]} - T^j_{\;\;ih}\Sigma^{[ih]} \end{equation} In \cite{FV1} it is proved that $\Sigma^{(ij)} = \tilde{\Sigma}^{(ij)} - \varphi\hat{S}^{hip}\hat{S}^j_{\;\;ph}$ and also that the conservation laws \eqref{A.1b} amount to the identities $\frac{1}{\varphi}\Sigma^{[ij]} + \tilde\nabla_h\hat{S}^{jih}=0$. From this and equations \eqref{A.3} and \eqref{A.4} one gets \begin{subequations}\label{A.7} \begin{equation}\label{A.7a} \tilde\nabla_i\Sigma^{(ij)} = \tilde\nabla_i\tilde{\Sigma}^{(ij)} - \tilde\nabla_i\/\left(\varphi\hat{S}^{hip}\hat{S}^{j}_{\;\;ph}\right) \end{equation} \begin{equation}\label{A.7b} \tilde\nabla_i\Sigma^{[ih]} = - \varphi_i\tilde\nabla_h\hat{S}^{jih} \end{equation} \begin{equation}\label{A.7c} - K_{ih}^{\;\;\;j}\Sigma^{[ih]} = - \varphi\/\hat{T}_h\tilde\nabla_q\hat{S}^{hjq} + \varphi\hat{S}_{ih}^{\;\;\;j}\tilde\nabla_q\hat{S}^{hiq} \end{equation} \begin{equation}\label{A.7d} - T^j_{\;\;ih}\Sigma^{[ih]} = \varphi\/\hat{T}_i\tilde\nabla_q\hat{S}^{jiq} - 2\varphi\hat{S}_{jih}\tilde\nabla_q\hat{S}^{hiq} \end{equation} \end{subequations} where for simplicity we have defined $\varphi_i := \de\varphi/de{x^i}$. 
Inserting equations \eqref{A.5}, \eqref{A.6} and \eqref{A.7} into \eqref{A.1a}, the latter reduce to \begin{equation}\label{A.8} \tilde\nabla_i\tilde{\Sigma}^{(ij)} - \frac{1}{2}\varphi\tilde\nabla^j\left(\hat{S}_{hqp}\hat{S}^{hqp}\right) =0 \end{equation} At this point, we recall that equations \eqref{2.7} and \eqref{2.8} are equivalent to the relation \begin{equation}\label{A.9} \Sigma -\frac{6}{\varphi}V(\varphi) + 2V'(\varphi)=0 \end{equation} (see \cite{CCSV1} for the proof). After that, taking the trace of equation \eqref{2.15} as well as the identity $-\frac{1}{2}\hat{S}_{hqp}\hat{S}^{hqp} = \frac{3}{64\varphi^2}(\bar{\psi}\gamma_5\gamma^\tau\psi)(\bar{\psi}\gamma_5\gamma_\tau\psi)$ \cite{FV1} into account, we get \begin{equation}\label{A.10} \tilde\Sigma= -\varphi\tilde{R} - \frac{3}{2}\frac{1}{\varphi}\varphi_i\varphi^i + 3\tilde{\nabla}_i\varphi^i + \frac{4}{\varphi}V(\varphi) + 2\varphi\hat{S}_{hqp}\hat{S}^{hqp} \end{equation} Substituting the identity $\Sigma = \tilde\Sigma - \varphi\hat{S}_{hqp}\hat{S}^{hqp}$ \cite{FV1} and equation \eqref{A.10} in equation \eqref{A.9}, we obtain \begin{equation}\label{A.11} \tilde{R} + \frac{3}{2}\frac{1}{\varphi^2}\varphi_i\varphi^i - \frac{3}{\varphi}\tilde{\nabla}_i\varphi^i + \frac{2}{\varphi^2}V(\varphi) - \frac{2}{\varphi}V'(\varphi) - \hat{S}_{hqp}\hat{S}^{hqp}=0 \end{equation} We rewrite equation \eqref{2.15} in the form \begin{equation}\label{A.12} \begin{split} \varphi\tilde{R}_{ij} -\frac{\varphi}{2}\tilde{R}g_{ij}= \tilde\Sigma_{(ij)} + \frac{1}{\varphi}\left( - \frac{3}{2}\varphi_i\varphi_j + \varphi\tilde{\nabla}_{j}\varphi_i + \frac{3}{4}\varphi_h\varphi^h\/g_{ij} + \right. \\ \left. 
- \varphi\tilde{\nabla}^h\varphi_h\/g_{ij} - V\/(\varphi)g_{ij} \right) -\frac{1}{2}\varphi\hat{S}_{hqp}\hat{S}^{hqp}g_{ij} \end{split} \end{equation} The covariant divergence of \eqref{A.12} yields \begin{equation}\label{A.13} \begin{split} (\tilde\nabla^j\varphi)\tilde{R}_{ij} + \varphi\tilde\nabla^j\tilde{G}_{ij} -\frac{1}{2}\tilde{R}\tilde\nabla_i\varphi = \tilde\nabla^j\tilde\Sigma_{(ij)} + \left(\tilde\nabla^j\tilde{\nabla}_{j}\tilde\nabla_i - \tilde\nabla_i\tilde{\nabla}^j\tilde\nabla_j\right)\varphi +\\ + \tilde\nabla^j\left[\frac{1}{\varphi}\left(-\frac{3}{2}\varphi_i\varphi_j + \frac{3}{4}\varphi_h\varphi^h\/g_{ij} - V\/(\varphi)g_{ij}\right)\right] - \frac{1}{2}\tilde\nabla_i\left(\varphi\hat{S}_{hqp}\hat{S}^{hqp}\right) \end{split} \end{equation} By definition, the Einstein and the Ricci tensors satisfy $\tilde\nabla^j\tilde{G}_{ij}=0$ and $(\tilde\nabla^j\varphi)\tilde{R}_{ij} = \left(\tilde\nabla^j\tilde{\nabla}_{j}\tilde\nabla_i - \tilde\nabla_i\tilde{\nabla}^j\tilde\nabla_j\right)\varphi$. Then equation \eqref{A.13} reduces to \begin{eqnarray}\label{A.14} \nonumber &-\frac{1}{2}\tilde{R}\tilde\nabla_i\varphi = \tilde\nabla^j\tilde\Sigma_{(ij)}+ \tilde\nabla^j\left[\frac{1}{\varphi}\left(-\frac{3}{2}\varphi_i\varphi_j + \frac{3}{4}\varphi_h\varphi^h\/g_{ij} - V\/(\varphi)g_{ij}\right)\right]-\\ &-\frac{1}{2}\tilde\nabla_i\left(\varphi\hat{S}_{hqp}\hat{S}^{hqp}\right) \end{eqnarray} Finally, making use of equation \eqref{A.10} it is easily seen that \begin{equation}\label{A.15} -\frac{1}{2}\tilde{R}\tilde\nabla_i\varphi = \tilde\nabla^j\left[\frac{1}{\varphi}\left(-\frac{3}{2}\varphi_i\varphi_j + \frac{3}{4}\varphi_h\varphi^h\/g_{ij} - V\/(\varphi)g_{ij}\right)\right] - \frac{1}{2}\varphi_i\hat{S}_{hqp}\hat{S}^{hqp} \end{equation} and then equations \eqref{A.13} amount to the equations $\tilde\nabla^j\tilde{\Sigma}_{(ij)} - \frac{1}{2}\varphi\tilde\nabla_i\left(\hat{S}_{hqp}\hat{S}^{hqp}\right) =0$, clearly identical to \eqref{A.8}. 
\\ Now, we denote by \begin{equation}\label{B.2} T_{ij}= \frac{1}{\varphi}\tilde{\Sigma}_{(ij)} - \frac{1}{\varphi^3}V\/(\varphi)\bar{g}_{ij} - \frac{1}{2\varphi}\hat{S}_{hqp}\hat{S}^{hqp}\bar{g}_{ij} \end{equation} the effective stress-energy tensor on the right hand side of equations \eqref{4.11}. Indicating by $\bar\nabla_i$ the covariant derivative associated with the conformal metric $\bar{g}_{ij}=\varphi\/g_{ij}$, we show that the condition $\bar{\nabla}^j\/T_{ij}=0\/$ is equivalent to the conservation laws $\tilde{\nabla}^j\tilde\Sigma_{(ij)}- \frac{1}{2}\varphi\tilde\nabla_i\left(\hat{S}_{hqp}\hat{S}^{hqp}\right)=0$. Recalling equations \eqref{4.2}, we have \begin{equation}\label{B.3} \begin{split} \bar{\nabla}^j\/T_{ij}= \frac{1}{\varphi}g^{sj}\bar{\nabla}_s\/T_{ij}= \frac{1}{\varphi}g^{sj}\left[\tilde{\nabla}_s\/T_{ij} - \frac{1}{2\varphi}\left(\de{\varphi}/de{x^i}\delta^q_s + \de{\varphi}/de{x^s}\delta^q_i - \de{\varphi}/de{x^u}g^{uq}g_{si}\right)T_{qj} +\right. \\ \left. - \frac{1}{2\varphi}\left(\de{\varphi}/de{x^j}\delta^q_s + \de{\varphi}/de{x^s}\delta^q_j - \de{\varphi}/de{x^u}g^{uq}g_{sj}\right)T_{iq}\right] \end{split} \end{equation} We have separately \begin{equation}\label{B.4} \begin{split} \frac{1}{\varphi}g^{sj}\tilde{\nabla}_s\/T_{ij}=\frac{1}{\varphi}g^{sj}\tilde{\nabla}_s\/\left(\frac{1}{\varphi}\tilde\Sigma_{(ij)} - \frac{1}{\varphi^2}V\/(\varphi)g_{ij} - \frac{1}{2}\hat{S}_{hqp}\hat{S}^{hqp}g_{ij}\right)=\\ \frac{1}{\varphi^2}\tilde{\nabla}^j\tilde\Sigma_{(ij)} - \frac{1}{\varphi^3}\de{\varphi}/de{x^s}\tilde\Sigma_{(i}^{\;\;s)} + - \frac{1}{\varphi}\tilde\nabla_i\left(\frac{1}{\varphi^2}V\/(\varphi)\right) -\frac{1}{2\varphi}\tilde\nabla_i\left(\hat{S}_{hqp}\hat{S}^{hqp}\right) \end{split} \end{equation} \begin{equation}\label{B.5} \begin{split} \frac{1}{\varphi}g^{sj}\frac{1}{2\varphi}\left(\de{\varphi}/de{x^i}\delta^q_s + \de{\varphi}/de{x^s}\delta^q_i - \de{\varphi}/de{x^u}g^{uq}g_{si}\right)T_{qj}=\\ 
=\frac{1}{\varphi}g^{sj}\frac{1}{2\varphi}\left(\de{\varphi}/de{x^i}\delta^q_s + \de{\varphi}/de{x^s}\delta^q_i - \de{\varphi}/de{x^u}g^{uq}g_{si}\right)\times\\ \times\left(\frac{1}{\varphi}\tilde\Sigma_{(qj)} - \frac{1}{\varphi^2}V\/(\varphi)g_{qj} -\frac{1}{2}\hat{S}_{hqp}\hat{S}^{hqp}g_{qj}\right)=\\ =\frac{1}{2\varphi^3}g^{sj}\/\left(\de{\varphi}/de{x^i}\tilde\Sigma_{(sj)} + \de{\varphi}/de{x^s}\tilde\Sigma_{(ij)} - \de{\varphi}/de{x^u}g_{si}\tilde\Sigma^{(u}_{\;\;j)}\right)+\\ -\frac{1}{2\varphi^4}g^{sj}\/\left(\de{\varphi}/de{x^i}V\/(\varphi)g_{sj} + \de{\varphi}/de{x^s}V\/(\varphi)g_{ij} - \de{\varphi}/de{x^u}V\/(\varphi)\delta^u_jg_{si}\right)=\\ -\frac{1}{4\varphi^2}g^{sj}\left(\de{\varphi}/de{x^i}\hat{S}_{hqp}\hat{S}^{hqp}g_{sj} + \de{\varphi}/de{x^s}\hat{S}_{hqp}\hat{S}^{hqp}g_{ij} - \de{\varphi}/de{x^j}\hat{S}_{hqp}\hat{S}^{hqp}g_{si}\right)=\\ \\=\frac{1}{2\varphi^3}\de{\varphi}/de{x^i}\tilde\Sigma - \frac{2}{\varphi^4}\de{\varphi}/de{x^i}V\/(\varphi) - \frac{1}{\varphi^2}\de{\varphi}/de{x^i}\hat{S}_{hqp}\hat{S}^{hqp} \end{split} \end{equation} and \begin{equation}\label{B.6} \begin{split} \frac{1}{\varphi}g^{sj}\frac{1}{2\varphi}\left(\de{\varphi}/de{x^j}\delta^q_s + \de{\varphi}/de{x^s}\delta^q_j - \de{\varphi}/de{x^u}g^{uq}g_{sj}\right)T_{iq}=\\ =\frac{1}{\varphi}g^{sj}\frac{1}{2\varphi}\left(\de{\varphi}/de{x^j}\delta^q_s + \de{\varphi}/de{x^s}\delta^q_j - \de{\varphi}/de{x^u}g^{uq}g_{sj}\right)\times\\ \times\left(\frac{1}{\varphi}\tilde\Sigma_{(iq)} - \frac{1}{\varphi^2}V\/(\varphi)g_{iq} -\frac{1}{2}\hat{S}_{hqp}\hat{S}^{hqp}g_{iq}\right)=\\ =\frac{1}{2\varphi^3}g^{sj}\/\left(\de{\varphi}/de{x^j}\tilde\Sigma_{(is)} + \de{\varphi}/de{x^s}\tilde\Sigma_{(ij)} - \de{\varphi}/de{x^u}g_{sj}\tilde\Sigma^{(u}_{\;\;i)}\right)+\\ -\frac{1}{2\varphi^4}g^{sj}\/\left(\de{\varphi}/de{x^j}V\/(\varphi)g_{si} + \de{\varphi}/de{x^s}V\/(\varphi)g_{ij} - \de{\varphi}/de{x^u}V\/(\varphi)\delta^u_ig_{sj}\right)=\\ 
=-\frac{1}{4\varphi^2}g^{sj}\left(\de{\varphi}/de{x^j}\hat{S}_{hqp}\hat{S}^{hqp}g_{is} + \de{\varphi}/de{x^s}\hat{S}_{hqp}\hat{S}^{hqp}g_{ij} - \de{\varphi}/de{x^i}\hat{S}_{hqp}\hat{S}^{hqp}g_{sj}\right)=\\ = -\frac{1}{\varphi^3}\de{\varphi}/de{x^s}\tilde\Sigma^{\;\;s)}_{(i} + \frac{1}{\varphi^4}\de{\varphi}/de{x^i}V\/(\varphi) +\frac{1}{2\varphi^2}\de{\varphi}/de{x^i}\hat{S}_{hqp}\hat{S}^{hqp} \end{split} \end{equation} Collecting equations \eqref{B.4}, \eqref{B.5} and \eqref{B.6} and recalling that $\Sigma=\tilde\Sigma - \varphi\hat{S}_{hqp}\hat{S}^{hqp}$, we have then \begin{equation}\label{B.7} \begin{split} \bar{\nabla}^j\/T_{ij}=\frac{1}{\varphi^2}\left(\tilde{\nabla}^j\tilde\Sigma_{(ij)} -\frac{\varphi}{2}\tilde\nabla_i\hat{S}_{hqp}\hat{S}^{hqp}\right) + \frac{\varphi_i}{\varphi^3}\left[-\frac{1}{2}\Sigma + \frac{3}{\varphi}V(\varphi) - V'(\varphi)\right]= \\ = \frac{1}{\varphi^2}\left(\tilde{\nabla}^j\tilde\Sigma_{(ij)} -\frac{\varphi}{2}\tilde\nabla_i\hat{S}_{hqp}\hat{S}^{hqp}\right) \end{split} \end{equation} because the identity $-\frac{1}{2}\Sigma + \frac{3}{\varphi}V(\varphi) - V'(\varphi)=0$ holds identically, being equivalent to the definition $\varphi=f'(F(\Sigma))$ \cite{CCSV1}. \section{Conclusion} In the present paper we started from gravitational theories of the $f(R)$ type in which the Ricci scalar $R$ contains torsion as well as the metric field, considering them in the case in which the spacetime is filled with Dirac matter fields; we have employed these models to study anisotropic cosmological models BI: with respect to Einstein gravity, additional gravitational effects arise as a consequence of the non-linearity of the $f(R)$ function and the natural torsion-spin coupling, known to induce centrifugal barriers \cite{s-s}. 
These non-linear and repulsive centrifugal effects may enforce one another, especially in the case in which anisotropies are considered; the relationship between torsion and anisotropies of the spacetime has been widely investigated in the literature, and recent accounts are for example those in \cite{m-b,s-r}. \\ An important issue that must be highlighted is the fact that, despite the anisotropic background, the Einstein tensor is diagonal, while, because of the intrinsic features of the spinor field, the energy tensor is not diagonal: in this circumstance the non-diagonal part of the gravitational field equations results in the constraints \eqref{3.11} characterizing the structure of the spacetime or the helicity of the spinor field or both; in our understanding, the only physically meaningful situation is the one in which two axes are equal and one spatial component of the axial vector torsion does not vanish, giving rise to a universe that is spatially shaped as an ellipsoid of rotation revolving about the only axis along which the spin density is not equal to zero. It is also worth noticing that for the specific form of the Dirac field we have chosen, although the Dirac field is massive, still we have the conservation of the axial current; on the other hand it is widely known that quantum corrections might produce additional terms for which the axial current is not conserved anymore, if the Dirac field is charged or if additional terms in the dynamics are introduced, and then it would be interesting to see what are the effects that constraints such as \eqref{3.11} have on these partially conserved axial currents. However, because in the present paper we do not deal with quantum corrections, nor with charged fields or extensions in the electrodynamics, addressing this issue would bring us far from the main aim of this work, and we refer the reader to \cite{m/3} and references therein for a discussion on this subject. 
\\ The field equations have been worked out in both the Jordan and the Einstein frames, showing that the procedure to get solutions proposed in \cite{Saha1,Saha2} applies also here, independently of the given function $f(R)$. In particular, in the Einstein frame the Einstein-like equations result in general to be quadratically integrable. \\ As it may have been expected, these models are intrinsically quite difficult to study in general; we have focused on some specific examples for which the field equations are considerably simplified: we have studied the model $f(R)=R^4$, where exact analytic solutions are found, although this model does not reproduce the physical content we expect; we have further studied the model $f(R)=R+\alpha R^2$, for which exact analytic solutions are very difficult to obtain; therefore, in this case, we have first worked in the Jordan frame, finding solutions that are analytic but approximated to the first order of $\alpha$, and then we have moved in the Einstein frame, employing numerical methods. In the case $f(R)=R^4$, we have found that the universe does not display isotropization, as it should, and there is no correct Einstein limit; in the $f(R)=R+\alpha R^2$ case, we have found that the results obtained in the two frames coincide, and they yield Einsteinization and isotropization, that is as the cosmic time goes by both the non-linearities of the $f(R)$ function and the anisotropies of the background tend to vanish. In both models however, no initial singularity for the volume of the universe has been found at all. \\ Finally, we have proved that the proposed method for solving the field equations is consistent with the assignment of initial data, clarifying the role of the previously stated general conservation laws \cite{FV1} in preserving the Hamiltonian constraint. 
\\ To summarize, we have found here three types of results: first of all, from the point of view of the behaviour of the universe, our results may help to solve one of the major conceptual problems of $f(R)$ theories related to the arbitrariness of the $f(R)$ function; indeed, as we have shown, a given $f(R)$ may either give rise to physics with no good behaviour or yield physics with good behaviour; thus although there is no theoretical reason to prefer a particular form for the $f(R)$ function instead of another, nevertheless there are phenomenological set-ups in which selection rules discriminating physical $f(R)$ functions from infeasible ones may be established. Secondly, universes that have finite initial volume are possible, and therefore the gravitational singularity problem receives new fuel from the present discussion. Finally, the initial values problem is shown to be well formulated. \\ The general outlook emerging from our study encourages us to pursue the study of torsional $f(R)$ models, since their field equations and conservation laws are well defined and consistent with the assignment of initial data, while the presence of torsion can remove the singularity problem, and there are hopes that a possible specific $f(R)$ may be selected which will be able to describe what is missing in the evolution of the universe as we know it.
1,108,101,565,884
arxiv
\section{Introduction} Despite the impressive and unquestioned empirical success of quantum theory, the physical meaning of its basic object, the wave function, is still controversial. The standard---or Copenhagen---interpretation of quantum theory asserts that the wave function embodies the most complete description possible of the state of a physical system, while connecting it with experience, and thereby assigning to it physical significance, only via a set of rules for calculating probabilities of results of ``measurements.'' It seems essential within the standard interpretation that ``measurements'' be distinguished from other physical processes, and that attention be paid to the fact that the theory makes predictions {\it only}\/ about results of ``measurements'': otherwise one runs into the well-known measurement problem or, more pictorially, the paradox of Schr\"odinger's cat. In any case, the fundamental role of ``measurements'' (which is sometimes shifted to ``observers'') in the Copenhagen interpretation leads first of all to the theory's not being well-formulated as a fundamental (as opposed to phenomenological) theory because what constitutes a ``measurement'' is not specified. Secondly, with regard to cosmology, the necessity to invoke an outside measurement apparatus or observer seems rather awkward. (For extraordinarily clear presentations of the problems of quantum theory as well as of possible solutions see \cite{Bell,Bellmeas,Albert}.) An alternative interpretation resp.\ theory agreeing with quantum theory on (most of) its predictions which is not based on the notion of ``measurement'' or ``observer'' is usually called a ``realistic'' interpretation resp.\ theory.\footnote{This is a rather unfortunate term---can ``realism,'' i.e., the belief that there is a material world the description of which is the task of physics, seriously be questioned in physics? 
See also \cite{Maud95}.} More precisely, we shall understand by a ``realistic quantum theory'' a theory, agreeing with quantum theory on (most of) its predictions, in which it is explicitly specified what the material world is thought to be made of---be it particles or fields or what have you---and how these entities behave. We emphasize that this by no means implies a ``naive realism''; on the contrary, these entities---what Bell called the ``beables'' of the theory---can be rather remote from our perception of the world. Moreover, the performance of experiments may disturb the behavior of the beables, so that the ``observed'' properties of matter may be quite different from those left ``unobserved.'' In nonrelativistic quantum theory there are two principal routes for setting up a realistic quantum theory: Either the wave function is not the complete description of the state of a physical system, but must be supplemented by some further quantities, commonly (and unfortunately) called ``hidden variables,'' or the unitary evolution of the wave function must be modified. The paradigmatic example of the first route is Bohmian mechanics \cite{Bohm52,DGZ92a}, that of the second route the theory of Ghirardi, Rimini, and Weber (GRW) \cite{GRW}. We shall call a realistic quantum theory of the first kind a ``Bohmian theory.'' Our objective is to find a Lorentz invariant Bohmian theory which extends Bohmian mechanics, i.e., which leads to Bohmian mechanics in the nonrelativistic limit. 
\bigskip For systems of a single particle, a Lorentz invariant Bohmian theory is immediately specified \cite{Bohm53,BohmHiley,Holland}: the beables are the wave function $\psi(x^\mu )$ and a particle path, which may be specified as an integral curve of a 4-vector field $j^\mu$ (for example, of the current naturally associated with the Klein-Gordon or Dirac wave function) \begin{equation} \label{1partbohm} \frac{dX^\mu}{ds} = j^\mu (X^\mu) .\end{equation} Multiplication of $j^\mu$ by a positive scalar field $a(x^\mu )$ changes only the parametrization, not the path, understood as the equivalence class of curves $X^\mu : {\rm I\! R}\to {\rm I\! R}^4$, $s\mapsto X^\mu(s)$ differing only in their parametrization, or as the image $X^\mu ({\rm I\! R})$ of a curve $X^\mu$, i.e., a 1-dimensional subset of ${\rm I\! R}^4$. If $j^\mu$ is everywhere timelike, i.e., if $j_\mu j^\mu >0$ with the sign convention for the metric $g_{00}=1$, $g_{11}= g_{22}= g_{33}=-1$, a parametrization by proper time may be obtained by replacing Eqn.\ (\ref{1partbohm}) by $\displaystyle \frac{dX^\mu}{d\tau } = u^\mu (X^\mu)$ with the 4-velocity $u=aj$, $a=(j_\mu j^\mu )^{- 1/2}$. In general there is no distinguished parametrization, and the parametrization chosen in writing Eqn.\ (\ref{1partbohm}) has no physical significance as such: all equations of the form $\displaystyle \frac{dX^\mu}{ds } = a(X^\mu )j^\mu (X^\mu)$ with different $a$ are physically equivalent. The Dirac current $j^\mu = \bar \psi \gamma ^\mu\psi $ is a timelike future-oriented vector; thus the curves which are solutions of (\ref{1partbohm}) run from $t=-\infty$ to $t=+\infty$, never backwards in time, with velocity everywhere bounded by $c$. 
In particular, every path crosses every $t=$const.-hyperplane of every Lorentz frame of reference---or, indeed, every spacelike hypersurface---exactly once, and thus there is a one-to-one correspondence between paths and points---their crossing points---on an arbitrary spacelike hypersurface. Because the Dirac current is divergence free, it allows moreover for a straightforward introduction of a dynamically distinguished measure on the set of particle paths as follows: In an arbitrary Lorentz frame, take $\rho = j^0 = \psi ^\dagger \psi$ as the density of crossings through a $t=t_0$-hyperplane at an arbitrary time $t= t_0$.\footnote{If $\int j^0dx^1dx^2dx^3 <\infty$, we may normalize the measure by replacing $j$ by $aj$ with $a^{-1}=\int j^0dx^1dx^2dx^3 $ to obtain a probability measure.} Then the density of crossings $\rho$ arising from (\ref{1partbohm}) satisfies $\rho = j^0$ at all times in this frame, i.e., $j^0=\psi ^\dagger \psi$ is an ``equivariant'' density. Furthermore, ``quantum equilibrium'' $\rho = j^0$ holds then in {\it all}\/ Lorentz frames at all times. The distribution $\rho = j^0 = \psi ^\dagger \psi$ is hence the relativistic generalization of the ``quantum equilibrium distribution'' $\rho =|\psi |^2$ of nonrelativistic Bohmian mechanics, which is the essential tool for the derivation of the nonrelativistic quantum formalism \cite{DGZ92a}. In fact, any divergence free current $j^\mu$, in particular also the Klein-Gordon current which is in general not globally timelike, gives rise to a natural measure on the set of trajectories which are integral curves of $j^\mu$ (i.e., solutions of (\ref{1partbohm})), in a way extending the above definition of a natural measure for the Bohm-Dirac theory. Moreover, the fact that Klein-Gordon trajectories possibly ``run backwards in time'' may well be viewed as naturally describing pair creation and annihilation. We shall discuss these topics in a subsequent work. 
\bigskip For systems of more than one particle, it is not at all obvious how to construct a Lorentz invariant realistic quantum theory, in fact it is not even clear whether this is possible at all. The problem is due to the unavoidable nonlocality of any realistic (or, more accurately, of any precise (\cite{Bell}, pp.\ 171, 194)) version of quantum theory: The incompleteness argument of Einstein, Podolsky, and Rosen (EPR) \cite{EPR} together with the analysis of Bell (\cite{Bell}, Chapter 2)\footnote{For particularly clear presentations see also \cite{Bell}, Chapter 16, as well as \cite{Albert,Maudlin}.} shows that every theory giving the quantum mechanical predictions must be nonlocal. This obviously conflicts with what is often considered to be the essence of Einsteinian relativity---the locality of physical interactions. The requirement of the Lorentz invariance of a physical theory, however, doesn't force locality. Thus a nonlocal Lorentz invariant theory is certainly possible. This is already rather clear from the meaning of the terms: While ``Lorentz invariance'' describes the behavior of a theory under certain transformations of reference frame, the term ``locality'' conveys that there is no action-at-a-distance. For an exhaustive discussion, see \cite{Maudlin}. An interesting classical example is the action-at-a-distance theory of Schwarzschild-Tetrode-Fokker-Wheeler-Feynman (see \cite{WF} and the references therein) replacing classical electrodynamics: In this Lorentz invariant theory the point charges interact directly with each other (on forward and backward light cones)---in a manner unmediated by an electromagnetic field, which is not a fundamental entity here. 
Bohmian mechanics \cite{Bohm52,DGZ92a} is manifestly nonlocal: the velocity of a particle at time $t$ depends in general upon the positions of all the other particles at that time \begin{equation} \label{guidlaw} {\bf v} _k ({\bf q}_1,\dots , {\bf q}_N,t) = \frac \hbar {m_k} \, {\rm Im}\, \frac {\nabla _k \psi _t({\bf q}_1,\dots , {\bf q}_N)}{ \psi _t({\bf q}_1,\dots , {\bf q}_N)} . \end{equation} In contrast to Newtonian mechanics, where for realistic interactions the instantaneous influence of the other particles decreases with increasing distance, and therefore widely separated systems are (in a certain sense) approximately independent, for Bohmian mechanics the spatial distance between the particles is irrelevant so long as the wave function of the entire system has a suitably entangled form. For a system of many Dirac particles, Bohm \cite{Bohmreview,BohmHiley} has proposed the following guiding condition \begin{equation} \label{bohmdirac} {\bf v} _k = \frac{ \psi^\dagger \bbox{\alpha} _k \psi}{\psi ^\dagger \psi} , \end{equation} which is formulated with respect to a certain reference frame, and is in fact not Lorentz invariant. Analogously to the nonrelativistic theory, the quantum flux equation which is a consequence of the many-particle Dirac equation guarantees that $\psi^\dagger \psi$ is an equivariant ensemble density for this dynamical system {\em in the chosen reference frame}, and therefore this theory reproduces the quantum predictions insofar as they derive from the probability density $\psi^\dagger \psi$. These predictions don't contain a trace of the preferred frame: Lorentz invariance holds on the observational, but not on the fundamental level. (The situation is similar for Bohm's quantum field theory \cite{Bohm52,Bohmreview,BohmHiley}.) There have been a number of arguments to the effect that a Bohmian theory must involve a preferred frame of reference, and thus must violate Lorentz invariance. 
The most interesting such argument has been put forward by Hardy \cite{Hardy}, who by discussing an intriguing experiment---one that we shall discuss in this paper as well, and that has been shown to contain even more surprises (\cite{Hardymax,Shellymax}, and in particular a nonlocality argument in a sense involving but one photon \cite{onephot})---claims to have shown that every realistic quantum theory must possess a preferred frame of reference, and thus that there can be no Lorentz invariant realistic quantum theory. However, because it rests on an unsuitable ``reality criterion'' \cite{Comment,Hreply}, Hardy's argument is wrong. There are even counterexamples to Hardy's argument: the multitime translation invariant formulation of the GRW theory by Bell (\cite{Bell}, Chapter 22) as well as the multitime translation invariant Bohmian theory we present in this paper are realistic models for the discussed experiment without a preferred frame. Furthermore, there is an outline for a relativistic Bohmian quantum field theory, in which a foliation of space-time into spacelike hypersurfaces is an additional beable \cite{DGZ90}. Finally one can find a number of models of relativistic $N$-particle theories with an action-at-a-distance defined in a Lorentz invariant manner, models that therefore have the potential to properly and relativistically describe quantum nonlocality as exhibited in Hardy's experiment. We allude to one such possibility in Section \ref{4}, but shall discuss these models in a subsequent work. No nontrivial Lorentz invariant realistic quantum theory is as yet known, but there is no compelling argument that this should be impossible.\footnote{And the history of the issue of hidden variables, i.e., of the completeness of the description provided by the wave function, should strongly warn us against too readily accepting impossibility claims.} On the contrary, the above mentioned models are steps towards a Lorentz invariant realistic quantum theory. 
One should, however, be aware that the determination of the empirical predictions of these models may present a difficult problem; in fact, for many models there is in general no reason that quantum equilibrium should hold with respect to any reasonable family of hypersurfaces; thus the statistical analysis will be different from that in nonrelativistic Bohmian mechanics and moreover, presumably, the predictions of such a theory won't agree with (all of) those of quantum theory. Similarly Albert (\cite{Albert}, p.\ 159ff), Bohm and Hiley (\cite{BohmHiley}, Section 12.6), Ghirardi et al.\ \cite{Ghirpar}, and Hardy and Squires \cite{HarSqu} also argue that a Bohmian theory must violate Lorentz invariance because a preferred frame is needed. The above mentioned models without a preferred frame (but with some ``simultaneity'' fixed in a Lorentz invariant way---note that this entails that there always are Lorentz frames in which future events influence the past, in contrast to assumptions in \cite{Albert,BohmHiley,Ghirpar,HarSqu}) show that less is established than claimed. \bigskip This paper is organized as follows: We show in Section \ref{2} that the joint distribution of the particle positions cannot in general agree with the quantum mechanical distribution in all Lorentz frames. This is in contrast to the situation for 1 particle---or, indeed, $N$ independent particles---as explained above. We also discuss why nevertheless the quantum mechanical predictions for performed measurements can be obtained. In Section \ref{3} we present a concrete step towards a Lorentz invariant Bohmian theory: a Bohmian theory invariant under certain limits of Lorentz transformations, limits defining a symmetry that expresses the essence of relativistic space-time---the non-existence of absolute time resp.\ simultaneity. 
These transformations, which we shall call ``multitime translations,'' have been discussed by Bell in connection with the GRW theory (\cite{Bell}, Chapter 22, and \cite{Belltalk}; Bell calls them ``relative time translations''). In Section \ref{3.1} we describe a multitime translation invariant formulation of Schr\"odinger's equation for systems composed of noninteracting parts. In Section \ref{3.2} we present the corresponding multitime translation invariant Bohmian theory and discuss its statistical properties. In Section \ref{3.3} we apply the general analysis to Hardy's experiment, focusing on how this experiment illustrates the general discussion in Section \ref{2}. We remark that there is no difficulty formulating a Lorentz invariant multitime version of the Dirac equation for a system of noninteracting Dirac particles \cite{wentzel}. However, the corresponding Lorentz invariant Bohmian theory lacks statistical transparency. Indeed, at first sight, Lorentz invariance and statistical transparency appear to be mutually exclusive. See Section \ref{4} for a bit more detail on this, as well as some further reflections on Lorentz invariance. For systems that consist of noninteracting subsystems, Bell has shown that the GRW theory can be reformulated in such a way that it becomes invariant under multitime translation (\cite{Bell}, Chapter 22). He regarded this as an important step towards a genuinely Lorentz invariant precise formulation of quantum theory, declaring that ``And I am particularly struck by the fact that the model is as Lorentz invariant as it could be in the nonrelativistic version. It takes away the ground of my fear that any exact formulation of quantum mechanics must conflict with fundamental Lorentz invariance.'' (\cite{Bell}, p.\ 209). The multitime translation invariant Bohmian theory we discuss in this paper may, perhaps, be regarded as showing that this assertion applies also to Bohmian mechanics. 
\bigskip For simplicity, we shall put all masses $m_k=1$ and $\hbar =c=1$. \section{Quantum equilibrium cannot hold in all Lorentz frames}\label{2} We consider an arbitrary theory for $N(\geq 2)$ particles, i.e., a (possibly statistical) specification of all possible $N$-tuples of space-time paths for the $N$ particles (for example as given by solutions of a system of differential equations). We shall call each such possible ``history'' an {\it N-path}. We assume that each spacelike hypersurface is crossed exactly once by each trajectory, and consider an arbitrary probability measure $P$ on the $N$-paths. This determines the distribution of crossings $\rho ^\Sigma :\Sigma ^N \to {\rm I\! R}$ for any spacelike hypersurface $\Sigma$. We now want the probabilistic predictions of the theory to agree as far as possible with those of quantum theory. Complete agreement would be straightforward if for any quantum state $\psi$ there were a $P$ such that for all spacelike hypersurfaces $\Sigma$ the distribution of crossings $\rho ^\Sigma$ agrees with the quantum mechanical joint distribution of the (measured) positions on $\Sigma$. For $\Sigma$ a spacelike hyperplane, i.e., a simultaneity plane or constant-time slice of a Lorentz frame $\Lambda$, this is given by ${|\psi^{\Sigma}|}^2$ where $\psi^{\Sigma}=\psi^{\Lambda}$, the wave function in frame $\Lambda$. However, this is not in general possible: \medskip \begin{deflist}{$(*)$} \item[$(*)$]{\it There does not in general exist a probability measure P on N-paths for which the distribution of crossings $\rho^\Sigma$ agrees with the corresponding quantum mechanical distribution on all spacelike hyperplanes $\Sigma$.} \end{deflist} \medskip The field theoretical analogue of this assertion has been conjectured by D\"urr, Goldstein, and Zangh\`{\i} in 1990 \cite{DGZ90}. Samols discusses the equivalent result for his stochastic realistic model of a light cone lattice quantum field theory \cite{Samols}. 
The caveat ``in general'' refers to the fact that there are exceptional physical situations for which such a $P$ does exist. Consider, for example, 2 independent Dirac particles, i.e., with a wave function that is a product of 1-particle wave functions $\psi = \psi _a \psi_b$ and independent evolutions given by (\ref{1partbohm}): $\frac {dX_k}{ds}=j_k(X_k)$, $j_k^\mu =\bar \psi _k \gamma ^\mu \psi _k$, $k=a,b$. Then, as explained above, if $\rho^{\Sigma_0} = j^0_a j^0_b$ with respect to one spacelike hyperplane $\Sigma_0$, then $\rho ^\Sigma = j^0_a j^0_b$ for all spacelike hyperplanes $\Sigma$. We believe, however, that such exceptional physical situations are rare. The assertion $(*)$ is more or less an immediate consequence of any of the no-hidden-variables-nonlocality theorems---Bell's \cite{Bell}, that of Clauser, Horne, Shimony, Holt \cite{CHSH}, that of Greenberger, Horne, Zeilinger \cite{GHZ} (see also \cite{Mermin}), or what have you---for the spin components of a multiparticle system: By means of a suitable placement of appropriate Stern-Gerlach magnets the inconsistent joint spin correlations can be transformed to (the same) inconsistent joint spatial correlations for particles at different times. Since the existence of a probability measure $P$ on $N$-paths implies the existence and hence the consistency of all crossing distributions, the assertion follows. Since this is an important result, we shall provide an elaboration using one of the sharpest nonlocality theorems, that of Hardy \cite{Hardy}. It should be clear from our treatment of this example how to arrange the magnets to deal with any other version. 
Consider the experiment described in Figure \ref{rhopsibild}, which is similar to the EPR-Bohm experiment and which is a slight modification of the experiment discussed by Hardy \cite{Hardy}, which we shall call ``Hardy's experiment.'' A pair of particles is prepared in Hardy's state $\psi =\psi _{\rm Hardy}$, which has, say in frame {\it I}, the form (we write only the ``spin'' part) \begin{eqnarray} \psi_{\rm Hardy} & = & \frac 1{\sqrt 3} \Bigl( |+\rangle _z^a|-\rangle _z^b-\sqrt 2 |-\rangle _x^a|+\rangle _z^b \Bigr) \label{hardy1}\\ & = & \frac 1{\sqrt3} \Bigl( |-\rangle _z^a|+\rangle _z^b - \sqrt 2 |+\rangle _z^a|-\rangle _x^b \Bigr) \label{hardy3}\\ & = & \frac 1{\sqrt 3}\Bigl( |+\rangle _z^a|-\rangle _z^b- |+\rangle _z^a|+\rangle _z^b+|-\rangle _z^a|+\rangle _z^b \Bigr) \label{hardy2}\\ & = & \frac 1{\sqrt{12}} \Bigl( |+\rangle _x^a|+\rangle _x^b - |+\rangle _x^a|-\rangle _x^b - |-\rangle _x^a|+\rangle _x^b - 3 |-\rangle _x^a|-\rangle _x^b \Bigr), \label{hardy4} \end{eqnarray} where $|+\rangle _x,|-\rangle _x$ denote the eigenfunctions of $\sigma _x$ with eigenvalue $+1$ resp.\ $-1$, and $|+\rangle _z,|-\rangle _z$ denote the eigenfunctions of $\sigma _z$ with eigenvalue $+1$ resp.\ $-1$. We have used that $|+\rangle _x=(|+\rangle _z+|- \rangle _z)/\sqrt 2$ and $|-\rangle _x= (|+\rangle _z-|-\rangle _z)/\sqrt 2$. 
Denoting by $(a,b)_{(x,z)}$ the components of spin in direction $x$ resp.\ $z$ of particle $a$ resp.\ $b$, the following quantum mechanical predictions can be read off from the form of the wave function: \begin{eqnarray} & a_x=+1 \ \Rightarrow \ b_z=-1 & (\mbox{from } (\ref{hardy1}) ) \label{pred1}\\ & b_x=+1 \ \Rightarrow \ a_z=-1 & (\mbox{from } (\ref{hardy3}) ) \label{pred3}\\ & \mbox{not }(a_z=-1 \ \mbox{and} \ b_z=-1) & (\mbox{from } (\ref{hardy2}) ) \label{pred2}\\ & \displaystyle \mbox{Prob}(a_x=+1 \ \mbox{and} \ b_x=+1)= \frac 1{12} \quad & (\mbox{from }(\ref{hardy4}) ) \label{pred4} \end{eqnarray} These predictions are clearly inconsistent for random variables since the last one together with the first two then imply that $\{a_z=-1$ and $b_z=-1\}$ has probability at least 1/12. \begin{figure}[t] \begin{center} \leavevmode \epsfxsize=10.3cm \epsffile{rhopsic.eps} \end{center} \caption{Space-time diagram of the evolution of the wave function in Hardy's experiment. In the shaded regions there are Stern-Gerlach magnets $A_x$, $A_z$, $B_x$, and $B_z$, which split the respective parts of the wave function into the respective eigenfunctions $({|+\rangle ,|- \rangle })_{x,z}^{a,b}$. Three different frames of reference are also drawn.} \label{rhopsibild} \end{figure} Now suppose that the setup is such that after the two particles are widely separated from each other, each of them runs through a Stern-Gerlach magnet $A_x$ resp.\ $B_x$, which splits the respective parts of the wave function into the eigenfunctions $|+\rangle _x^a$ and $|-\rangle _x^a$ resp.\ $|+\rangle _x^b$ and $|-\rangle _x^b$. These parts are later recombined by reverse magnets after which they are led through a second Stern-Gerlach magnet $A_z$ resp.\ $B_z$, which splits the wave function into the eigenfunctions $|+\rangle _z^a$ and $|- \rangle _z^a$ resp.\ $|+\rangle _z^b$ and $|-\rangle _z^b$. 
Thus the spin components are (more or less) perfectly correlated with the path variables as indicated in Figure \ref{rhopsibild}, which therefore inherit the inconsistency of the spin components. The assertion follows. We remark that the measurements to which the quantum mechanical predictions refer might well be performed in this way, but with the insertion of photographic plates behind the appropriate Stern-Gerlach magnets. We perhaps should be even more explicit, particularly since we will need later to refer to some of the notation to be developed here. Suppose that there is a theory for 2 particles for which the distribution of crossings $\rho^\Sigma$ agrees with the quantum mechanical distribution for position (measurements), given by $|\psi ^\Sigma |^2$, for all spacelike hyperplanes $\Sigma$. The hyperplanes we shall consider are simultaneity planes in the Lorentz frames {\it I} at $t^I=t^I_1$ and $t^I_2$, {\it II} at $t^{\it II}=0$, and {\it III} at $t^{\it III}=0$, as shown in Figure \ref{rhopsibild}. We shall denote these by $\Sigma ^{\it I}(t^I_1)$, $\Sigma ^{\it II}(0)$, etc.. Furthermore we shall abbreviate $\psi ^{\Sigma ^{\it I}(t^I_1)}$ by $\psi ^I_1$, $\rho ^{\Sigma ^{\it I}(t^I_1)}$ by $\rho ^I_1$, $\psi ^{\Sigma ^{\it II}(0)}$ by $\psi ^{\it II}_0$, $\rho ^{\Sigma ^{\it II}(0)}$ by $\rho ^{\it II}_0$, etc.. Consider the configurational part of the wave function in these Lorentz frames. We shall now regard $|\pm \rangle ^{a,b} _{x,z}$ as representing the appropriate configurational part of the wave function as indicated in Figure \ref{rhopsibild}, with ${\rm supp}\, |\pm \rangle ^{a,b} _{x,z}$ denoting its spatial support.\footnote{It should perhaps be noted that a Dirac spinor which in frame {\it I}\/ is a spin $x$/$z$ eigenfunction will not be a spin $x$/$z$ eigenfunction in the frames {\it II}\/ or {\it III}\/ which are boosted in the $y$-direction. 
Our notation here should not be construed as implying otherwise.} Then \begin{eqnarray} \psi^I_1 & = & \frac 1{\sqrt{12}} \Bigl( |+\rangle _x^a|+\rangle _x^b - |+\rangle _x^a|-\rangle _x^b - |-\rangle _x^a|+\rangle _x^b - 3 |-\rangle _x^a|-\rangle _x^b \Bigr) \label{psi11} \\ \psi ^{\it II}_0 & = & \frac 1{\sqrt6} \Bigl( |-\rangle _z^a|+\rangle _x^b + |-\rangle _z^a|-\rangle _x^b - 2 |+\rangle _z^a|-\rangle _x^b \Bigr) \label{psi2} \\ \psi ^{\it III}_0 & = & \frac 1{\sqrt 6} \Bigl( |+\rangle _x^a|- \rangle _z^b + |-\rangle _x^a|-\rangle _z^b - 2 |-\rangle _x^a|+\rangle _z^b \Bigr) \label{psi3} \\ \psi ^I_2 & = & \frac 1{\sqrt 3}\Bigl( |+\rangle _z^a|-\rangle _z^b-|+\rangle _z^a|+\rangle _z^b+|-\rangle _z^a|+\rangle _z^b \Bigr) \label{psi12} \end{eqnarray} {From} the assumption that $\rho ^\Sigma = |\psi ^\Sigma |^2$ in all frames, we obtain, from (\ref{psi11}) or (\ref{pred4}), that for the simultaneity surface $\Sigma = \Sigma ^{\it I}(t^I_1)$ \begin{eqnarray} & & \int\limits_{{\rm supp}\, |+\rangle _x^a \times {\rm supp}\, |+\rangle _x^b} \rho^I_1(q_a,q_b)\, dq_a\, dq_b \nonumber\\ & & = \int\limits_{{\rm supp}\, |+\rangle _x^a \times {\rm supp}\, |+\rangle _x^b} |\psi ^I_1(q_a,q_b)|^2 \, dq_a\, dq_b = \frac 1{12} . 
\label{wid1} \end{eqnarray} For the simultaneity surfaces $\Sigma ^{\it II}(0)$ and $\Sigma ^{\it III}(0)$ we have from (\ref{psi2}) resp.\ (\ref{psi3}) (or (\ref{pred3}) resp.\ (\ref{pred1})) that \begin{eqnarray} \rho^{\it II}_0 (q_a, q_b)=0 & \mbox{ for } & (q_a, q_b) \in {\rm supp}\, |+\rangle _z^a \times {\rm supp}\, |+\rangle _x^b ,\label{wid2} \\ \rho^{\it III}_0 (q_a, q_b)=0 & \mbox{ for } & (q_a, q_b) \in {\rm supp}\, |+\rangle _x^a \times {\rm supp}\, |+\rangle _z^b , \label{wid3} \end{eqnarray} and for $\Sigma = \Sigma ^{\it I}(t^I_2)$ from (\ref{psi12}) or (\ref{pred2}) that \begin{equation} \label{wid4} \rho^{\it I}_2 (q_a, q_b)=0 \ \ \mbox{ for } \ \ (q_a, q_b) \in {\rm supp}\, |-\rangle _z^a\times {\rm supp}\, |-\rangle _z^b .\end{equation} Consider now that part of the ensemble of two-paths containing paths that cross ${\rm supp}\, |+\rangle _x^a \times {\rm supp}\, |+\rangle _x^b $. {From} (\ref{wid1}), this has probability $1/12$. {From} (\ref{wid2}), particle $a$ will be in ${\rm supp}\, |-\rangle _z^a $ at $t^I_2$; from (\ref{wid3}), particle $b$ will be in ${\rm supp}\, |-\rangle _z^b $ at $t^I_2$; thus \[ \int\limits_{{\rm supp}\, |-\rangle _z^a\times {\rm supp}\, |-\rangle _z^b} \rho^I_2 (q_a,q_b)\, dq_a\, dq_b \geq \frac 1{12}, \] in contradiction with (\ref{wid4}). (This argument assumes that, say, the crossing track of particle $a$ for $\Sigma ^{\it I}(t^I_2)$ agrees with that for $\Sigma ^{\it II}(0)$---i.e., that there is no sudden change of track. By a suitable choice of geometry the violation of this assumption can be made as implausible as we like.) 
\hspace*{\fill} $\Box$ \bigskip We can more briefly, though somewhat imprecisely, rephrase $(*)$ by saying that ``Quantum equilibrium ($\rho=|\psi |^2$) cannot hold in all Lorentz frames.'' Although the notions of the wave function $\psi$ in position representation as well as that of a position measurement are problematical in relativistic quantum theory, the impact of this statement is not thereby diminished. In fact, the statement ``$\rho=|\psi |^2$ cannot hold in all Lorentz frames'' should be understood as follows: The joint distributions given by quantum theory for position measurements (from whatever formalism they arise) cannot in general agree with the distribution of the actual particle positions in all Lorentz frames. This is the case, as pointed out above, already if only the (experimentally well-established) predictions of the distribution of spin measurements---spin is measured, as is any observable, cf.\ \cite{DDGZ}, ultimately by measuring some position in a suitable experiment (here with Stern-Gerlach magnets)---in the singlet state (which is the relevant state for the earlier versions of the nonlocality theorems) are considered. An immediate question is whether this leads, for a theory with trajectories, to experimentally detectable violations of quantum mechanical predictions. That it ain't necessarily so will be illustrated by a concrete model in a later section. But it is already clear from (nonrelativistic) Bohmian mechanics that the validity of $\rho = |\psi |^2$ in just {\it one}\/ frame is sufficient to derive the quantum mechanical predictions for observations at different times: Assume that the frame corresponding to (Newtonian) absolute time---the frame in which quantum equilibrium $\rho=|\psi |^2$ holds for Bohmian mechanics---corresponds to system $I$ in Hardy's experiment in Figure \ref{rhopsibild}. 
To derive from Bohmian mechanics the correct prediction for the joint distribution of a measurement of $a_x$ and a later measurement of $b_z$, one has to take into account that the actual performance of measuring $a_x$, which requires an intervention such as the suitable insertion of a photographic plate, influences the future evolution of the whole system, and in particular, nonlocally and instantaneously, the future path of particle $b$. This can be conveniently described in terms of the effective ``collapse of the wave function.'' The ``unmeasured'' distributions do not in general give the correct predictions for the outcomes of experiments! For a rather detailed discussion of related matters, see \cite{DGZ92a}, Sections 8--10. Moreover, it is rather clear that any two theories agreeing at all times on the spatial distribution of particles for some frame must be empirically equivalent, though we shall not try here to give a precise formulation of this assertion. We note, however, that for a theory involving a foliation of space-time into hypersurfaces, such as the proposal of D\"urr, Goldstein, and Zangh\`{\i} \cite{DGZ90}, as well as that of Samols \cite{Samols}, it is natural to demand that ``quantum equilibrium'' hold on these hypersurfaces. For the proposal in Section \ref{3.2} of this paper, a theory involving particle interactions that are instantaneous with respect to a specified synchronization, one is led to demand ``quantum equilibrium'' with respect to this synchronization. That this indeed suffices to recover the quantum mechanical predictions for the outcomes of all joint measurements is implied by the fact that the joint results for any family of measurements can always be transferred to a common place and time---and must be if these results are to be subject to the analysis of a single individual (cf.\ \cite{DGZ90,Samols}, and \cite{DGZ92a}, point 19 on p.\ 900). 
This suggests that even a suitable kind of ``local quantum equilibrium'' should be sufficient to obtain the standard quantum mechanical predictions. \section{The multitime formalism}\label{3} \subsection{Multitime translation invariance}\label{3.1} Consider a system composed of $n$---we put $n=2$ for simplicity---widely separated subsystems. Even observers who are slowly (``nonrelativistically'') moving relative to each other need not agree on the simultaneity of events in the separated subsystems: let $(t_\alpha , x_\alpha )$, $(t_\beta , x_\beta )$ be the coordinates of the events $\alpha$ resp.\ $\beta$ for observer 1. We may put $t_\alpha=0$, $x_\alpha=0$. The two events are simultaneous, $t_\alpha=t_\beta$, and widely separated from each other, $x_\beta \gg 1$. A second observer, slowly moving in the $x$-direction relative to the first observer, will describe the same events by the following primed coordinates, cf.\ Figure \ref{trafobild}: \begin{eqnarray*} t_\alpha'=t_\alpha=0, & & x_\alpha'=x_\alpha=0,\\ t_\beta'=\gamma (t_\beta-vx_\beta) \approx -\vartheta, & & x_\beta' =\gamma (x_\beta-vt_\beta) \approx x_\beta, \end{eqnarray*} where $v\approx 0$, so that $\gamma = 1/ \sqrt{1-v^2} \approx 1$. It is further assumed that $x_\beta$ is sufficiently large that $vx_\beta =\vartheta$ is of order unity. For observer 2, the events $\alpha$ and $\beta$ are not simultaneous, $t_\alpha' \neq t_\beta'$, not even approximately. More precisely, in the limit in which $x_\beta \to \infty$ and $v\to 0$ in such a manner that $vx_\beta =\vartheta \neq 0$, the Lorentz transformation becomes simply a translation of relative time. Consequently, for the case of a system composed of widely separated subsystems we might demand of a nonrelativistic theory invariance with respect to independent shifts of the zeros of the subsystems' time scales (on subsystem clocks). 
The relevance of this nonrelativistic residue, or analogue, of Lorentz invariance, especially for the discussion of the possibility of a Lorentz invariant realistic quantum theory, has been pointed out by Bell (\cite{Bell}, Chapter 22, and \cite{Belltalk}). To specify the space-time transformation corresponding to this change in frame of reference, we have to introduce two separate coordinate systems for the two widely separated subsystems $a$ and $b$. On configuration-space-time, the multitime translation is given by \begin{eqnarray} L_\tau :\ {\rm I\! R}\times {\rm I\! R}^{3N_a} \times {\rm I\! R}\times {\rm I\! R}^{3N_b} & \longrightarrow & {\rm I\! R}\times {\rm I\! R}^{3N_a} \times {\rm I\! R}\times {\rm I\! R}^{3N_b}, \ \tau = (\tau_a,\tau_b) \in {\rm I\! R}^2 \nonumber\\ z:=(z_a,z_b):=(t_a,q_a,t_b,q_b) & \longmapsto & (t_a-\tau_a,q_a,t_b-\tau_b ,q_b)=z'=L_\tau z \label{Ltaudef} \end{eqnarray} where $N_a$ and $N_b$ are the particle numbers of the respective subsystems. At first thought, one might not expect a quantum theory to be invariant under $L_\tau$, because absolute time seems necessary to mediate the action-at-a-distance of Schr\"odinger's equation, not to mention the more explicit nonlocality of Bohmian mechanics. Indeed, for the usual Schr\"{o}dinger equation as well as for the GRW model and Bohmian mechanics it would appear that the multitime translation cannot be discussed at all because time appears in the wave function only as common (absolute) time. 
But if the subsystems $a$ and $b$ are independent, i.e., if there is no interaction potential between the subsystems \begin{eqnarray*} & V(q_a,q_b) = V_a(q_a) + V_b(q_b) & \\ & H=H_a+H_b,\quad H_k= -\frac 12 \Delta _k + V_k ,\quad k=a,b & \end{eqnarray*} so that the Hamiltonians $H_a$ and $H_b$ commute, the Schr\"odinger evolution may be reformulated so that it becomes multitime translation invariant: {From} the ordinary one-time wave function $\psi _t=e^{- iHt}\psi_0=U_t\psi_0$ we define a two-time wave function $\psi (t_a, t_b)\in L^2({\rm I\! R} ^{3N_a})\otimes L^2({\rm I\! R} ^{3N_b}) \cong L^2({\rm I\! R} ^{3(N_a+N_b)})$ \[ \psi (t_a,t_b) = e^{-iH_at_a} e^{-iH_bt_b} \psi _0=U_{t_a}^a U_{t_b}^b \psi_0 \] satisfying two separate Schr\"{o}dinger equations \begin{equation} \label{sgl2} i\, \frac{\partial \psi}{\partial {t_a}} =H_a \psi,\quad i\, \frac{\partial \psi }{\partial {t_b}} = H_b \psi . \end{equation} This system of partial differential equations, with $\psi$ transforming in the obvious way, \[ \psi (z) = \psi \circ L_\tau ^{-1} (L_\tau z) =: \psi '(z') \] is invariant under $L_\tau$. In particular, the unitary representation of the group of multitime translations is given by $U_\tau = U^a_{\tau_a}U^b _{\tau_b}$: \begin{equation} \label{Utaudef} \psi ' = e^{-iH_a\tau_a }e^{-iH_b\tau_b }\psi = U^a _{\tau_a}U^b _{\tau_b} \psi = U_\tau \psi \end{equation} \begin{figure}[t] \begin{center} \leavevmode \epsfxsize=7.2cm \epsffile{trafo.eps} \end{center} \caption{$\alpha$ and $\beta$ are two widely separated events. 
In the primed frame of reference, corresponding to a slowly moving observer, these two events are not simultaneous.} \label{trafobild} \end{figure} Note that in any frame of reference given by a particular synchronization of the subsystem times, i.e., where $t_a=s$ is simultaneous with $t_b=s+h$, the wave function in ``frame $h$,'' which is given by $\psi^{h}_t=\psi(t,t+h)$ and recognized as $\psi^h_t= \psi '(t,t)$ for a multitime shift by $\tau = (0,h)$ from the unprimed frame, satisfies the one-time Schr\"{o}dinger equation. It is also easy to see that the transition to a two-time wave function transforms the usual quantum measurement formalism into a multitime translation invariant form. We shall use here the Heisenberg picture for convenience as well as for analogy with relativistic quantum theory. Let $\psi =\psi_0=\psi (0,0)$ be the Heisenberg state of the system, and consider a sequence of observables $(M^a_j)_{1\leq j\leq k}$ and $(M^b_j)_{1\leq j\leq \ell }$, which are measured at times $t^a_1<\dots < t^a_{k}$ resp.\ $t^b_1<\dots <t^b_{\ell}$. Here $M^a_j$ acts only on system $a$, i.e., $M^a_j= O^a_j\otimes \openone$ with observables $O^a_j$ on $L^2({\rm I\! R} ^{3N_a})$, and $M^b_j$ acts only on system $b$, i.e., $M^b_j= \openone \otimes O^b_j$ with observables $O^b_j$ on $L^2({\rm I\! R} ^{3N_b})$. Thus the observables and the unitary evolution of system $a$, $M^a_j$ and $U^a_{t_a}=e^{-iH_at_a}$, commute with the observables and the unitary evolution of system $b$, $M^b_j$ and $ U^b_{t_b}=e^{-iH_bt_b}$: for all $j,j',t_a,t_b,$ \begin{equation} [M^a_j,M^b_{j'}]=0, \quad [M^a_j, U^b_{t_b}]=0, \quad [M^b_j, U^a_{t_a}]=0, \quad [U^a_{t_a}, U^b_{t_b}]=0. 
\label{commute} \end{equation} We shall assume for simplicity that all the observables $M^a_j$ and $M^b_j$ have discrete spectrum and denote by $\pi ^a_{j,\alpha }$ resp.\ $\pi ^b_{j,\beta }$ the projection operator onto the eigenspace of $M^a_j$ resp.\ $M^b_j$ corresponding to the eigenvalue $\alpha$ resp.\ $\beta$. We introduce the Heisenberg operators \[ \pi^a_{j,\alpha}(t_a) := U^a_{- t_a}\, \pi ^a_{j,\alpha}\, U^a_{t_a} \quad \mbox{and} \quad \pi^b_{j,\beta}(t_b) := U^b_{-t_b}\, \pi ^b_{j,\beta}\, U^b_{t_b}, \] which, by (\ref{commute}), agree with the usual ones involving the full evolution $U_t=U^a_tU^b_t$. The joint probability for obtaining the measurement results $M^a_1=\alpha_1$, \dots , $M^a_{k}=\alpha_{k}$, $M^b_1=\beta_1$, \dots , $M^b_{\ell}=\beta_{\ell}$ is given by \begin{eqnarray} P ( M^a_1=\alpha_1, \dots , M^a_{k}=\alpha_{k}, M^b_1=\beta_1, \dots , M^b_{\ell}=\beta_{\ell}\Bigr) & & \nonumber \\ = \| \pi ^b_{\ell ,\beta_{\ell}}(t^b_{\ell}) \dots \pi ^b_{1,\beta_{1}} (t^b_{1}) \, \pi ^a_{k,\alpha_{k}} (t^a_{k}) \dots \pi ^a_{1,\alpha_{1}} (t^a_{1}) \, \psi \| ^2 .& & \label{measform} \end{eqnarray} Considering that under a multitime translation the Heisenberg operators transform as \begin{eqnarray*} \pi^a_{j,\alpha}(t_a') & = & U^a_{-t'_a}\, \pi ^a_{j,\alpha}\, U^a_{t'_a} = U^a_{\tau_a}\, \pi ^a_{j,\alpha}(t_a)\, U^a_{-\tau_a} \ = \ U_\tau \, \pi ^a_{j,\alpha}(t_a)\, U_\tau^{-1} \\ \pi^b_{j,\beta}(t_b') & = & U^b_{-t'_b}\, \pi ^b_{j,\beta}\, U^b_{t'_b} = U^b_{\tau_b}\, \pi ^b_{j,\beta}(t_b)\, U^b_{-\tau_b} \ = \ U_\tau \, \pi ^b_{j,\beta}(t_b)\, U_\tau^{-1} \end{eqnarray*} and the state transforms according to (\ref{Utaudef}), one sees that the formula (\ref{measform}) is in fact multitime translation invariant. In particular, the predictions of the quantum measurement formalism are independent of the frame of reference. 
Thus the quantum mechanical measurement formalism for a system which consists of independent widely separated subsystems is multitime translation invariant. Note also that the probability of obtaining the results $M^a_{i}=\alpha_{i}$, $M^b_{j}=\beta_{j}$ {\it given}\/ the results $M^a_1=\alpha_1$, \dots , $M^a_{i-1}=\alpha_{i-1}$, $M^b_1=\beta_1$, \dots , $M^b_{j-1}=\beta_{j- 1}$, \[ \frac{ \| \pi ^b_{j,\beta_{j}} (t^b_{j}) \dots \pi ^b_{1,\beta_{1}} (t^b_{1})\, \pi ^a_{i,\alpha_{i}} (t^a_{i}) \dots \pi ^a_{1,\alpha_{1}} (t^a_{1})\, \psi \| ^2 }{ \| \pi ^b_{j-1,\beta_{j-1}} (t^b_{j-1}) \dots \pi ^b_{1,\beta_{1}} (t^b_{1})\, \pi ^a_{i-1,\alpha_{i-1}} (t^a_{i-1}) \dots \pi ^a_{1, \alpha_{1}} (t^a_{1}) \psi \| ^2 } \] can be conveniently expressed as \[ \| \pi ^b_{j ,\beta_{j}} ( t^b_{j}) \, \pi ^a_{i,\alpha_{i}} ( t^a_{i}) \psi_{\rm eff} \| ^2 \] with the ``collapsed wave function'' \[ \psi_{\rm eff} = \frac{ \pi ^b_{j-1, \beta_{j-1}} (t^b_{j-1}) \dots \pi ^b_{1, \beta_{1}} (t^b_{1}) \, \pi ^a_{i-1, \alpha_{i-1}} (t^a_{i-1}) \dots \pi ^a_{1,\alpha_{1}} (t^a_{1}) \, \psi }{\| \pi ^b_{j-1, \beta_{j-1}} (t^b_{j-1}) \dots \pi ^b_{1, \beta_{1}} (t^b_{1}) \, \pi ^a_{i-1, \alpha_{i-1}} (t^a_{i-1}) \dots \pi ^a_{1,\alpha_{1}} (t^a_{1}) \, \psi \| } . \] (We find an analogous formula if we condition on a smaller initial segment.) Within this framework an EPR experiment can be described---the subsystems, while not explicitly interacting, are coupled by their common wave function $\psi (t_a,t_b)$---and one can explicitly see, for this two-time yet orthodox model, that {\it the EPR-Bell nonlocality does not demand the existence of a preferred frame of reference.} Despite the presence of EPR-correlations, these do not permit the transmission of ``signals'': {From} the results of measurements on system $a$ alone, one can draw no inference about the possible interventions on system $b$---the kinds of experiments performed on system $b$. 
The crucial assumption responsible for this property is the commutativity (\ref{commute}). In axiomatic quantum field theory the analogue of this assumption, namely the commutativity of Heisenberg operators corresponding to measurements in spacelike separated regions, is one of the fundamental postulates, sometimes called ``local commutativity'' or ``microscopic causality'' (see for example \cite{SW}). It conveys that experiments in spacelike separated regions do not disturb each other, so that relativistic causality is not violated. However, it is important to recognize (as well as all too rare) that EPR and Bell have shown that the quantum correlations between observables for which ``local commutativity'' holds cannot in general be explained by a local theory! Bell has shown that the GRW model can also be formulated in a multitime translation invariant manner (\cite{Bell}, Chapter 22). Bell's result is sometimes regarded as indicating that the GRW theory is superior to Bohmian mechanics with respect to the problem of finding a Lorentz invariant extension. In the next section we show that such a conclusion is perhaps unfounded. \subsection{A multitime translation invariant Bohmian theory}\label{3.2} We formulate a multitime Bohmian theory that is invariant under multitime translation. Consider a system consisting of $n$ widely separated subsystems, as described in Section \ref{3.1}, with an $n$-time wave function satisfying (the analogue of) (\ref{sgl2}). As usual, we shall for simplicity put $n=2$. We shall denote again by $N_a$ and $N_b$ the particle numbers in the subsystems and put $N=N_a+N_b$. The beables of the multitime Bohmian theory are first of all the usual beables of a Bohmian theory, namely the wave function, here the two-time wave function, and the trajectories of the particle configuration in the two subsytems, $Q_a(t)$ and $Q_b(t)$. 
The straightforward way to formulate a multitime translation invariant Bohmian theory for the evolution of these paths is to introduce as an additional beable a synchronization: a path in two-time ${\rm I\! R}^2$, i.e., an equivalence class of maps $(T_a,T_b): {\rm I\! R}\to {\rm I\! R}^2$, $s\mapsto (T_a(s), T_b(s))$ differing only in their parametrization. The synchronization together with the subsystem trajectories defines a {\it synchronized N-path}\/ in configuration-space-time parametrized by $s$ \[ \Bigl( T_{a}(s),\, Q_{a}(s),\, T_b(s), \, Q_b(s)\Bigr) =: Z(s) \] with $Q_{a}(s)\equiv Q_{a}(T_a(s)),\ Q_b(s)\equiv Q_b(T_b(s))$. We prescribe for the synchronized $N$-path the following guiding equation \begin{eqnarray} \frac { dT_a}{ds} = 1,& & \frac { dT_b}{ds} =1, \nonumber\\ \frac { dQ_a}{ds} = v_a^{\psi }(Z),& & \frac{ dQ_b}{ds}=v_b^\psi (Z), \label{bohm2} \end{eqnarray} with $v_a^\psi $ and $v_b^\psi $ given as usual by \begin{equation} \label{velfield2} v_a^\psi = {\mbox {Im}}\, \frac {\nabla _{q_a} \, \psi}{\psi}, \quad v_b^\psi = {\mbox {Im}}\, \frac {\nabla _{q_b} \, \psi}{\psi}. \end{equation} {\it The Bohmian theory given by the Eqs.\ (\ref{sgl2}, \ref{bohm2}, \ref{velfield2}) does not have a preferred ``frame of reference,'' and is obviously invariant under} $L_\tau $, i.e., if $(\psi , \, Z)$ is a solution of (\ref{sgl2}, \ref{bohm2}), then so is $(\psi ',\, Z')=(\psi \circ L_\tau ^{-1}, L_\tau \circ Z)$. The parameter $s$ labels the synchronization with respect to which the nonlocal interaction is mediated: The velocity of system $a$ at the parameter value $s$ depends, through $\psi(t_a,q_a,t_b,q_b)$, upon the configuration of the $a$-system at time $T_a(s)$---more precisely, upon $Q_{a}(s)$ and $T_a(s)$---as well as on the configuration $Q_b(s)$ and the time $T_b(s)$ of the $b$-system corresponding to parameter value $s$. In particular, the velocity ``field'' is a functional of the two-time wave function at the appropriate times. 
Physical significance pertains only to the synchronized $N$-path $Z({\rm I\! R})\subset {\rm I\! R}^{2+3N}$, not to the particular parametrization determined by (\ref{bohm2}). Thus, just as with Eqn.\ (\ref{1partbohm}), (\ref{bohm2}) is physically equivalent to all equations of the form $\displaystyle \frac {dZ}{ds} = A(Z) (1,v_a^\psi (Z) ,1,v_b^\psi(Z))$ with arbitrary positive functions $A$ on ${\rm I\! R}^{2+3N}$. For the statistical analysis of this theory, it is natural to look for a distinguished measure. As a consequence of (\ref{sgl2}), we have the two identities which have the form of continuity equations \begin{equation} \label{ce1} \frac{\partial |\psi |^2 }{\partial t_a}+ {\rm div} _{q_a} j_a^\psi =0 \quad \mbox{or} \quad {\rm div} _{z_a} {J_a^\psi}=0 \end{equation} and \begin{equation} \label{ce2} \frac{\partial |\psi |^2}{\partial t_b}+ {\rm div} _{q_b}j_b^\psi =0 \quad \mbox{or} \quad {\rm div} _{z_b} {J_b^\psi }=0 \end{equation} with $J_k^\psi =(|\psi |^2, j_k^\psi )$ and $j_k^\psi = |\psi |^2 \, v_k^\psi = {\rm Im}\, (\psi ^* \, \nabla_k \, \psi )$, $k=a,b$. By analogy with the statistical analysis of the usual Bohmian mechanics, it might at first glance seem appropriate to seek a stationary measure for $Z$, i.e., for the dynamical system given by Eqs.\ (\ref{bohm2}, \ref{velfield2}). The continuity equation for this dynamical system, for a (continuously differentiable) density $f: {\rm I\! R} \times {\rm I\! R}^{2+3N} \to {\rm I\! R}$, \begin{equation} \label{contz} \frac{\partial f}{\partial s} + {\rm div} _{z_a} (fw_a^\psi ) + {\rm div} _{z_b} (fw_b^\psi ) =0 \end{equation} with $w_k^\psi := (1,v_k^\psi )$, is, by (\ref{ce1}) and (\ref{ce2}), solved (trivially) by $f=|\psi |^2$, which is stationary with respect to the synchronization parameter $s$. Hence $|\psi |^2$ is certainly a distinguished measure on the space ${\rm I\! R}^{2+3N}$ of initial values for Eqs.\ (\ref{bohm2}). 
But it is not normalizable (by unitarity); moreover, for the dynamical system given by Eqs.\ (\ref{bohm2}, \ref{velfield2}) there can be no density, normalizable on ${\rm I\! R}^{2+3N}$, that is stationary with respect to the evolution parameter $s$---since a stationary measure for $Z$ yields a stationary marginal measure for $T_a$, and by the first of Eqs.\ (\ref{bohm2}) all stationary measures for $T_a$ must be proportional to the Lebesgue measure on ${\rm I\! R}$. In this regard it is also important to recognize that a general probability density $f(s)$ satisfying (\ref{contz}), while defining a probability measure on $N$-paths, does not itself directly correspond to any clear statistical property of this ensemble of $N$-paths, such as the distributions of crossings discussed in Section \ref{2}. Recall now that $s$ labels the synchronization, and recall as well the suggestion that ``quantum equilibrium'' should hold on ``simultaneity surfaces'' \cite{DGZ90}. Thus we proceed as follows: We first fix initial values for the subsystem times $T_a(0)=:s_0$ and $T_b(0):= s_0+h$. Then the evolution equations for $T_a$ and $T_b$ may be solved to obtain $T_a(s)=s_0+s$ and $T_b(s)=s_0+h+s$. The constant of motion $h= T_b(0) -T_a(0)\, (= T_b(s) - T_a(s)$ for all $s$) defines the synchronization---the velocity of system $a$ at a time $t_a$ depends upon the configuration $Q_b$ at time $t_b=t_a+h$. (As it happens, to this fixed synchronization we may associate a (Lorentz) frame of reference in which the interaction between the systems is ``instantaneous.'' However, this associated frame is merely a convenience, one that for more than four subsystems it would typically be impossible to retain.) 
Now the subsystem times in Eqn.\ (\ref{bohm2}) may be eliminated: With \[ \psi^{h}(s,q_a,q_b) := \psi (T_a(s),q_a, T_b(s),q_b) = \psi (s_0+s,q_a, s_0+h+s,q_b) \] one obtains \begin{eqnarray*} \frac { dQ_a}{ds} & = & v_a^{\psi ^h}(s,Q_a(s),Q_b(s)), \\ \frac{ dQ_b}{ds} & = & v_b^{\psi^h}(s,Q_a(s),Q_b(s)).\end{eqnarray*} This is the usual Bohmian mechanics relative to the synchronization given by $h$; we have the continuity equation \begin{equation} \label{konth} \frac{\partial {\rho^h}}{\partial s} + {\rm div}_{q_a} (\rho^h \, v_a^{\psi^h}) + {\rm div}_{q_b} (\rho^h \, v_b^{\psi^h}) =0,\end{equation} and the density $\rho^h = |\psi ^h|^2$ is ``equivariant,'' i.e., if $\rho ^h (s_0) =|\psi^h (s_0)|^2$ for some $s=s_0$, then $\rho ^h (s)=|\psi ^h(s)|^2$ for all $s$. For $\psi ^h(s)\in L^2({\rm I\! R} ^{3N})$, this density is normalizable, and gives the distribution of crossings of any hypersurface corresponding to the times $T_a(s)$ and $T_b(s)$ for the ensemble of $N$-paths defined by $\psi ^h$. \subsection{Hardy's experiment in multitime translation invariant \protect\\ Bohmian theory}\label{3.3} We describe now the particle trajectories in Hardy's experiment for the multitime translation invariant Bohmian theory given by Eqs.\ (\ref{sgl2}, \ref{bohm2}, \ref{velfield2}) (with $N_a=N_b=1$ and the Stern-Gerlach magnets treated, as usual, as external fields). We prepare a system of two particles in the quantum state $\psi_{\rm Hardy}$ (\ref{hardy1}). After the particles are widely separated from each other, we perform Hardy's experiment, cf.\ Figure \ref{rhopsibild}, focusing on the part of the experiment in which the particles run through the Stern-Gerlach magnets $A_z$, $B_z$, cf.\ Figure \ref{hardy2bild}. 
\begin{figure}[t] \begin{center} \leavevmode \epsfxsize=10.3cm \epsffile{hardy2.eps} \end{center} \caption{Hardy's experiment in multitime formalism.} \label{hardy2bild} \end{figure} First we describe the development of the synchronized paths for initial values of the subsystem times $T_a(0)=-\vartheta$ and $T_b(0)=-2\vartheta$, so that $h= h^{\it II}=-\vartheta$, for a $\vartheta >0$, not too large, referring to scales for the subsystem times $t_a$ and $t_b$ as defined in Figure \ref{hardy2bild}. This gives a synchronization corresponding to the frame {\it II}\/ in Figure \ref{rhopsibild}. Consider those two-paths for which at the value $s=0$ of the synchronization parameter, particle $a$ is located in ${\rm supp}\, |+\rangle _x^a$ and particle $b$ is located in ${\rm supp}\, |+\rangle _x^b$. Demanding $\rho ^{h^{\it II}}(0)= |\psi ^{h^{\it II}}(0)|^2$, these are 1/12 of all two-paths. After particle $a$ has gone through the apparatus $A_z$, it must be located in ${\rm supp}\, |-\rangle _z^a$ since e.g.\ $\rho ^{h^{\it II}}(3\vartheta /2)= |\psi ^{h^{\it II}}(3\vartheta /2)|^2$, cf.\ Eqn.\ (\ref{psi2}). After particle $b$ has run through the apparatus $B_z$, it must be located in ${\rm supp}\, |+\rangle _z^b$ since e.g.\ $\rho ^{h^{\it II}}(5\vartheta /2)= |\psi ^{h^{\it II}}(5\vartheta /2)|^2$, cf.\ Eqn.\ (\ref{psi12}). This course of the particle paths is displayed in Figure \ref{hardyIIbild}, top. 
\begin{figure}[p] \begin{center} \leavevmode \epsfxsize=10.3cm \epsffile{hardyb.eps} \end{center} \caption{Course of some synchronized two-paths according to the multitime translation invariant Bohmian theory in Hardy's experiment for initial values of subsystem times corresponding to the synchronization {\it II}\/ (top) resp.\ {\it III}\/ (bottom).} \label{hardyIIbild} \end{figure} Now consider the same experiment with different initial values for the subsystem times: $T_a(0)=-2\vartheta$ and $T_b(0)=-\vartheta$, so that $h= h^{\it III}=\vartheta$, a synchronization corresponding to the frame {\it III}\/ in Figure \ref{rhopsibild}. Again consider those two-paths for which at the value $s=0$ of the synchronization parameter, particle $a$ is located in ${\rm supp}\, |+\rangle _x^a$ and particle $b$ is located in ${\rm supp}\, |+\rangle _x^b$. Demanding $\rho ^{h^{\it III}}(0)= |\psi ^{h^{\it III}}(0)|^2$, these are 1/12 of all two-paths. After particle $b$ has gone through $B_z$, it must be located in ${\rm supp}\, |-\rangle _z^b$ since e.g.\ $\rho ^{h^{\it III}}(3\vartheta /2)= |\psi ^{h^{\it III}}(3\vartheta /2)|^2$, cf.\ Eqn.\ (\ref{psi3}). After particle $a$ has run through the apparatus $A_z$, it must be located in ${\rm supp}\, |+\rangle _z^a$ since e.g.\ $\rho ^{h^{\it III}}(5\vartheta /2 )= |\psi ^{h^{\it III}}(5\vartheta /2)|^2$, cf.\ Eqn.\ (\ref{psi12}). This course of the particle paths is displayed in Figure \ref{hardyIIbild}, bottom. In neither case does the distribution of crossings by the two-paths of a hypersurface corresponding to the other synchronization agree for all parameter values $s$ with the corresponding $|\psi ^{h}(s)|^2$. 
In the first case, $h=h^{\it II}=-\vartheta$, the two-paths run through ${\rm supp}\, |+\rangle _x^a \times {\rm supp}\, |+\rangle _z^b$ when they cross a suitable hypersurface corresponding to frame {\it III}, even though the wave function $\psi ^h$ is orthogonal to $|+\rangle _x^a |+\rangle _z^b$, cf.\ Eqn.\ (\ref{psi3}). Analogously, in the second case, $h=h^{\it III}=\vartheta$, the two-paths run through ${\rm supp}\, |+\rangle _z^a \times {\rm supp}\, |+\rangle _x^b$ when they cross a suitable hypersurface corresponding to frame {\it II}, even though the wave function $\psi ^h$ is orthogonal to $|+\rangle _z^a |+\rangle _x^b$, cf.\ Eqn.\ (\ref{psi2}). Finally we explain why, despite the fact that the two-paths (occasionally) are in regions where the wave function vanishes, no violations of the quantum mechanical predictions would be experimentally observed. If an actual experiment---involving, for example, the insertion of photographic plates into the paths of the particles---were performed, the influence of this apparatus on the future evolution of the complete system would have to be taken into account. This can conveniently be accomplished, in a manner analogous to what is done in ordinary Bohmian mechanics, by suitably collapsing the wave function $\psi ^h$ upon measurement. Suppose, for example, that we attempt to detect what quantum mechanically should be impossible, namely the two-paths running through ${\rm supp}\, |+\rangle _x^a \times {\rm supp}\, |+\rangle _z^b$, which we have just seen has positive probability for synchronization {\it II\/}, at least when no detection is attempted. We might do this by inserting a detector in the path corresponding to ${\rm supp}\, |+\rangle_x^a$, say at a position corresponding to $s=0$, as well as in the path corresponding to ${\rm supp}\, |+\rangle _z^b$. 
Then with the synchronization {\it II}, the wave function $\psi ^{h^{\it II}}$ collapses at the synchronization parameter value $s=0$, when particle $a$ is found in ${\rm supp}\, |+\rangle _x^a$, to \[ \psi _{a_x=+1}^{h^{\it II}} (s=0) = \frac 1{\sqrt 2}|+\rangle _x^a\Bigl( |+\rangle _x^b-|-\rangle _x^b\Bigr) \ \stackrel{U_{5\vartheta /2}}\longrightarrow \ |+\rangle _x^a |-\rangle _z^b ,\] and the future evolution of particle $b$ changes drastically from what it would have been like had there been no measurement or collapse: after having gone through apparatus $B_z$ it no longer runs into the $|+\rangle _z^b$ channel, but rather into the $|-\rangle _z^b$ channel! Analogous things happen with the synchronization {\it III} and the opposite measurements. \section{Reflections on Lorentz invariance and statistical transparency}\label{4} Concerning the model of Section \ref{3.2}, we have just alluded to the fact that, just as for ordinary Bohmian mechanics, from the quantum equilibrium hypothesis that the actual distribution of crossings is $\rho ^h=|\psi ^h|^2$, one can derive the quantum mechanical measurement formalism---which, as shown in Section \ref{3.1}, is multitime translation invariant and moreover does not even depend upon the quantity $h$. We thus have, with regard to our multitime Bohmian model, three levels of description: the microscopic dynamical level, given by (\ref{sgl2},\ref{bohm2}), which is multitime translation invariant; the statistical mechanical level, given by the quantum equilibrium hypothesis, which is, in precisely the same way, also multitime translation invariant---despite the results of Section \ref{2}; and the observational level given by the quantum measurement formalism, which is also apparently multitime translation invariant. 
There is, however, an important difference between the relativistic characters of these levels: the latter level might be regarded as more fully relativistic than the first two, which achieve their invariance through the incorporation of the additional structure provided by the synchronization. It might be argued that such a structure violates the spirit of relativity \cite{Maud95,Maudlin}, and regardless of whether or not we agree with this, it must be admitted that achieving relativistic invariance in a realistic (i.e., precise) version of quantum theory without the invocation of such structure seems much more difficult. Hence Bell's excitement about his version of the model of GRW (\cite{Bell}, Chapter 22). (It must also be admitted that a somewhat unpleasant implication of the situation just described is that this synchronization structure---which after all comprises a radical addition to physics---is, in the model under consideration here, completely unobservable! See also \cite{Maud95}.) Indeed, any theory can be made trivially Lorentz invariant (or invariant under any other space-time symmetry) by the suitable incorporation of additional structure, for example as given by the specification of a Lorentz frame $\Lambda_0$ as part of the state description.\footnote{Consider a theory specifying the set ${\cal L}$ (THE LAW) of possible decorations $\xi$ of space-time and assume that the Lorentz group acts naturally on any $\xi$ and thus on ${\cal L}$. This theory, demanding that $\xi\in{\cal L}$, will be Lorentz invariant if $\Lambda{\cal L}={\cal L}$ for any Lorentz transformation $\Lambda$. Suppose this is not true. We may then enlarge the original theory by replacing $\xi$ by $\hat\xi\equiv(\xi,\Lambda_0)$ and the law ${\cal L}$ by $\hat{\cal L}$ defined by stipulating that ${(\xi,\Lambda_0)=\hat\xi\in \hat{\cal L}} \Leftrightarrow {\xi\in \Lambda_0{\cal L}}$. (The original theory thus corresponds to $\Lambda_0=I$.) 
Then $\hat{\cal L}$ is trivially Lorentz invariant: For any Lorentz transformation $\Lambda$ we have that $\Lambda\hat\xi=(\Lambda\xi,\Lambda\Lambda_0)\equiv(\xi',{\Lambda_0}') \equiv\hat\xi'$, so that ${\hat\xi\in\hat{\cal L}}\Rightarrow{\Lambda\xi\in\Lambda\Lambda_0{\cal L}}\Rightarrow {\xi'\in {\Lambda_0}'{\cal L}}\Rightarrow{\hat\xi'\in \hat{\cal L}}$.} It seems rather clear that this example, while Lorentz invariant, does not possess what Bell has called ``serious Lorentz invariance,'' a notion, however, that it is extremely difficult to make precise in an adequate way \cite{Bell}. The Bohmian model (\ref{bohm2}) immediately suggests a genuinely (though perhaps not seriously) Lorentz invariant Bohmian theory: For $N$ particles, the beables are a multitime wave function and a synchronized $N$-path, i.e., an equivalence class of maps $(X_1,\dots ,X_N): {\rm I\! R} \to {\rm I\! R}^{4N}$, $s\mapsto (X_1(s),\dots ,X_N(s))$ differing only in their parametrization. The synchronized $N$-path satisfies the guiding equation \begin{equation} \label{libohm2} \frac{dX_k}{ds} = v_k(X_1(s), \dots , X_N(s)), \quad k=1,\dots ,N , \end{equation} where the $v_k$ are suitable 4-vector fields, on ${\rm I\! R}^{4N}$, determined by the multitime wave function. As with (\ref{1partbohm}) and (\ref{bohm2}), the fact that only the synchronized $N$-path and not the parametrization determined by a particular $v_k$ has beable status implies that all equations of the form $\displaystyle \frac{dX_k}{ds} = a (X_1, \dots , X_N) v_k(X_1, \dots , X_N)$ with an arbitrary positive function $a$ on ${\rm I\! R}^{4N}$ are physically equivalent. 
More concretely, one may consider a Lorentz invariant multitime Bohm-Dirac theory: the wave function $\psi=\psi(x_1,\dots ,x_N)$ satisfies $N$ Dirac equations analogous to (\ref{sgl2}), and $v_k$ may for example be chosen to be \begin{equation} \label{libohmdirac} v_k^\mu = \bar \psi \gamma _k^\mu\psi \end{equation} with $\bar\psi = \psi^\dagger (\gamma^0\otimes \dots \otimes \gamma^0) = \psi^\dagger \gamma^0_1 \dots \gamma^0_N$ and $\gamma^\mu_k = \openone \otimes \dots \otimes \openone \otimes \gamma^\mu\otimes \openone \otimes \dots \otimes \openone$, the $\gamma^\mu$ at the $k$-th of the $N$ places. We shall discuss such a model in a subsequent work. Just as with the model of Section \ref{3}, models of the form (\ref{libohm2}), because of the nonlocal interaction along the synchronization, have the possibility of properly describing quantum nonlocality as exhibited, for example, by an EPR experiment. This is in contrast with the local model of Squires \cite{sqmodel}, which is based on what might be called a local light-cone synchronization. While Squires formulates his model for the nonrelativistic Schr\"odinger equation, he could as well have considered a multitime Dirac model with a local light-cone synchronization to obtain a model that is completely Lorentz invariant---and completely local. Some readers may be wondering why we have analysed the nonrelativistic multitime Bohmian theory in detail in Section \ref{3} instead of starting right away with (\ref{libohm2}, \ref{libohmdirac}) or with the multitime Bohm-Dirac theory \begin{equation} \label{shbd} \frac{dT_k}{ds} = \psi^\dagger \psi , \quad \frac{d{\bf Q}_k}{ds} = \psi^\dagger \bbox{\alpha} _k \psi \end{equation} with $ X_k= (T_k,{\bf Q}_k)$, $ k=1\dots N $, and $\alpha^i_k = \openone \otimes \dots \otimes \openone \otimes \alpha^i\otimes \openone \otimes \dots \otimes \openone = \gamma_k^0 \gamma_k^i$. 
This theory arises from Bohm's theory (\ref{bohmdirac}) for $N$ Dirac particles $\displaystyle {\bf v} _k = \frac{ \psi^\dagger \bbox{\alpha} _k \psi }{\psi ^\dagger \psi } = \frac{{\bf j} _k}{ \rho}$ by introducing a dynamical synchronization, and it agrees for $N=1$ with (\ref{libohm2}, \ref{libohmdirac}). These models might suggest that the reconciliation of statistical transparency and Lorentz invariance is at hand. However, for $N>1$ (\ref{shbd}) is not Lorentz invariant, because---unlike (\ref{libohmdirac})---$( \psi^\dagger \psi , \psi^\dagger \bbox{\alpha} _k \psi )$ is not a 4-vector. On the other hand, (\ref{libohmdirac}) is not statistically transparent because---unlike (\ref{shbd})---the (reparametrization invariant) configuration space velocity $v_k^i /v_k^0$ arising from (\ref{libohmdirac}) is not of the form ${\bf j}_k /\rho$ for $N>1$. Thus, for the Lorentz invariant model (\ref{libohm2}, \ref{libohmdirac}) equivariance does not hold in any obvious way and hence, since there is in general no reason that quantum equilibrium should hold with respect to any reasonable family of hypersurfaces, the canonical statistical analysis cannot be performed and the question of the extent of its agreement with standard quantum theory becomes rather delicate.\footnote{This absence of statistical transparency is similarly also the case for the local model of Squires \cite{sqmodel}.} There is another important difference between (\ref{libohmdirac}) and (\ref{shbd}). To appreciate this consider the system \begin{equation} \label{multibd} \frac{dT_k}{ds} = 1, \quad \frac{d{\bf Q}_k}{ds} = {\bf v} _k(X_1(s), \dots , X_N(s)) \end{equation} with $\displaystyle {\bf v} _k = \frac{ \psi^\dagger \bbox{\alpha} _k \psi }{\psi ^\dagger \psi }$. Here $(T_k(s))$ is entirely determined by $(T_k(0))$ and the statistical analysis of this theory may be developed as in Section \ref{3.2} for the multitime Bohmian theory, merely replacing $|\psi |^2$ by $\psi ^\dagger \psi $. 
With (\ref{libohmdirac}), however, the equations for the evolution of the synchronized particle times \[ \frac{dT_k}{ds} = ( \bar \psi \gamma_k^0 \psi ) (X_1(s),\dots ,X_N(s)) \] imply that in general $(T_k(s))$ depends upon the (initial) positions of the particles as well as on $(T_k(0))$, and it is difficult to see how one could begin any statistical analysis even if the velocity field were otherwise somehow of a suitable form. Now it might appear that we should have the same difficulty with (\ref{shbd}); however the theory (\ref{shbd}) is equivalent to (\ref{multibd}) since the respective vector fields differ by a real-valued function on ${\rm I\! R}^{4N}$ and hence define the same synchronized $N$-paths. Thus it turns out that (\ref{shbd}) is statistically transparent---or at least statistically translucent. We shall take up these questions in a subsequent paper. Observe that if the 4-vectors $v_k$ are 4-velocities ($v_{k\mu} v_k^\mu =1$), the synchronization implied by (\ref{libohm2}), which in this case is according to proper time parametrization, reduces in the nonrelativistic limit to the first set of Eqs.\ (\ref{bohm2}).\footnote{Note that $\bar \psi \gamma _k^\mu\psi $ need not in general be everywhere timelike and thus $v_k$ (\ref{libohmdirac}) cannot in general be normalized. However, one can find a simple reparametrization $\displaystyle v_k= \frac{\bar\psi \gamma_k\psi }{ \bar\psi \psi }$ such that the $v_k$ are approximate 4-velocities for ``large $c$'': Writing $$\psi (x_1,\dots , x_N)=\sum _i \varphi_i (x_k)\chi_i (x_1,\dots , x_{k-1}, x_{k+1},\dots , x_N)$$ and noting that in the nonrelativistic limit the last two components of $\varphi_i$ in the standard representation become much smaller than the first two, one sees that in the nonrelativistic limit $dT_k/ds=\bar \psi \gamma _k^0\psi / \bar\psi \psi \approx 1$ and the space components of $v_k$ become small. 
Thus in the nonrelativistic limit the theory (\ref{libohmdirac}) implies a synchronization that can be (re)expressed in the form (\ref{bohm2}) (first equations). (Concerning the reparametrization by $\bar \psi \psi$ [and that by $\psi ^\dagger \psi$ for the relation between (\ref{shbd}, \ref{multibd})], one may convince oneself that not only the multiplication of the velocity field by a positive function, but typically even by a function that has zeros or changes sign will yield an equivalent theory.)} Whatever reservations we may have concerning models such as we've been discussing, a synchronization by proper time seems to us entirely compatible with serious Lorentz invariance, at least for a pair of particles having a common origin in a single event. The requirement that a Bohmian theory be Lorentz invariant without the incorporation of such additional structure as a dynamical synchronization places a very strong constraint on, say, the vector field defining the law of motion (in a particular frame), or, what amounts to pretty much the same thing, on the wave function of the system---and might be expressed via a suitable fixed-point equation for this wave function. It seems extremely likely that the set of wave functions satisfying such an equation is very small, far smaller than the families of wave functions we normally consider for the set of possible initial states of a quantum system. However, if, as is widely believed, we accept that from a cosmological perspective there should be a unique wave function (for example, the Wheeler-de Witt wave function or the Hartle-Hawking wave function) of the universe, this very fact might well be a virtue rather than a vice! \section*{Acknowledgements} This work was supported in part by the DFG, by NSF Grant No. DMS-9504556, and by the INFN.
1,108,101,565,885
arxiv
\section*{Keywords} \section{Introduction} \paragraph*{}An intriguing aspect of populations of coupled neurons is their ability to generate persistent activity~\cite{Major2004}. Such sustained activity is found in diverse brain areas~\cite{Schultz2003, Komura2001, Prut1999, Moschovakis1997, Kojima1996} and species~\cite{Marques2020, MajorA2004} and the qualitative similarity in their activity patterns indicate their importance and the possibility of an unified mechanism underlying such brain dynamics. Several studies suggest a potential link between persistent activity and working memory of the brain~\cite{Wang1999, Compte2000, Zylberberg2017, Kaminski2017}, which is crucial for many cognitive processes including decision-making~\cite{Curtis2010, Haller2018}. This remarkable ability of the brain to achieve stable persistent state which in turn enables robust information storage~\cite{Curtis2003} is attributed to the efficient communication between its constituent elements which are the neurons. It is well known that the inter-neuronal communication in the mammalian brain is largely achieved by chemical synapses~\cite{Galarreta2001}. Such synapses can either be excitatory or inhibitory (depending on the neurotransmitter they release) and the precise balance between them is crucial for proper brain functioning~\cite{Symonds1959, Rubenstein2003, Uhlhaas2010, Yizhar2011}. Nevertheless, the role of electrical synapses (or gap-junctions) in maintaining normal physiological function and homeostasis cannot be ignored~\cite{Dong2018}. Hence, it is important to understand the interplay between these two synaptic modalities viz., chemical and electrical in generating persistent neuronal activity. Networks of inhibitory neurons have been studied extensively in the context of synchronization and are known to play a key role in generating robust network oscillations~\cite{Buzsaki2012, Allen2015}. 
Although they constitute only 10\%-20\% of the neuronal population~\cite{Buzsaki2007, Swanson2019}, they play a significant role in sculpting the network dynamics. These inhibitory neurons are known to be connected predominantly through electrical gap-junctions ~\cite{Galarreta2001}, in addition to their synaptic connections. Previous studies on coupled inhibitory neurons focused on studying synchronization among the constituent neurons with gap-junctions alone~\cite{Chow2000, Lewis2003A} and with synapse and gap-junctions~\cite{Lewis2003, Kopell2004, Pfeuty2005}. Thus it is apparent from these studies that gap-junctions play a crucial role in enabling neuronal synchronization. This tendency of gap-junctions to promote synchronization could be one of the prominent reasons for their lack of occurrence between excitatory neurons, as synchronization prevents persistent activity~\cite{Ermentrout2006}. As the present work focuses on mechanisms underlying persistent activity as opposed to the previously studied neuronal synchronization, considering networks that only comprise inhibitory neurons can enhance our understanding towards that direction. Chaotic dynamics is known to exist in many biological systems ranging from coupled genetic circuits~\cite{Kappler2003, Novak2008, Zhang2012, Suzuki2016, Liu2019}, cardiac cells~\cite{Glass1983, Chialvo1990, Lewis1990, Garfinkel1997} and ecological networks~\cite{Schaffer1985, Hastings1991, Hassell1991, Upadhyay1998, Upadhyay2009, Pearce2020} and there has been plenty of research of such systems reporting chaos with and without delays. Investigations of the dynamics of individual neurons in the network coupled through synapses, have revealed that the activity profile does not necessarily exhibit rhythmic behavior. In particular, networks of synaptically coupled neurons are shown to exhibit irregular dynamical activity, often attributed to chaos~\cite{Jahnke2008, Shim2018}. 
There has been plenty of evidence for the existence of non-periodic dynamics in a single neuron (e.g. non-periodic oscillations in internodal cell of Nitella flexillis~\cite{Hayashi1982}, chaotic oscillations in Molluscan neurone~\cite{Holden1982}). Such chaotic dynamics occurs not just at the level of single neurons but at several hierarchical levels in the brain~\cite{Amari2003, Freeman2003, Korn2003}. Although the occurrence of chaos both during normal and pathological brain states suggest their significance in neuronal mechanisms, their precise functional role still remains unclear~\cite{Freeman1995}. Nevertheless, it is compelling to see if chaos appears as an emergent persistent behavior in a network comprising of only inhibitory neurons in the presence of both chemical and electrical synaptic coupling. In this paper we address the following questions: (1) how does the interplay between inhibitory synaptic and gap-junctional (electrical) coupling result in persistent activity? (2) what is the simplest network that can generate complex sustained dynamics? To this end, we first analyze in detail the dynamical behavior exhibited by two coupled excitable neurons (with each neuron generating an action potential when a sufficient stimulus is applied), coupled through both an uni-directional synapse and a bi-directional gap-junction. By applying a short pulse to one of the constituent neurons (specifically the pre-synaptic neuron), we investigate the conditions that result in persistent neuronal activity rather than previously studied synchronization, where the individual neurons were chosen to be oscillators and not excitable elements. We know that if the coupling strengths of both the synapse and the gap-junctions are sufficiently strong, they combine in a nonlinear manner and can give rise to new, complex behavior of the system~\cite{Pfeuty2005}. 
Motivated by a study by Kopell and Ermentrout [2004], for this work, we focus on strong synaptic coupling regime and study the effects of varied gap-junctional coupling strengths on generating persistent activity. As one of our key results, we demonstrate the emergence of complex dynamical behavior such as period-$n$ ($n=2,4,8\ldots$) oscillations and chaos using the simplest setting consisting of an uni-directional synapse from an inhibitory pre-synaptic neuron and a gap-junction, with the neurons starting from their resting state as well as from random initial conditions. We perform a detailed parametric study for systems in the presence of brief pulse and obtain parameter space diagrams that indicate the various attractors to which the system converges to when starting from resting state initial conditions, viz., no oscillations, periodic oscillations and complex oscillations. We further extend our study to a ring of inhibitory neurons having synapses between randomly chosen pairs of neurons and a bi-directional gap-junction with their nearest neighbors. Using this set up, we obtain the basin size of the various attractors in the system and also determine the optimal conditions for obtaining sustained activity under strong inhibition. Thus, in this work, we present a detailed picture of a minimalistic network of neurons coupled through synapses and gap-junctions. This not only enables deeper understanding of the mechanism uncovering persistent activity under strong inhibition but can also aid future research on addressing broader questions related to cortical computation and controlling memory patterns~\cite{Mongillo2018}. \section*{II. The Model} \label{sec:model} \paragraph*{}We consider a system of $N$ identical Fitzhugh Nagumo (FHN) neurons~\cite{Fitzhugh1961, Nagumo1962}, coupled through chemical synapses and electrical gap-junctions. 
The dynamics of the coupled system is described by the equations \begin{equation} \begin{split} \epsilon\dot{V_i} &= {V_i\ (1 -V_i)\ (V_i - a) - W_i\ + I_{ext} - I_i^{syn} + I_i^{gap}},\\ \dot{W_i} &= V_i - k\ W_i, \end{split} \label{eq:FHN} \end{equation} where $i \in \{1,2...N\}$ denotes the neuron index, $V_i$ is the associated membrane potential, $W_i$ is the associated recovery variable and $I_{ext}$ is the external current. The parameters $a = 0.1 $ and $k = 0.5$ describe the model kinetics, while $\epsilon =0.01$ is the recovery rate. These values are chosen such that each uncoupled neuron is an excitable system. The $I_{i}^{syn}$ appearing in Eqn.~(\ref{eq:FHN}) represents the synaptic current which is modeled here as an ohmic current~\cite{Destexhe1994, Ermentrout2010}. Synapses in the brain are uni-directional with synaptic coupling from the pre-synaptic neuron $j$ to the post-synaptic neuron $i$. The equation for the synaptic current onto the post-synaptic neuron $i$ is given by: \begin{equation} \begin{split} I_i^{syn} = g_{syn} \ \sum_{j=1}^{N} A_{ij} \ (V_i- E_{syn})\ s_{ji}, \end{split} \end{equation} where $g_{syn}$ is the synaptic conductance, $A_{ij}$ represents the synaptic weight matrix (or the adjacency matrix) and $E_{syn}$ represents the synaptic reversal potential with $E_{syn} = 5$ for excitatory and $E_{syn} = -5$ for inhibitory neurons respectively. $s_{ji}$ denotes the synaptic gating variable that evolves according to the equation, \begin{equation} \begin{split} \dot{s}_{ji} &= \alpha\ N(V_j)\ (1-s_{ji})\ - \beta\ s_{ji},\\ \end{split} \label{eq:Gating} \end{equation} where, \begin{equation*} \begin{split} N(V_j) &= 0.5\ (1+\tanh((V_j-v_{th})/v_{sl})). \end{split} \end{equation*} Here $\alpha=3$ and $\beta=3$ are the decay constants and $v_{th} = 0.3$, $v_{sl} = 0.001$ are the parameters that determine the shape of the synaptic term. 
The equation for the gating variable (\ref{eq:Gating}) thus depends only on the membrane potential of the pre-synaptic neuron $V_j$. The $I_{i}^{gap}$ appearing in Eqn.~(\ref{eq:FHN}) represents the gap-junctional current. Such electrical coupling between the neurons is diffusive in nature and hence the gap-junctional current can be written as: \begin{equation} \begin{split} I_i^{gap} = g_{gap} \ \sum_{nn} (V_j- V_i), \end{split} \end{equation} where $g_{gap}$ represents the gap-junctional conductance and the summation is over all nodes that are nearest neighbors ($nn$) of a given node $i$. Throughout this paper, the dynamics of the coupled system is studied for high inhibition and weak electrical coupling levels, with the value of the synaptic conductance fixed at $g_{syn}=0.81$ (unless mentioned otherwise) and varying the gap-junctional conductance $g_{gap}$. For all simulations reported in this work, we study the dynamics obtained when a sub-threshold stimulus of $I_{ext}=0.03$ is given to the pre-synaptic neuron. The equations are solved using the variable step stiff solver ODE15s of \verb|MATLAB Release 2010b| with a tolerance of $10^{-8}$, and the results are verified using the $4^{th}$ order Runge--Kutta method. \section*{III. Results} \paragraph*{}We have carried out simulations of systems of coupled excitable neurons that exhibit Fitzhugh Nagumo dynamics, as described in the preceding section. The activity profiles of both pre- and post-synaptic neurons (shown in red and blue respectively), for different high and low inhibitory synaptic $g_{syn}$ and gap-junctional $g_{gap}$ conductance are displayed in Fig.~\ref{fig1} for the simplest case of $N=2$ coupled neurons. In all of these simulations, the neurons are originally in their resting states, while the pre-synaptic neuron alone is subjected to a brief sub-threshold pulse $I_{ext}$.
\begin{figure*}[htbp] \includegraphics[width=0.9\linewidth]{Fig1.pdf} \caption{(color online) Activity profiles of the pre- and post-synaptic neurons observed by establishing synaptic and gap-junctional connections in distinct combinations between two neurons. (A1-F1) Schematic diagrams representing the possible ways of setting up a synaptic and a gap-junctional coupling between two neurons. The pre- and the post-synaptic neurons in the schematic are marked by red and blue color circles respectively. The six different possibilities are as follows: [L-R] (A1) uni-directional excitatory synaptic coupling along with gap-junction [$E_{pre}$], (B1) bi-directional coupling between two excitatory neurons along with gap-junction [EE], (C1) bi-directional coupling between an excitatory and an inhibitory neuron along with gap-junction [EI], (D1) a uni-directional synapse from an inhibitory pre-synaptic neuron along with gap-junction [$I_{pre}$], (E1) bi-directional coupling between an inhibitory and an excitatory neuron along with gap-junction [IE] and (F1) bi-directional coupling between two inhibitory neurons along with gap-junction [II]. For all the aforementioned configurations, the neurons are originally in their resting states, while only the pre-synaptic neuron (excitatory or inhibitory) is subjected to a sub-threshold stimulus $I_{ext}$. The stimulated neuron along the column (A-C) is excitatory, whereas the stimulated neuron along the column (D-F) is inhibitory. (A2-F2, A3-F3 and A4-F4) correspond to low value of synaptic conductance $g_{syn}=0.05$, whereas panels (A5-F5, A6-F6, A7-F7) correspond to high value of synaptic conductance $g_{syn}=0.9$. Both the values of $g_{syn}$ are indicated at the right end of the figure. Along each row, the value of gap-junctional conductance $g_{gap}$ is kept constant.
The arrows on the right represent the increasing direction of $g_{gap}$, whose precise values viz., $0, 0.05, 0.9$ are indicated at the end of each row corresponding to low and high value of $g_{syn}$. Note that at least one inhibitory synaptic connection is necessary for sustained oscillations in the system. The simplest possible two neuron system to show oscillations corresponds to the configuration (D1), which comprises one inhibitory synapse and a gap-junction. } \label{fig1} \end{figure*} The schematic representation shown along the top row of Fig.~\ref{fig1} (A1-F1) displays six possible ways of establishing a synaptic connection (uni-directional and bi-directional) between two neurons (excitatory or inhibitory) in the presence of a gap-junction, namely: $E_{pre}$, EE, EI, $I_{pre}$, IE and II. The neuronal activities corresponding to each of the aforementioned connectivities is shown along the columns, with their specific connections represented by a schematic diagram on top of each column. Along each row, the conductance values $g_{syn}$ and $g_{gap}$ are kept constant and their specific values are mentioned on the right side of the figure. For all our simulations on coupled neurons, we have assumed that a synaptic connection exists irrespective of the existence of a gap-junction, hence $g_{syn} > 0$ and $g_{gap} \geq 0$. The neuronal firing patterns corresponding to low and high synaptic inhibition are shown in Fig.~\ref{fig1} (A2-F2, A3-F3 and A4-F4) and Fig.~\ref{fig1} (A5-F5, A6-F6, A7-F7) respectively, for varying $g_{gap}$. \begin{figure*}[htbp] \centering \includegraphics[width=0.9\linewidth]{Fig2_v4new.pdf} \caption{(Color online) Period-doubling route to chaos in coupled neurons connected by an inhibitory synapse and a gap-junction. 
(A) The schematic (shown as an inset) represents the simplest configuration of $N=2$ coupled neurons with uni-directional inhibitory synapse and gap-junctions when the pre-synaptic neuron alone is subjected to a brief sub-threshold stimulus. The bifurcation diagram is obtained by varying the $g_{gap}$ values (along x-axis), while $g_{syn}=0.81$ is fixed. Plotted along the y-axis are the peak values of the pre-synaptic membrane potential $V_{pre}$, obtained for the last 100 time points. The coupled neurons show oscillatory dynamics for an intermediate range of $g_{gap}$ values, and stable fixed points otherwise. (B) The enlarged portion corresponding to the blue rectangular region in panel (A), shows period-doubling route to chaos, on decreasing $g_{gap}$. (C) The phase space trajectory of the pre-synaptic neuron corresponding to those values of $g_{gap}$ indicated by violet broken lines in panel (B). [L-R] represents complex dynamics namely chaos, period-8, period-4 and period-2 oscillations respectively. (D) shows the corresponding power spectral density ($Pow_{max-min}$) of a discrete time series constructed by taking the maximum and the minimum points of the original time series of the pre-synaptic neuron and (E) shows the Poincaré map (or the return map obtained by plotting the $n^{th}$ and the $(n+1)^{th}$ peak obtained from the time-series of the pre-synaptic neuron) corresponding to the neuronal dynamics shown in panel (C).} \label{fig2} \end{figure*} It is well known that the activity patterns of the coupled neurons vary depending on (1) the type of neurons viz., excitatory or inhibitory (2) the type of synaptic coupling viz., uni-directional or bi-directional and (3) the coupling strengths $g_{syn}$ and $g_{gap}$. The precise contribution of such factors on the activity profiles is discussed in detail in Fig.~\ref{fig1}.
For systems that have low synaptic inhibition and no gap-junctions (Fig.~\ref{fig1} (A2-C2)), we observe that an excitatory pre-synaptic neuron causes the firing of its post-synaptic neuron almost simultaneously. But in the case of an inhibitory pre-synaptic neuron, the post-synaptic neuron fires an action potential after being released from the suppressing effect of the inhibitory pre-synaptic neuron, which is called the post-inhibitory rebound which can be seen clearly in Fig.~\ref{fig1} (D2-E2). Owing to this post-inhibitory rebound effect of inhibitory synapses, in the presence of bi-directional inhibitory coupling, the coupled system with the II configuration exhibits sustained oscillations (Fig.~\ref{fig1} (F2)). Furthermore, introducing gap-junctional conductance $g_{gap}$ causes synchronized behavior of neurons (Fig.~\ref{fig1} (A3-F3) and (A4-F4)) which may or may not result in an action potential (depending on the pre-synaptic neuron). In the bi-directional inhibitory synapse case, where neurons originally exhibited sustained oscillations, introducing the gap-junction kills sustained activity (Fig.~\ref{fig1} (F3 and F4)). Thus, at low synaptic conductance, the coupled system does not give rise to persistent activity in the presence of gap-junctions. Having studied the effects of low synaptic conductance on the neuronal firing, we now focus on how high synaptic inhibition affects the behavior of the coupled neurons. In the case of systems with no gap-junctions, we observe that when the synaptic conductance $g_{syn}$ value is high, the neurons exhibit persistent activity only for bidirectional synapses where at least one of the neurons is inhibitory (EI, IE and II as represented in Fig.~\ref{fig1} (C1, E1 and F1) respectively). 
These bidirectional synapses continue to sustain oscillations in the presence of weak gap-junctional conductance (Fig.~\ref{fig1} (C6, E6 and F6)) but as the gap-junctional conductance grows stronger (Fig.~\ref{fig1} (C7, E7 and F7)), IE and II can no longer sustain oscillations while EI continues to show persistent activity. The configurations EI and IE are identical, while only the stimulated neurons are distinct in both cases (excitatory for EI and inhibitory for IE). Hence both the configurations exhibit qualitatively similar firing pattern in all the cases (Fig.~\ref{fig1} (C2-C6 and E2-E6)) except for high values of $g_{syn}$ and $g_{gap}$ (Fig.~\ref{fig1} (C7 and E7)), where EI configuration shows oscillations due to the stimulation of excitatory pre-synaptic neuron while IE configuration does not initiate oscillations as the stimulated inhibitory neuron suppresses the excitation of its post-synaptic neighbor. Among all the configurations, there is only one uni-directional synaptic configuration which shows sustained oscillations, and that is $I_{pre}$ (Fig.~\ref{fig1} (D1)), in the presence of a strong inhibitory synapse and a weak gap-junctional conductance (Fig.~\ref{fig1} (D6)). However, at strong $g_{gap}$ (Fig.~\ref{fig1} (D7)), oscillations are not sustained. This is the simplest system of two coupled neurons, with a single synapse and a gap-junction, that exhibits persistent activity. Therefore, in order to understand the interplay between the synapse and the gap-junction in creating persistent activity in a neuronal network, we carry out an in-depth exploration of the dynamical behavior of this specific configuration viz., $I_{pre}$. The above results indicate that the interplay between synaptic and gap-junctional conductance values is crucial in determining the dynamical behavior of the system.
In a system of two neurons coupled by a strong inhibitory synapse and a gap-junction, the change in the dynamical behavior from non-oscillatory to oscillatory to non-oscillatory states at high inhibition is studied by varying the gap-junctional conductance $g_{gap}$ (see Fig.~\ref{fig2}). Starting from their initial resting states, i.e. $(V_{i},W_{i},S_{i})=(0,0,0)$ where $i = 1,2$, this coupled neuronal system results in persistent dynamical activity, when a sub-threshold pulse is applied to the pre-synaptic inhibitory neuron. The bifurcation diagram (Fig.~\ref{fig2} (A)), with the gap-junctional conductance along the x-axis and the membrane potential ($V_{pre}$) of the pre-synaptic neuron along the y-axis, is obtained for the case of high synaptic coupling. This bifurcation diagram shows that the neurons exhibit oscillatory behavior for a specific range of gap-junctional conductance $0.031 < g_{gap} < 0.25$, while values outside this range do not sustain oscillations. The striking feature of this minimal system of two coupled inhibitory neurons is their ability to show complex dynamical behavior, for a restricted range of $g_{gap}$ (enclosed within the blue box). This range, although very small, exhibits a rich dynamical repertoire as can be seen in the magnified view in Fig.~\ref{fig2} (B). We find that when $g_{gap}$ is reduced below a critical value ($\approx0.0392$), the system undergoes a period-doubling bifurcation thereby converting the attractor from period-1 to period-2. Further decrease in $g_{gap}$ results in the emergence of period-4 and period-8 attractors, after which we observe the onset of chaos, which is indicated by the appearance of solid red bands formed due to the merging of successive bifurcations. We thus note that there is a minimum value of the gap-junctional conductance $g_{gap}$ below which the neurons do not show persistent activity.
To verify if there exists a chaotic attractor (but with a smaller basin of attraction) below the minimum $g_{gap}$ value, we performed annealed simulations (results not shown), where we allow the neurons to reach a particular attractor and gradually vary the $g_{gap}$ using the current state as the initial condition as opposed to quenched simulations where for every $g_{gap}$ we start from resting state initial conditions, and found the existence of only the resting state attractor. This sudden disappearance of the chaotic attractor on reducing $g_{gap}$ can be attributed to the well studied boundary crisis~\cite{Grebogi1983}. Moreover, we show that the dynamics exhibited by the post-synaptic neuron is qualitatively similar to that of the above discussed pre-synaptic neuron (See Supporting Information Fig.~\ref{SI2}). Thus, it is clear from the aforementioned results that a system of two neurons coupled through both chemical and electrical synapses is the minimal network that can exhibit both sustained activity as well as complex dynamics such as chaos. In order to further analyze the observed dynamical patterns, we consider four different $g_{gap}$ values indicated by vertical broken lines in violet. The phase space trajectories corresponding to the chosen conductance $g_{gap}$ values are shown in Fig.~\ref{fig2} (C), with complex behavior such as [L-R] chaos, period-8, period-4, period-2 oscillations. In other words, the dynamics of the coupled system changes from complex chaotic oscillations to periodic oscillations, when $g_{gap}$ is increased yet constrained to the narrow range. In order to distinguish between various complex oscillatory dynamics, power spectral densities of the corresponding time-series are often used. In this paper, instead of the full time series, we calculate the maximum and minimum values of each oscillation (from the original time series) and construct a discrete time series.
This method along with the power spectral density (PSD) of full time series was shown to be effective in distinguishing various chaotic attractors in~\cite{Suzuki2016}. For our paper, we found that the power spectral density calculated from the maximum-minimum time series distinguishes the different types of oscillations effectively (See Supporting Information Fig.~\ref{SI1}). For period-2, 4 and 8 oscillations, we observe peaks for certain frequencies alone but for chaotic dynamics, we can see spikes throughout the entire frequency range. Yet another efficient way of distinguishing various periodic and non-periodic oscillations is the Poincaré map shown in Fig.~\ref{fig2} (E), corresponding to the four different oscillatory patterns. The Poincaré map is obtained by plotting the $n^{th}$ and the $(n+1)^{th}$ peaks, which can be calculated from the neuronal time series. We find $N$ discrete points with $N = 2, 4, 8\dots$ for period-2,4,8 oscillations and this regular geometric pattern is lost when the system shows chaotic oscillations. Hence, the bifurcation diagram along with the Poincaré map and the PSD of the discrete time series indicates that the non-periodic oscillations observed are indeed chaotic. \begin{figure}[htbp!] \centering \includegraphics{Fig3_new_test.pdf} \caption{(Color online) Different dynamical regimes exhibited by $N=2$ coupled neurons connected by an inhibitory synapse and a gap-junction. (A) The $(g_{syn}, g_{gap})$ parameter space, marked by the attractors to which the system converges, viz. No Oscillations, Periodic and Complex oscillations ($>$ period-1 oscillations, indicated by an arrow), starting from their resting states, i.e. $(V_{i},W_{i},S_{i})=(0,0,0)$ where $i = 1,2$. The blue broken lines represent the $g_{syn} = 0.5$ value used for the bifurcation diagram shown in panel (B). The broken line in magenta in (A) corresponds to the $g_{syn} = 0.81$ value in Fig.~\ref{fig2} (A).
(B) The bifurcation diagram is obtained by varying the gap-junctional conductance $g_{gap}$ when a brief pulse is applied to the pre-synaptic neuron alone, fixing $g_{syn}=0.5$. Plotted along the y-axis are the peak values of the pre-synaptic membrane potential $V_{pre}$, obtained for the last 100 time points. On decreasing $g_{gap}$, the system shows premature termination of the period-doubling bifurcation (at period-4 oscillations).} \label{fig3} \end{figure} While the results mentioned above by applying a brief pulse to the pre-synaptic inhibitory neuron show many different activity patterns, we comprehensively detail the activity patterns that arise across $g_{syn},g_{gap}$ parameter space (Fig.~\ref{fig3} (A)) and identify regions of periodic (yellow region), complex oscillations (narrow black region indicated by an arrow) and no oscillations (white region) when the system is subjected to a brief pulse. We see that the complex oscillations are limited to a narrow range of weak gap-junctional $g_{gap}$ and for strong synaptic $g_{syn}$ coupling strengths. Furthermore, the range of $g_{gap}$ for which the system shows oscillations increases with $g_{syn}$ and the boundary between periodic and no oscillation regime shows a monotonic behavior and the precise shape of the boundary is attributed to the choice of initial conditions (here, the resting state values). We know from Fig.~\ref{fig2} that the coupled system gives rise to chaotic behavior for synaptic conductance as high as $g_{syn}=0.81$, when a brief pulse is applied. In order to understand if the region of complex oscillations always include chaotic behavior, we chose $g_{syn}=0.5$ for this analysis, as complex oscillations begin to appear close to this value of $g_{syn}$. The bifurcation diagram obtained by varying $g_{gap}$ (Fig.~\ref{fig3} (B)) shows the presence of complex dynamics viz., period-2, period-4 oscillations, but for a very narrow region of $g_{gap}$. 
Although the system undergoes a period-doubling bifurcation, their activity is prematurely terminated with lowering $g_{gap}$ and hence the system does not show chaotic behavior. This shows that the coupled system with a brief pulse can exhibit chaotic behavior but at a much higher value of $g_{syn}$. \begin{figure}[htbp!] \centering \includegraphics{Fig4_new1.pdf} \caption{(Color online) Bi-stability observed in $N=2$ coupled neurons connected by an inhibitory synapse and a gap-junction, starting from random initial conditions. (A) The bifurcation diagram is plotted by varying gap-junctional conductance $g_{gap}$ values, at a fixed value of synaptic conductance $g_{syn}$. Plotted along the y-axis are the peak values of the pre-synaptic membrane potential $V_{pre}$, obtained for the last 100 time points. Starting from random initial conditions for each value of $g_{gap}$, the coupled neurons exhibit co-existence of limit cycle and fixed point attractors. Whereas an equivalent diagram (plotted at same $g_{syn}$ as (A)) shown in Fig.~\ref{fig2} (A), exhibited only one stable attractor for a given value of $g_{gap}$ owing to the resting state initial conditions of the neurons. (B) The probability of obtaining oscillations ($P_{osc}$) in $(g_{syn},g_{gap})$ parameter space for the coupled neurons is obtained starting from random initial conditions. $P_{osc}$ represents the basin size of limit cycle attractor and this value increases with the strength of inhibition. Note that a minimum $g_{gap}$ is required for the coupled system to show oscillations. The blue broken line indicates the value of $g_{syn} = 0.81$ corresponding to the bifurcation diagram in (A). Both (A) and (B) are obtained for 100 random initial conditions.} \label{fig4} \end{figure} \begin{figure}[htbp!] \centering \includegraphics{Fig5.pdf} \caption{(Color online) Persistent activity in a multiplex set-up of $N=10$ neurons coupled through synapses and gap-junctions, arranged in a one-dimensional ring. 
(A) Schematic representation of a network of coupled inhibitory neurons (black circles) connected to their nearest neighbors by gap-junctions (shown in blue arrows). The red lines represent the inhibitory synaptic links between randomly chosen pairs of neurons. Only one of the many pre-synaptic neurons (marked in red) receives a short pulse ($I_{ext}$). Simulating such a system by varying the number of synaptic connections ($n_{syn}$) for different gap-junctional conductance values $g_{gap}$ can result in the convergence of the system to one or more of the following attractors, namely (1) a fixed point attractor (with no oscillations) (2) a chimera state with oscillations of only few of the neurons (3) a global oscillatory state, i.e. oscillations of all the neurons in the system. (B-C) Co-existence of multiple attractors (marked by 1, 2 and 3, whose characteristic dynamics are described above) is indicated by the variation in their basin sizes (BS) for $g_{gap}=0$ and $g_{gap}= 0.05$ respectively, where BS represents the fraction of initial conditions that result in a particular dynamical attractor. (D) The probability of obtaining active neurons (showing oscillations) ($P_{act}$) for different values of $n_{syn}$ (along the x-axis) and $g_{gap}$ (along the y-axis). Two different transitions occur at $g_{gap}=0.02$ and $g_{gap}=0.03$ respectively and at every transition (along increasing $g_{gap}$), a non-zero probability of activity $P_{act}$ is observed even for lower $n_{syn}$.} \label{fig5} \end{figure} To quantitatively analyze the robustness of the observed oscillatory behavior and the oscillatory and non-oscillatory boundary, we considered the minimalistic network of two coupled neurons with each neuron starting from a random initial state (as opposed to the special resting state initial condition used for the rest of simulations in this paper). The behavior of the system is studied by applying a brief pulse to the pre-synaptic neuron.
The bifurcation diagram Fig.~\ref{fig4} (A) shows that the system either goes to the oscillatory branch (top) or stable fixed point (bottom), thereby exhibiting a bi-stable behavior, starting from a random initial state. The synaptic conductance value for which the bifurcation diagram is plotted is $g_{syn}=0.81$ and the blue broken line in Fig.~\ref{fig4} (B) indicates the same. The bifurcation diagrams in Fig.~\ref{fig2} (A) and Fig.~\ref{fig4} (A), although obtained for the same set of parameter values, differ in the choice of their initial conditions. We notice that the former does not show a bi-stable behavior due to resting state initial conditions, whereas the latter with random initial conditions exhibits bi-stability. Additionally, the probability of obtaining sustained oscillations $P_{osc}$ in $(g_{syn},g_{gap})$ parameter space, is shown in Fig.~\ref{fig4} (B). The $P_{osc}$ provides information on the basin size (which gives information on fraction of initial conditions leading to a limit cycle attractor) for different conductance values. We considered 100 trials, each starting from a random initial condition and our results show that the probability of obtaining oscillations increases with the value of $g_{syn}$, provided the gap-junctional conductance is greater than a lower cut-off value, which is $g_{gap}>0.03$. Although the activity of a pair of coupled inhibitory neurons has been analyzed in detail, it is important to extend the study to a network of inhibitory neurons and identify the parametric requirements that give rise to persistent activity in such networks. For this purpose, we consider a network of $N=10$ neurons arranged in a one-dimensional ring topology coupled through randomly chosen pairs of synapses and nearest neighbor gap-junctions and the results are summarized in Fig.~\ref{fig5}.
Although the synaptic conductance $g_{syn}=0.81$ is fixed, the number of synaptic connections $n_{syn}$ in the network can be varied from a minimum of one connection to a maximum of $N(N-1)$ connections. Hence, for each value of gap-junctional conductance $g_{gap}$, the value of $n_{syn}$ is varied and the network activity is observed, by stimulating only one of the pre-synaptic neurons with a short sub-threshold stimulus $I_{ext}$. Such a system exhibits co-existence of multiple attractors viz., (1) a complete quiescent state or fixed point attractor, where the initial stimulus is not sufficient to generate oscillations (2) an intermediate state where only few of the oscillators oscillate while others are at rest, which is the characteristic of a chimera state and (3) a global oscillatory state, i.e. all the nodes (neurons) exhibit self-sustained oscillations. The Basin size (BS) (corresponding to each of the attractors mentioned above) displayed in Fig.~\ref{fig5} (B-C) is obtained for two different values of $g_{gap}=0, 0.05$ respectively, as the $n_{syn}$ is varied. In each of these panels, the fraction of initial conditions that converge to one of the attractors mentioned above are marked in blue, red and green colors respectively. What we observe is that the network with only synaptic connections ($g_{gap}=0$) as in Fig.~\ref{fig5} (B) demands as high as $n_{syn}=40$ synapses for all the neurons to show oscillations. For extremely low value of $n_{syn}$, the system does not show oscillations and later we find co-existence of fixed point attractor and a chimera state, eventually leading to global oscillatory state. On the contrary, the system with $g_{gap}=0.05$ shown in Fig.~\ref{fig5} (C), requires comparatively fewer synaptic connections $n_{syn} \approx 10$ for oscillation of all the nodes.
Moreover, we see that even a single synapse can result in oscillation of at least a few of the neurons, taking the system directly to a chimera state, unlike the $g_{gap}=0$ case. The surface plot Fig.~\ref{fig5} (D), with varying $n_{syn}$ along the x-axis and different $g_{gap}$ along the y-axis shows the probability of obtaining activity in the network $P_{act}$ (shown along the z-axis). On increasing $g_{gap}$, we observe two distinct transitions corresponding to two different values of gap-junctional conductance, viz. $g_{gap}=0.02$ and $0.03$ respectively. At each transition (along the increasing $g_{gap}$ direction), we observe a higher probability of obtaining activity $P_{act}$ corresponding to lower $n_{syn}$. In other words, increasing $g_{gap}$ increases the $P_{act}$ even with a smaller number of synaptic connections. Hence, for $g_{gap} \geq 0.03$, we obtain a non-zero probability of obtaining oscillations when compared to conductance values lower than 0.03, i.e., $g_{gap}<0.03$. A further increase in gap-junctional conductance $g_{gap} > 0.05$ results in failure of sustained activity, mainly due to the synchronizing ability of gap-junctions. Even as we increase the system size, the results remain qualitatively the same (See Supporting Information Fig.~\ref{SI5}). Hence, it is apparent that weak gap-junctional conductance $g_{gap}$ helps in achieving global oscillatory state (with all the neurons oscillating) even with minimal synaptic connections, under strong inhibition (with $g_{syn} = 0.81$). \section*{IV. Conclusion} To conclude, we have shown how the interplay between synaptic inhibition and electrical gap-junctions results in the emergence of persistent activity.
We have analyzed various combinations of two neurons connected with synapses and gap-junctions, and we infer the following: (a) networks of excitatory neurons alone cannot exhibit persistent activity, (b) strong inhibition is required to maintain persistent activity in the presence of gap-junctions. Our results are in agreement with~\cite{Ermentrout2006}, where it is shown that a weak synapse cannot overcome the effect of gap-junctions causing the system to converge to the stable attractor state, thereby suggesting the requirement of strong synaptic coupling to maintain persistent activity. Through our systematic investigation, we uncover the complexity involved in a minimal model of a pair of inhibitory neurons coupled through both the aforementioned synaptic modalities. We further show that this simple system undergoes a series of period-doubling bifurcations leading to chaos. Hence, our work highlights not just the combined effect of chemical and electrical synapses but also outlines the importance of inhibitory neurons in generating and maintaining persistent yet complex dynamics in networks of coupled neurons. Furthermore, our simulations on a one-dimensional ring topology give a preliminary understanding of the role of gap-junctions in achieving persistent activity. We report here the existence of a chimera pattern that comprises neurons exhibiting both oscillatory and non-oscillatory states. Such chimera patterns have been reported in our earlier study~\cite{Janaki2019} in the context of biological pattern formation. Studies on networks of excitatory and/or inhibitory neurons with synapses and gap-junctions have been shown to give complex spatiotemporal dynamics, viz. chimera-like pattern~\cite{Mishra2017}, transient chaotic behavior~\cite{Keplinger2014} etc. Hence it would be intriguing to analyze the spatiotemporal dynamics exhibited by purely inhibitory neurons arranged in one- and two-dimensional lattices.
By using a multiplex framework with nearest-neighbor gap-junctional coupling and long-range synaptic inhibition, one can study the collective dynamics exhibited by inhibitory neurons under strong synaptic inhibition. Additionally, by evolving the gap-junctional layer alone under activity-dependent plasticity~\cite{Haas2011, Pernelle2017}, keeping the synaptic layer frozen, one could potentially study the emergent behavior in a system of inhibitory neurons. Thus, our study on networks with strong inhibitory coupling when extended to large system sizes might have potential implications in maintaining working memory as they are shown to store many more patterns than their excitatory counterparts~\cite{Mongillo2018, Kim2020}. As our study consists of identical neuronal elements, extending this work to study the effects of heterogeneity could be another exciting direction of research. \begin{acknowledgments} RJ has been supported by IMSc Project of Interdisciplinary Science \& Modeling (PRISM), and the Center of Excellence in Complex Systems and Data Science, both funded by the Department of Atomic Energy, Government of India. RJ would like to thank Dr. Sitabhra Sinha and The Institute of Mathematical Sciences for the support. We also thank Dr. Rita John, Department of Theoretical Physics, University of Madras for the constant encouragement. The simulations and computations required for this work were supported by High Performance Computing facility (Nandadevi) of The Institute of Mathematical Sciences. The Nandadevi cluster is partly funded by the IT Research Academy (ITRA) under the Ministry of Electronics and Information Technology (MeitY), Government of India (ITRA-Mobile Grant No. ITRA/15(60)/DIT NMD/01). We thank Anand Pathak and Ria Ghosh for useful discussions. We also thank Shakti N.~Menon, Soumya Easwaran and K.~A.~Chandrashekar and Rishu Kumar Singh, Tanmay Mitra and Amit Sharma for their valuable suggestions in shaping the manuscript. \end{acknowledgments}
1,108,101,565,886
arxiv
\section*{Abstract} \setcounter{page}{1} Consider the Fourier expansions of two elements of a given space of modular forms. How many leading coefficients must agree in order to guarantee that the two expansions are the same? Sturm~\cite{Sturm} gave an upper bound for modular forms of a given weight and level. This was adapted by Ram Murty~\cite{Murty}, Kohnen~\cite{Kohnen} and Ghitza~\cite{Ghitza} to the case of two eigenforms of the same level but having potentially different weights. We consider their expansions modulo a prime ideal, presenting a new bound. In the process of analysing this bound, we generalise a result of Bach and Sorenson~\cite{Bach}, who provide a practical upper bound for the least prime in an arithmetic progression. \section*{Notation and terminology} All modular forms discussed are of positive integer weight $k$ and level $N$. A modular form of weight $k$ and character $\chi$ for $\Gamma_0(N)$ satisfies \begin{equation} f\left(\frac{az+b}{cz+d}\right)=\chi(d)(cz+d)^k f(z)\quad\text{for all } \begin{pmatrix}a&b\\c&d\end{pmatrix}\in \Gamma_0(N). \end{equation} By \emph{eigenform} we mean an eigenvector for the full Hecke algebra. If $f$ is a modular form then $a_n(f)$ denotes the $n$th Fourier coefficient: \begin{equation} f(z) = \sum_n a_n(f) e^{2 \pi i nz}. \end{equation} The symbols $p$ and $\ell$ are reserved for prime numbers. A \emph{prime primitive root} modulo $p$ is a prime that is also a primitive root modulo $p$. We write $f \sim g$ to mean that the ratio of the two functions tends to 1 in some limit, and define the equivalence relation $\sim$ analogously for sequences. The Euler totient function is denoted by $\varphi$. By GRH we mean the generalisation of the Riemann hypothesis to Dirichlet $L$-functions. If $a$ and $q$ are relatively prime positive integers and $x \ge 1$ is a real number, then $\pi_{a,q}(x)$ denotes the number of $\ell \le x$ such that $\ell \equiv a \mod q$. We use Landau `big O' notation in the standard way. 
\section{Introduction} \label{intro} We present a new bound for the number of leading Fourier coefficients that one needs to compare in order to distinguish two eigenforms, of potentially different weights, modulo a prime ideal. Bounds of this flavour are of great practical use in modular forms research, and have received much attention (e.g. \cite{Murty}, \cite{Kohnen}, \cite{Ghitza}, \cite{GH}, \cite{Kowalski}) since the groundbreaking work of Sturm~\cite{Sturm}: \begin{thm} [Sturm bound, see {\cite[Theorem 9.18]{Stein}}] Let $f$ be a modular form of weight $k$ for a congruence subgroup $\Gamma$ of index $i(\Gamma)$ inside $SL_2(\mathbb Z)$. Let $R$ be the ring of integers of a number field, and assume that $R$ contains the Fourier coefficients of $f$. Let $\mathfrak p$ be a prime ideal in $R$, and assume that $f \not \equiv 0 \mod \mathfrak p$. Then there exists \begin{equation} n \le \frac {k \cdot i(\Gamma)}{12} \end{equation} such that $a_n(f) \not \equiv 0 \mod \mathfrak p$. \end{thm} We will use Buzzard's adaptation of the Sturm bound to modular forms with character: \begin{cor} [see {\cite[Corollary 9.20]{Stein}}] \label{BuzzardCor} Let $f$ and $g$ be modular forms of weight $k$ and character $\chi$ for $\Gamma_0(N)$. Let $R$ be the ring of integers of a number field, and assume that $R$ contains the Fourier coefficients of $f$ and $g$. Let $\mathfrak p$ be a prime ideal in $R$, and assume that $f \not \equiv g \mod \mathfrak p$. Then there exists \begin{equation} n \le \frac k{12} [SL_2(\mathbb Z):\Gamma_0(N)] \end{equation} such that $a_n(f) \not \equiv a_n(g) \mod \mathfrak p$. \end{cor} Our research is strongly motivated by work of Ram Murty~\cite{Murty}: \begin{lemma} [see {\cite[Lemma 2]{Ghitza}}] \label{MurtyLemma} Let $f$ and $g$ be eigenforms of respective weights $k_1 \ne k_2$ for $\Gamma_0(N)$, and let $\ell$ be the least prime not dividing $N$. Then there exists $n \le \ell^2$ such that $a_n(f) \ne a_n(g)$. 
\end{lemma} Our main result concerns eigenforms modulo a prime ideal: \begin{thm} \label{main} Let $f$ and $g$ be normalised eigenforms for $\Gamma_0(N)$, with character $\chi$ and respective weights $k_1 \le k_2$. Let $R$ be the ring of integers of a number field containing the Fourier coefficients of $f$ and $g$, and let $\mathfrak p$ be a nonzero prime ideal in $R$. Define $p$ by $p \mathbb Z = \mathfrak p \cap \mathbb Z$, assume that $p \ge 5$, and assume that $f \not \equiv g \mod \mathfrak p$. Then there exists \begin{equation} \label{GhitzaBound} n \le \max\left\{ g^*(p,N)^2, \frac{k_2}{12}[SL_2(\mathbb Z):\Gamma_0(N)] \right\} \end{equation} such that $a_n(f) \not\equiv a_n(g) \mod \mathfrak p$, where $g^*(p,N)$ is the least prime primitive root modulo $p$ that does not divide $N$. \end{thm} We note that Kohnen has obtained a similar result~\cite[Theorem 4]{Kohnen}, replacing $g^*(p,N)^2$ by the constant $900$ in~\eqref{GhitzaBound}, at the expense of requiring $(N, 30)=1$ and only getting a bound for infinitely many (rather than all) prime ideals $\mathfrak p$ of $R$. Our argument can be modified to deal with the excluded cases $p=2$ and $p=3$, yielding (slightly weaker) versions of Theorem~\ref{main}. We relegate these special cases to Section~\ref{2and3}. In Section \ref{proof} we prove Theorem \ref{main}. In Section \ref{asymptotics}, we provide asymptotics (as $N \to \infty$) for the two quantities in the bound \eqref{GhitzaBound}, establishing that the second is asymptotically greater. In Section \ref{practical} we determine how large $N$ has to be to ensure that the second expression in \eqref{GhitzaBound} is indeed the larger of the two. The crucial ingredient in Section \ref{practical} is our generalisation (see Corollary \ref{dist}) of an explicit Linnik-type bound (see Theorem \ref{BS}) of Bach and Sorenson. We thank James Withers for several fruitful discussions and observations. We thank M. Ram Murty and David Loeffler for some useful comments. 
The first author was supported by the Elizabeth and Vernon Puzey scholarship, and is grateful towards the University of Melbourne for their hospitality while preparing this memoir. The second author was supported by Discovery Grant DP120101942 from the Australian Research Council. \section{Proof of Theorem \ref{main}} \label{proof} Since $p -1 \ge 4$ is even, we may use the (appropriately normalised) Eisenstein series of weight $p-1$, which is the modular form for $SL_2(\mathbb Z)$ given by \begin{equation} E_{p-1}(z) = 1- \frac{2p-2}{B_{p-1}} \sum_{n=1}^\infty \sigma_{p-2}(n) e^{2 \pi i nz}, \end{equation} where $B_{p-1}$ is the $(p-1)$st Bernoulli number (a rational number) and $\sigma_{p-2}(n) = \sum_{d|n} d^{p-2}$; see~\cite[Subsection 2.1.2]{Stein}. If $k_1 = k_2$ then the result follows immediately from Corollary \ref{BuzzardCor}, so henceforth assume that $k_1 < k_2$. Put $\ell = g^*(p,N)$. By standard formulae (see~\cite[Proposition 5.8.5]{Diamond}), \begin{equation}\label{hecke} \chi(\ell) \ell^{k_1-1} = a_\ell(f)^2 - a_{\ell^2}(f) \qquad \text{and} \qquad \chi(\ell) \ell^{k_2-1} = a_\ell(g)^2 - a_{\ell^2}(g). \end{equation} We may assume that $a_\ell(f) \equiv a_\ell(g) \mod \mathfrak p$ and $a_{\ell^2}(f) \equiv a_{\ell^2}(g) \mod \mathfrak p$, since otherwise the result is immediate. As $(\ell, N)=1$, it follows from~\eqref{hecke} that \begin{equation} \ell^{k_1} - \ell^{k_2} \in \mathfrak p \cap \mathbb Z = p\mathbb Z. \end{equation} As $\ell$ is a primitive root modulo $p$, this implies that $p-1$ divides $k_2 - k_1$, so put \begin{equation} r = \frac{k_2-k_1}{p-1}\end{equation} and $f^\prime = E_{p-1}^rf$. The von Staudt-Clausen theorem (see~\cite[Theorem 5.8.4]{BorevichShafarevich}) implies that $p$ divides the denominator of $B_{p-1}$, so \begin{equation} E_{p-1} \equiv 1 \mod p \end{equation} as power series. Now $f^\prime \equiv f \mod pR$, so $f^\prime \equiv f \mod \mathfrak p$. 
As $f^\prime$ is a modular form of weight $k_2$ and character $\chi$ for the congruence subgroup $\Gamma_0(N)$, the result now follows from Corollary \ref{BuzzardCor}. \section{Asymptotics} \label{asymptotics} We show that, of the two expressions in Theorem \ref{main}, the second is greater, providing that $N$ is sufficiently large. The key result in this section is: \begin{thm} \label{key} Let $p \ge 5$. Then \begin{equation} \limsup_{N \to \infty} \frac{g^*(p,N)}{\log N} = \frac{p-1}{\varphi(p-1)}. \end{equation} \end{thm} The group index $[SL_2(\mathbb Z):\Gamma_0(N)]$ is classically known (see~\cite[Exercise 1.2.3]{Diamond}): \begin{equation} \label{ShimuraFormula} [SL_2(\mathbb Z):\Gamma_0(N)] = N \prod_{\ell | N} \left(1+\frac{1}{\ell}\right). \end{equation} In particular $[SL_2(\mathbb Z):\Gamma_0(N)] \ge N$ which, upon proving Theorem \ref{key}, will verify the assertion made at the beginning of this section. We include the supremal asymptotics for $[SL_2(\mathbb Z):\Gamma_0(N)]$ purely for interest's sake (this is proved in a similar vein to Theorem \ref{key}): \begin{prop} \label{side} \begin{equation} \limsup_{N \to \infty} \frac{[SL_2(\mathbb Z):\Gamma_0(N)]}{N \log \log N} = \frac{6e^\gamma}{\pi^2}, \end{equation} where $\gamma$ is the Euler-Mascheroni constant. \end{prop} Our goal for the remainder of this section is to prove Theorem \ref{key}. For positive integers $t$, let $x_t$ be the $t$th smallest prime primitive root modulo $p$, and let $N_t = x_1 \cdots x_t$ (also put $N_0 =1$). The sequence $(N_t)$ is the worst case scenario: if $N$ is a positive integer then there exists $t \ge 0$ (defined by $g^*(p,N) = x_{t+1}$) such that $g^*(p,N_t) =g^*(p,N)$ and $N_t \le N$. Put \begin{equation} c = \frac{p-1}{\varphi(p-1)} > 1. \end{equation} We will establish Theorem \ref{key} via the following: \begin{prop} \label{intermediate} \begin{equation} \lim_{t \to \infty} \frac{x_t}{\log N_t} = c. 
\end{equation} \end{prop} This in turn is established by determining the asymptotics of the sequence $(x_t)$: \begin{lemma} \label{asymp} \begin{equation} x_t \sim ct \log t. \end{equation} \end{lemma} We require some basic results on asymptotic equivalence: \begin{lemma} \label{basic} \begin{enumerate}[(i)] \item Let $(a_t)$ and $(b_t)$ be sequences of positive real numbers. Assume that $a_t \sim b_t$ and that $b_t \to \infty$ as $t \to \infty$. Then $\log a_t \sim \log b_t$. \item Let $(a_t), (b_t), (c_t)$, and $(d_t)$ be sequences of positive real numbers such that $a_t \sim c_t$ and $b_t \sim d_t$. Then $a_t+b_t \sim c_t + d_t$. \end{enumerate} \end{lemma} Armed with these tools, we prove Lemma~\ref{asymp}, Proposition~\ref{intermediate}, and Theorem~\ref{key}. \begin{proof}[Proof of Lemma~\ref{asymp}] We interpret $t$ as the number of prime primitive roots modulo $p$ that are less than or equal to $x_t$. Each of these lies in one of the $\varphi(p-1)$ primitive root residue classes, so summing the prime number theorem for arithmetic progressions over these residue classes yields \begin{equation} \label{I} t \sim \frac{\varphi(p-1)}{p-1} \cdot \frac{x_t}{\log x_t},\end{equation} so \begin{equation} \label{II} \log t \sim \log \frac{\varphi(p-1)}{p-1} + \log x_t - \log \log x_t \sim \log x_t. \end{equation} Combining the equivalences \eqref{I} and \eqref{II} completes the proof. \end{proof} \begin{proof}[Proof of Proposition~\ref{intermediate}] Fix $\varepsilon \in (0,c-1)$, and choose (by Lemma \ref{asymp}) a positive integer $T$ such that if $t > T$ then \begin{equation} (c-\varepsilon)t \log t < x_t < (c+\varepsilon) t \log t. 
\end{equation} Consider $r > T$, and define \begin{equation} u_r = \log(x_1 \cdots x_T) + (r-T) \log (c-\varepsilon) + \log \left(\prod_{t=T+1}^r t \right)+ \log \prod_{t=T+1}^r \log t \end{equation} and \begin{equation} v_r = \log(x_1 \cdots x_T) + (r-T) \log (c+\varepsilon) + \log \left(\prod_{t=T+1}^r t \right)+ \log \prod_{t=T+1}^r \log t. \end{equation} Using Stirling's approximation and Lemma \ref{asymp}, \begin{equation} u_r \sim r \log (c-\varepsilon) + \log (r!) \sim r \log(c-\varepsilon) + r \log r \sim r \log r \sim \frac1c x_r, \end{equation} and similarly $v_r \sim \frac1c x_r$. Since $u_r < \log N_r < v_r$, the result now follows from the sandwich rule. \end{proof} \begin{proof}[Proof of Theorem~\ref{key}] By Lemma \ref{asymp} and Proposition \ref{intermediate}, \begin{equation} \frac{g^*(p,N_t)}{\log N_t} = \frac{x_{t+1}}{\log N_t} = \frac{x_{t+1}}{x_t} \cdot \frac{x_t}{\log N_t} \sim \frac{c(t+1) \log(t+1)}{ct \log t} \cdot \frac{x_t}{\log N_t} \to c, \end{equation} so it remains to show that $\limsup_{N \to \infty} \frac{g^*(p,N)}{\log N} \le c$. Fix $\varepsilon >0$. For each positive integer $N$, choose (by our `worst case scenario' property) $t_N \ge 0$ such that $g^*(p,N_{t_N}) = g^*(p,N)$ and $N_{t_N} \le N$. Choose a positive integer $C$ such that if $t \ge C$ then $\frac{g^*(p,N_t)}{\log N_t} \le c + \varepsilon$, define the real number \begin{equation} M = \sup_{t > 0} \frac{g^*(p,N_t)}{\log N_t}, \end{equation} and put \begin{equation} K = \exp \frac{M \log N_C}{c+\varepsilon}. \end{equation} Let $N \ge K$. 
If $t_N \ge C$ then \begin{equation} \frac{g^*(p,N)}{\log N} \le \frac{g^*(p,N_{t_N})}{\log N_{t_N}} \le c + \varepsilon, \end{equation} while if $t_N < C$ then \begin{align} \frac{g^*(p,N)}{\log N} &= \frac{\log N_{t_N}}{\log N} \cdot \frac{g^*(p,N_{t_N})}{\log N_{t_N}} < \frac{\log N_{C}}{\log K} \cdot \frac{g^*(p,N_{t_N})}{\log N_{t_N}} \\ &= \frac{c+\varepsilon}M \cdot \frac{g^*(p,N_{t_N})}{\log N_{t_N}} \le c+\varepsilon, \end{align} which completes the proof since $\varepsilon >0$ was chosen arbitrarily. \end{proof} \section{A practical comparison} \label{practical} We know from Section \ref{asymptotics} that, for sufficiently large $N$, \begin{equation} g^*(p,N)^2 \le \frac1{12} N \prod_{\ell |N} \left(1+\frac1\ell\right) \le \frac {k_2}{12} N \prod_{\ell |N} \left(1+\frac1\ell\right), \end{equation} in the context of Theorem \ref{main}. In this section we describe how large $N$ has to be, given $p$, to ensure that \begin{equation} \label{comp} 12g^*(p,N)^2 \le N \prod_{\ell | N} \left(1+\frac1\ell\right). \end{equation} Fix $p \ge 5$, and let $\hat N$ be minimal such that if $N \ge \hat N$ then the inequality \eqref{comp} holds. Our strategy will be to first establish a theoretical upper bound for $\hat N$, and then to determine $\hat N$ precisely using the software \emph{Sage}~\cite{Sage}. Our theoretical upper bound is $N_{r-1}$ in the following: \begin{thm} \label{theoretical} Assume GRH and let $p \ge 5$. Let $r = r(p)$ be minimal such that $N_{r-1} \ge 29.2032p^4(\log p)^4$, and suppose $N \ge N_{r-1}$. Then \begin{equation} N \ge 12g^*(p,N)^2,\end{equation} so in particular the inequality \eqref{comp} holds. \end{thm} To obtain this bound, we study the `worst case scenario' $N = N_{r-1}$. 
Our bound in this situation is: \begin{prop} \label{WorstCaseII} Assume GRH, let $p \ge 5$, and let $r$ be a positive integer such that \begin{equation} N_{r-1} \ge 29.2032p^4(\log p)^4.\end{equation} Then \begin{equation} \label{WorstComp} N_{r-1} \ge 12x_r^2.\end{equation} \end{prop} \subsection{The distribution of prime primitive roots modulo $p$, and more generally that of primes in arithmetic progression} In pursuit of Proposition \ref{WorstCaseII}, we study the distribution of prime primitive roots modulo $p$. Specifically, we seek an explicit lower bound for the counting function. As this task is of intrinsic interest, we now indulge in a discussion that goes slightly beyond what is strictly necessary for our purposes. For a more comprehensive review, see the introduction of~\cite{Bach}. Many of the results in this section can be generalised to composite moduli. There are two main approaches to our task: (i) break the problem into $\varphi(p-1)$ primitive root residue classes modulo $p$ and study the distribution of primes in arithmetic progression, or (ii) specifically use the primitive root property. The approach (ii) is currently superior for deriving upper bounds for the least prime primitive root modulo $p$, for instance (assuming the Riemann hypothesis for all Hecke characters) Shoup~\cite{Shoup} uses sieve methods to provide the upper bound \begin{equation} O(r^4 (\log r +1)^4 (\log p)^2), \end{equation} where $r$ is the number of distinct prime divisors of $p-1$; note the discussion following~\cite[Corollary 3.1]{Martin}. It is difficult to understand the distribution of such primes via the approach (ii), so we focus on (i). There are many classical asymptotic results, such as the prime number theorem for arithmetic progressions. 
For the least prime in an arithmetic progression $a \mod p$, where $p$ does not divide $a$, Linnik (see~\cite{Linnik1} and~\cite{Linnik2}) famously provided the upper bound \begin{equation} p^{O(1)},\end{equation} and the exponent can be 5.2 unconditionally, if the bound is multiplied by a constant (see~\cite{Xylouris}). Conditional results are much stronger, and the conjectured upper bound is $p^2$ (see~\cite{Heath-Brown}). Bach and Sorenson~\cite{Bach} derived an explicit version of Linnik's theorem: \begin{thm} [see {\cite[Theorem 5.3]{Bach}}] \label{BS} Assume GRH. Let $a$ and $q$ be relatively prime positive integers. Then there exists $\ell \equiv a \mod q$ such that \begin{equation} \ell < 2(q \log q)^2. \end{equation} \end{thm} \begin{proof}[Summary of their approach] For (Dirichlet) characters $\chi$ modulo $q$, real numbers $x >1$, and real numbers $\alpha$, put \begin{equation} S(x,\chi) = \sum_{n<x} \Lambda(n) \chi(n) (n/x)^\alpha \log (x/n), \end{equation} where $\Lambda$ is the von Mangoldt function. Let $a^{-1}$ denote the multiplicative inverse of $a$ modulo $q$. By orthogonality, \begin{equation} \label{BSkey1} \sum_{\chi \mod q} \chi(a^{-1})S(x,\chi) = \varphi(q) \sum_{\substack{n<x \\ n \equiv a \mod q}} \Lambda(n) (n/x)^\alpha \log (x/n).\end{equation} Suppose there exist no primes $\ell < x$ that are congruent to $a$ modulo $q$. Then \begin{equation} \sum_{\chi \mod q} \chi(a^{-1})S(x,\chi) = p(x), \end{equation} where $p(x)$ is the contribution of proper prime powers $n$ to the right hand side of equation \eqref{BSkey1}. For characters $\chi \mod q$, let $\hat \chi$ denote the primitive character induced by $\chi$. Then \begin{equation} \label{BSkey2} \Big | \sum_{\chi \mod q} \chi(a^{-1})S(x,\hat \chi) \Big | \le |i(x)|+ p(x), \end{equation} where \begin{equation} i(x) = \sum_{\chi \mod q} \chi(a^{-1}) (S(x,\hat \chi) - S(x,\chi)). 
\end{equation} In~\cite[Subsection 4.1]{Bach}, tools from algebraic number theory and analytic number theory are used to bound $|i(x)|$ from above. In~\cite[Subsection 4.2]{Bach}, complex integration is used to estimate $|\sum_{\chi \mod q} \chi(a^{-1})S(x,\hat \chi)|$. In~\cite[Subsection 4.3]{Bach}, known estimates for a certain arithmetic function provide an upper bound for $p(x)$. In~\cite[Subsection 5.2]{Bach}, the cases $q \ge 1000$ and $q < 1000$ are considered separately. In the first case computer programs are used to choose $x$ and $\alpha$ so that the inequality \eqref{BSkey2} is invalidated, thereby proving that some prime $\ell < x$ is congruent to $a$ modulo $q$; the second case is handled by brute force. \end{proof} If further details are sought then~\cite[special case (1) on p362]{Bach0} and the proof of~\cite[Corollary~3.4]{Bach} describe our specific context within~\cite{Bach}. Note that~\cite[Theorem 5.3]{Bach} assumes the generalisation of the Riemann hypothesis to all Hecke $L$-functions, whereas the statement of Theorem \ref{BS} merely assumes it for Dirichlet $L$-functions. The stronger assumption is necessary for the more general results in~\cite{Bach}, but only GRH is needed for~\cite[Theorem 5.3]{Bach}. To justify this claim we use the notation of~\cite[Subsection 4.2]{Bach}, where Bach and Sorenson use the assumption for $\zeta_E$ and $L(\cdot, \hat \chi)$. The latter is a Dirichlet $L$-function, since $K=\mathbb Q$ in our context, and the former is a product of Dirichlet $L$-functions (see~\cite[equation (2.2)]{Bach}), since for our purposes $E = \mathbb Q(\zeta_q)$ is an abelian extension of $K=\mathbb Q$, where $\zeta_q$ is a primitive $q$th root of unity. The constant 2 appears to have been chosen for simplicity. Following the proof of~\cite[Theorem~5.3]{Bach}, but not rounding up until the end, and insisting that $q > 2$, the constant 2 can be improved to 1.56: \begin{thm} \label{BS'}Assume GRH. 
Let $a$ and $q>2$ be relatively prime integers. Then there exists $\ell \equiv a \mod q$ such that \begin{equation} \ell < 1.56 (q \log q)^2. \end{equation} \end{thm} In fact the constant can be improved a little more (for $q>2$), but our theoretical bound for $\hat N$ will serve only as a ceiling for brute force computation, so we satisfy ourselves with the constant 1.56. \subsection{A generalisation of Theorem \ref{BS'}} We seek not the least prime in an arithmetic progression but the distribution of such primes, so we provide the following corollary: \begin{cor} \label{dist} Assume GRH. Let $a$ and $q>2$ be relatively prime integers, and let $t$ be a positive integer. Then \begin{equation} \label{distbound} \pi_{a,q}(1.56t^2 q^{2t} (\log q)^2) \ge q^{t-1}.\end{equation} \end{cor} \begin{proof} For each $s = 0,1,\ldots, q^{t-1}-1$, there exists $\ell \equiv a+sq \mod q^t$ such that \begin{equation} \ell \le 1.56t^2 q^{2t} (\log q)^2,\end{equation} by Theorem \ref{BS'}, since $(a+sq,q^t)=1$. These $\ell$ are distinct and congruent to $a$ modulo $q$. \end{proof} There are many ways in which to convert Corollary \ref{dist} into an explicit lower bound for $\pi_{a,q}(x)$ for all sufficiently large $x$; some are better asymptotically, while others do not require $x$ to be as large. Since our theoretical upper bound for $\hat N$ will serve merely as a ceiling for machine calculations, we have executed this fairly arbitrarily, and there may be other ways to improve our bound: \begin{lemma} \label{AP} Assume GRH. Let $a$ and $q \ge 5$ be relatively prime integers, and let \begin{equation} x \ge 6.24 q^4 (\log q)^2.\end{equation} Then \begin{equation} \pi_{a,q}(x) > x^{1/9}.\end{equation} \end{lemma} \begin{proof} Choose $t \ge 2$ such that \begin{equation} 1.56t^2q^{2t} (\log q)^2 \le x < 1.56(t+1)^2q^{2(t+1)}(\log q)^2. \end{equation} By Corollary \ref{dist}, \begin{equation} \pi_{a,q}(x) \ge \pi_{a,q}\left(1.56t^2 q^{2t} (\log q)^2\right) \ge q^{t-1}. 
\end{equation} Straightforward arithmetic confirms that $q^{t-1} > x^{1/9}$, completing the proof. \end{proof} By summing the bound \eqref{distbound} over the primitive root residue classes, we deduce: \begin{cor} \label{upper} Assume GRH, let $p>2$, and let $t$ be a positive integer. Then \begin{equation} x_{\varphi(p-1)p^{t-1}} \le 1.56t^2 p^{2t} (\log p)^2. \end{equation} \end{cor} \subsection{Completion of the proof of Theorem \ref{theoretical}} Now that we have an upper bound for the sequence $(x_r)$, we formulate a crude upper bound for the sequence $(N_r)$: \begin{lemma} \label{lower} Let $p \ge 5$. Then \begin{equation} N_{\varphi(p-1)} \ge (p+1)^{\varphi(p-1)/2}.\end{equation} \end{lemma} \begin{proof} Let $g_1, \ldots, g_{\varphi(p-1)}$ be integer representatives for the primitive root residue classes modulo $p$, with \begin{equation} 1< g_1 < g_2 < \ldots < g_{\varphi(p-1)} < p. \end{equation} These come in pairs of inverses modulo $p$, and no $g_i$ can pair with itself because its order modulo $p$ is $p-1 > 2$. The product of each pair is at least $p+1$, so \begin{equation} N_{\varphi(p-1)} = x_1 \cdots x_{\varphi(p-1)} \ge g_1 \cdots g_{\varphi(p-1)} \ge (p+1)^{\varphi(p-1)/2}. \end{equation} \end{proof} We show Proposition \ref{WorstCaseII} by first establishing a weaker bound: \begin{prop}\label{WorstCaseI} Assume GRH, let $p \ge 5$, and let $r$ be a positive integer such that \begin{equation} N_{r-1} \ge 467.2512p^8(\log p)^4.\end{equation} Then $N_{r-1} \ge 12 x_r^2$. \end{prop} \begin{proof} Proof by contradiction: assume that $N_{r-1} < 12 x_r^2$. Then \begin{equation} \label{first} x_r > 6.24 p^4 (\log p)^2, \end{equation} so Lemma \ref{AP} gives \begin{equation} \label{sumthis} \pi_{a,p}(x_r) > x_r^{1/9} \end{equation} for all integers $a$ that are not divisible by $p$. 
Since $r$ is the number of prime primitive roots modulo $p$ that are less than or equal to $x_r$, summing the inequality \eqref{sumthis} over all primitive root residue classes $a$ modulo $p$ yields \begin{equation} r> \varphi(p-1)x_r^{1/9}.\end{equation} Now \begin{equation} \label{second} N_{r-1} < 12 x_r^2 < 12 \left( \frac r {\varphi(p-1)} \right)^{18}. \end{equation} Specialising $t=2$ in Corollary \ref{upper} yields \begin{equation} x_{p\varphi(p-1)} \le 6.24p^4(\log p)^2,\end{equation} which together with the inequality \eqref{first} implies that $r > p \varphi(p-1)$. Induction shows that if $r>44$ then $N_{r-1} \ge 12(0.5r)^{18}$ (use the product of the first $r-1$ primes as a crude lower bound for $N_{r-1}$), which would contradict the inequality \eqref{second}. Hence $p \varphi(p-1) < r \le 44$, so $p=5,7$. In each of these cases $10 <r \le 44$ and $N_{10} > 12x_{44}^2$ (by computer check), completing the proof. \end{proof} Finally we prove Proposition~\ref{WorstCaseII} and Theorem~\ref{theoretical}. \begin{proof}[Proof of Proposition~\ref{WorstCaseII}] First assume that $p \ge 71$. In this case it is easy to show, by considering cases, that $\varphi(p-1) \ge 24$. Specialising $t=1$ in Corollary \ref{upper} yields \begin{equation} \label{t1} x_{\varphi(p-1)} \le 1.56p^2 (\log p)^2, \end{equation} so the result follows immediately if $r \le \varphi(p-1)$. However, if $r > \varphi(p-1)$ then, using Lemma~\ref{lower}, \begin{equation} N_{r-1} \ge N_{\varphi(p-1)} \ge (p+1)^{\varphi(p-1)/2} \ge (p+1)^{12} \ge 467.2512p^8 (\log p)^4, \end{equation} whereupon the result follows from Proposition \ref{WorstCaseI}. For each $p$ with $5 \le p < 71$, there are very few values of $r$ for which \begin{equation} 29.2032p^4(\log p)^4 \le N_{r-1} < 467.2512p^8(\log p)^4,\end{equation} so we computer check these cases and apply Proposition \ref{WorstCaseI} otherwise. 
\end{proof} \begin{proof}[Proof of Theorem~\ref{theoretical}] Let $g^*(p,N) = x_s$, so that $N \ge N_{s-1}$, and put $t = \max(r,s)$. Then \begin{equation} N_{t-1} \ge N_{r-1} \ge 29.2032 p^4 (\log p)^4 \end{equation} so, by Proposition \ref{WorstCaseII}, $N_{t-1} \ge 12 x_t^2$. Now \begin{equation} N \ge N_{t-1} \ge 12 x_t^2 \ge 12 x_s^2 = 12 g^*(p,N)^2.\end{equation} \end{proof} \subsection{Computation of $\hat N$ given $p$} Henceforth, let $r$ be as in Theorem \ref{theoretical}, and assume GRH. Now that we have a theoretical upper bound for $\hat N$, it is not too difficult to write a program that, given $p$, will compute $\hat N$ exactly. Still, it would be awfully slow to test the inequality \eqref{comp} for every $N < N_{r-1}$, so we shall describe an economising manoeuvre based on the following observation: \begin{lemma} \label{obs} Let $t$ be a positive integer, and suppose that $N \ge 12x_t^2$ is such that the inequality~\eqref{comp} does not hold. Then $N_t$ divides $N$. \end{lemma} \begin{proof} The hypotheses imply that $g^*(p,N) > x_t$, so $N_t$ divides $N$. \end{proof} So we only need to test the inequality \eqref{comp} for $N \le 12 x_1^2$ and for multiples of $N_t$ in the range \begin{equation} \label{range} \left[12x_t^2, 12x_{t+1}^2\right) \end{equation} ($t=1,2,\ldots,r-2$), since Lemma \ref{obs} and Theorem \ref{theoretical} imply that if $N \ge 12 x_{r-1}^2$ then the inequality \eqref{comp} holds. There is a reasonable upper bound \eqref{t1} for $x_{\varphi(p-1)}$, and hence for $x_1$, however in practice $x_1$ is very small. Moreover, for each $t$ there are very few (if any) multiples of $N_t$ in the range~\eqref{range}. Consequently, we have an extremely efficient method for determining $\hat N$ given $p$, and we could easily have done so for much larger $p$ than discussed below. 
By running the program we conclude as follows: \begin{prop} For $p \ge 5$, the inequality \eqref{comp} holds if $p < p^*$ and $N \ge N^*$ for the following pairs $(p^*,N^*)$: \begin{align*} &(4243,121424) \qquad &(2791,81550) \qquad &(691,48204) \qquad &(271,44158) \\ &(199,38858) \qquad &(151,24796) \qquad &(43,9049) \qquad &(19,5853). \end{align*} In particular, in any of these cases the bound in Theorem \ref{main} becomes \begin{equation} \frac{k_2}{12}N \prod_{\ell |N}\left(1+\frac1 \ell\right). \end{equation} \end{prop} These are best possible bounds for $\hat N$, since for each $p$ we computed $\hat N$ exactly. One might wonder why $\hat N$ is so large. Indeed $g^*(p,N)$ is typically very small, however there are some values (small multiples of the $N_t$) for which $g^*(p,N)$ is somewhat large, which can mean that the inequality \eqref{comp} suddenly fails. \section{The special cases $p=2$ and $p=3$} \label{2and3} As the considerations in this section are not crucial to the main point of the paper, we do not recall here the algebro-geometric definition of modular forms due to Deligne and Katz. The interested reader is invited to consult~\cite{Katz} or~\cite{Gross}. For any prime $p$, the Hasse invariant $A_p$ is a Katz modular form (mod $p$) of level one and weight $p-1$, with $q$-expansion \begin{equation*} A_p(q)=1. \end{equation*} As recalled in Section~\ref{proof}, if $p\geq 5$ then $A_p$ can be obtained as the reduction modulo $p$ of the Eisenstein series $E_{p-1}$. We say that $E_{p-1}$ is a lifting of $A_p$ to characteristic zero. If $p<5$, we can still lift $A_p$ to a form in characteristic zero, at the expense of increasing the level. We will use the following two results of Katz: \begin{thm}[see~{\cite[Theorem 1.7.1]{Katz}}] \label{katz1} Let $k$ and $N$ be positive integers such that either ($k=1$ and $3\leq N\leq 11$) or ($k\geq 2$ and $N\geq 3$). Let $p$ be a prime not dividing $N$. 
Then every modular form (mod $p$) of weight $k$ and level $\Gamma(N)$ can be lifted to characteristic zero. \end{thm} \begin{thm}[see~{\cite[Theorem 1.8.1]{Katz}}] \label{katz2} Let $k$ be a positive integer and let $p\neq 2$ be a prime. Every modular form (mod $p$) of weight $k$ and level $\Gamma(2)$ can be lifted to characteristic zero. \end{thm} \subsection{The case $p=3$} \begin{itemize} \item If $N$ is a power of $3$, we can use Theorem~\ref{katz2} to lift $A_3$ to $\tilde{A}_3$: \begin{equation*} \xymatrixcolsep{0pt} \xymatrix{ A_3 & \in & M_2(SL_2(\mathbb Z);\overline{\mathbb F}_3) & \subset & M_2(\Gamma(2);\overline{\mathbb F}_3) \ar@{~>}[d]\\ & & \tilde{A}_3 & \in & M_2(\Gamma(2);\overline{\mathbb Z}) & \subset & M_2(\Gamma_0(2),\text{triv};\overline{\mathbb Z}). } \end{equation*} Going through the proof in Section~\ref{proof} with $E_{p-1}$ replaced by $\tilde{A}_3$, we have $f^\prime=\tilde{A}_3^r f\in M_{k_2}(\Gamma_0(2N),\chi;\overline{\mathbb Z})$, so we must use the Sturm bound for $\Gamma_0(2N)$. Therefore the inequality in Theorem~\ref{main} must be replaced by \begin{equation} n \le \max\left\{ g^*(p,N)^2, \frac{k_2}{12}[SL_2(\mathbb Z):\Gamma_0(2N)] \right\} \end{equation} \item If $N$ is divisible by $2$, the same process as in the previous part gives us the lifting $\tilde{A}_3\in M_2(\Gamma_0(2),\text{triv};\overline{\mathbb Z})$. However, since $2$ divides $N$, we obtain the exact same inequality as in Theorem~\ref{main}. \item If $N$ is divisible by a prime $p_0\notin\{2, 3\}$, we can use Theorem~\ref{katz1} to lift \begin{equation*} \xymatrixcolsep{0pt} \xymatrix{ A_3 & \in & M_2(SL_2(\mathbb Z);\overline{\mathbb F}_3) & \subset & M_2(\Gamma(p_0);\overline{\mathbb F}_3) \ar@{~>}[d]\\ & & \tilde{A}_3 & \in & M_2(\Gamma(p_0);\overline{\mathbb Z}) & \subset & M_2(\Gamma_0(p_0),\text{triv};\overline{\mathbb Z}). } \end{equation*} Since $p_0$ divides $N$, we again obtain the same inequality as in Theorem~\ref{main}. 
\end{itemize} \subsection{The case $p=2$} \begin{itemize} \item If $N$ is not divisible by $5$, $7$ or $11$, use Theorem~\ref{katz2} to lift $A_2$ to $\tilde{A}_2$: \begin{equation*} \xymatrixcolsep{0pt} \xymatrix{ A_2 & \in & M_1(SL_2(\mathbb Z);\overline{\mathbb F}_2) & \subset & M_1(\Gamma(5);\overline{\mathbb F}_2) \ar@{~>}[d]\\ & & \tilde{A}_2 & \in & M_1(\Gamma(5);\overline{\mathbb Z}) & \subset & M_1(\Gamma_0(5),\text{triv};\overline{\mathbb Z}). } \end{equation*} The inequality in Theorem~\ref{main} must then be replaced by \begin{equation} n \le \max\left\{ g^*(p,N)^2, \frac{k_2}{12}[SL_2(\mathbb Z):\Gamma_0(5N)] \right\} \end{equation} \item If $N$ is divisible by $p_0\in\{5, 7, 11\}$, use Theorem~\ref{katz1} to lift \begin{equation*} \xymatrixcolsep{0pt} \xymatrix{ A_2 & \in & M_1(SL_2(\mathbb Z);\overline{\mathbb F}_2) & \subset & M_1(\Gamma(p_0);\overline{\mathbb F}_2) \ar@{~>}[d]\\ & & \tilde{A}_2 & \in & M_1(\Gamma(p_0);\overline{\mathbb Z}) & \subset & M_1(\Gamma_0(p_0),\text{triv};\overline{\mathbb Z}). } \end{equation*} Since $p_0$ divides $N$, we get the same inequality as in Theorem~\ref{main}. \end{itemize} We summarise our findings in Table~\ref{table:ineq}. \begin{table}[h] \begin{tabular}{lll} \toprule Prime & Level & Inequality in Theorem~\ref{main}\\ \midrule $p\geq 5$ & $N\geq 1$ \\ $p=3$ & $N\neq 3^a$, some $a$ & $n \le \max\left\{ g^*(p,N)^2, \frac{k_2}{12}[SL_2(\mathbb Z):\Gamma_0(N)] \right\}$ \\ $p=2$ & $N$ divisible by $5$, $7$ or $11$\\ \midrule $p=3$ & $N=3^a$, some $a$ & $n \le \max\left\{ g^*(p,N)^2, \frac{k_2}{12}[SL_2(\mathbb Z):\Gamma_0(2N)] \right\}$ \\ \midrule $p=2$ & $N$ not divisible by $5$, $7$ or $11$ & $n \le \max\left\{ g^*(p,N)^2, \frac{k_2}{12}[SL_2(\mathbb Z):\Gamma_0(5N)] \right\}$\\ \bottomrule \end{tabular} \caption{Inequalities obtained for the various combinations of $p$ and $N$} \label{table:ineq} \end{table}
1,108,101,565,887
arxiv
\section{Introduction} Trajectory tracking of nonlinear systems is a classic problem in control theory. Optimal control, as one of the approaches to solve this problem, has attracted some efforts throughout the past several decades. The interested reader can refer to \cite{bryson1975applied,werbos2004adp,lewis2009reinforcement,saridis1979approximation,ccimen2008state,zhang2019near} for a concise introduction to more common nonlinear optimal control techniques. Since analytic solutions of nonlinear optimal control problems are not available, except for simple cases, seeking approximate solutions is a common practice. The ADP technique \cite{werbos2004adp}, that is, approximating the cost function with a neural network and learning the optimal cost function in a backward manner (dynamic programming), is one of the widely used techniques among researchers. Optimal tracking problems have been studied for both continuous time and discrete time systems, but regardless of this, the pursued solutions can be categorized into two general frameworks: LQR\footnote{Linear quadratic regulator} extensions and ADP based approaches. In the first approach, the nonlinear plant is modeled as a linear system with time-varying matrices, and then techniques from linear optimal control theory are used. For instance, in \cite{lahdhiri1999design}, a feedback linearization is done on the nonlinear plant and then, a linear optimal problem is defined for the resultant feedback linearized system. In this approach, the objective function is not directly related to the physical system and may not have physical realization. In \cite{ccimen2004nonlinear,jeyed2019development}, the SDRE\footnote{State dependent Riccati equation} approach is used for input affine nonlinear systems. The main drawback of this method is that the proper choice of state-dependent quasilinear form plays an important role in the algorithm \cite{ccimen2008state}.
Also in \cite{chou2004line}, a general nonlinear system is considered and error dynamics is estimated adaptively as a linear system. The optimal control is then calculated based on the linear estimation. In the ADP based approaches, the total cost is approximated with a function approximator of appropriate form and then this approximation is used in order to calculate optimal cost and optimal control. ADP based approaches can be categorized into two branches based on the objective function that they use. In the first one, the objective function for tracking is defined based on the error and the total control input of the system. Optimizing this cost function leads to optimality, but the resulting controller is not locally asymptotically stable in general; this will be discussed later. For instance, in \cite{tang2007optimal}, the general nonlinear system is decomposed based on its linearization and residual terms, and then the optimal control is calculated as a combination of linear and residual part. In \cite{mclain1999synthesis}, a finite horizon continuous time optimal tracking is considered and then, the optimal control is calculated by direct implementation of ADP. A discrete time version of finite horizon approximate optimal tracking can also be found in \cite{heydari2014fixed}; in this work the controller can accept different initial conditions of the same reference trajectory dynamics. Also in \cite{modares2018adaptive,kiumarsi2014actor,modares2014optimal}, states and reference trajectory are augmented in a new variable and the optimal problem is solved as a regulation problem. In these three works, reinforcement learning is used to calculate the optimal control online. Moreover in \cite{khiabani2019design}, a discrete time optimal tracking controller is considered for a switching system and is solved by direct implementation of ADP.
Another ADP based reinforcement learning is used in \cite{yang2011reinforcement} for tracking control of a class of discrete time nonlinear systems with unknown dynamics, however in this approach it is assumed that the input transition matrix is positive definite. For problems with bounded input, \cite{lyshevski1999optimal} has proposed an approach by adding a non-quadratic functional to the total cost. This approach is not done with ADP for tracking problems. However, in \cite{abu2005nearly} it is used along with ADP for a regulation problem. In the second category of ADP based approaches, the control input is decomposed into a steady state (that makes the error dynamic stationary at origin) and a transient part. Then, the control objective is defined based on the error and the transient control. This approach can be found in \cite{park1996optimal,zhang2008novel}. In \cite{zhang2008novel}, reference trajectory and error are augmented into new states and then the ADP is used to solve the resulted augmented regulation optimal problem. In \cite{dierks2009optimal}, an optimal control problem is defined for the transient control and then the total cost is calculated in an online manner by ADP. In this method knowledge of system dynamics is not necessary. In \cite{kamalapurkar2015approximate}, reinforcement learning is used to solve the optimal tracking control of a nonlinear system with unknown dynamics online. In these two works (\cite{dierks2009optimal,kamalapurkar2015approximate}) the optimal problem is solved for an augmented system, as in \cite{zhang2008novel}. Despite advances in the mentioned works, ADP based methods all share a common drawback. That is, the controller needs to be re-calculated for each particular reference trajectory. This issue also exists in LQR based techniques. This work is dedicated to solve the mentioned problem of ADP based approaches for a class of nonlinear systems. 
The proposed method uses the idea of control decomposition to eliminate trajectory dynamics from error dynamics, without eliminating systems dynamic matrices. The optimization is done based on using transient control in the objective function, which is called \textit{modified total cost}, in here. Effects of this decomposition on optimality and asymptotic stability of the closed loop system will be discussed, which, to the best of our knowledge, has not been done in the related literature yet. Furthermore, it will be shown that by optimizing expectation of modified total cost, instead of its exact value, there is no need to know the reference trajectory in the training stage. This change will lead to the main contribution of this paper, that is a near optimal asymptotically stabilizing tracking controller that does general tracking for a class of nonlinear systems. Finally, it will be shown that using optimal control based on expected value of modified total cost (instead of its exact value), does not hurt asymptotic stability of the closed loop system. The proposed controller is near optimal in three aspects. First, because of the form of steady state control that is used. Second, it optimizes expected value of modified total cost, instead of exact modified total cost of a reference trajectory. Third, it approximates this objective function which is the core of ADP. In what follows, first the problem is defined and the proposed method is explained. Then theoretical support for optimality, convergence, and asymptotic stability of the approach is presented. Finally, the method is implemented experimentally on a Delta parallel manipulator and its performance is shown in comparison to some standard nonlinear control techniques. 
\section{Problem statement and resolution} Let us define a tracking problem for a nonlinear system of the following form \begin{equation} x^{(p)} = f(X) + g(X)u, \label{eq:csys} \end{equation} where $x(t) \in \mathbb{R}^{m\times 1}$ is an $m\mbox{-}$vector\footnote{All vectors are column vectors.} of output of interest, $X = [x^\intercal, \cdots, {x^{(p-1)}}^\intercal]^\intercal \in \mathbb{R}^{(n\times 1)}$ is $n\mbox{-}$vector of states, and $u(t) \in \mathbb{R}^{m\times 1}$ is $m\mbox{-}$vector of control input. Also note that $n = m\times p$. Furthermore, $f(.): \mathbb{R}^{n\times1}\rightarrow \mathbb{R}^{m\times1}$ and $g(.): \mathbb{R}^{n\times1}\rightarrow \mathbb{R}^{m\times m}$ are functions representing dynamics of the system. Moreover $f(.)$ and $g(.)$ and their Jacobians are assumed to be continuous. Also, $x^{(i)}$ denotes the $i_{th}$ time derivative of $x$. The system is supposed to follow a particular reference trajectory (not known a priori), that is $X_d(t) = [x_d^\intercal, \cdots, {x_d^{(p-1)}}^\intercal]^\intercal \in D\subset \mathbb{R}^{n\times 1}$, with zero tracking error. Furthermore, assume that $\dot{X}_d$ exists and is continuous. Tracking error is defined as $E = [e^\intercal, \cdots, {e^{(p-1)}}^\intercal]^\intercal$ where $e = x-x_d$. As mentioned in the introduction, in some of the related literature, the optimal controller is designed to minimize the following cost function \begin{equation} J = \int_{0}^{\infty}exp(-\rho \tau)(E^\intercal QE+{u}^\intercal R{u})d\tau, \label{eq:ctotcost} \end{equation} where $Q \in \mathbb{R}^{n\times n},R \in \mathbb{R}^{m\times m}$, and $\rho \in \mathbb R_{\ge 0}$ are semi-positive definite error penalizing matrix, positive definite control penalizing matrix, and discount factor, respectively. The error dynamics of this tracking problem can be written in the following form \begin{equation}\label{eqn_cedyn} e^{(p)} = f(E+X_d)+g(E+X_d)u-x_d^{(p)}. 
\end{equation} This error dynamics can be also stated in state space form as \begin{equation}\label{eqn_csedyn} \dot{E} = F(E+X_d)+G(E+X_d)u-\dot{X}_d, \end{equation} where one has \begin{equation*} F(E+X_d) = \begin{bmatrix} \dot{e} + \dot{x}_d\\ \vdots\\ e^{(p-1)} + x_d^{(p-1)}\\ \\ f(E+X_d) \end{bmatrix}, \end{equation*} \begin{equation*} G(E+X_d) = \begin{bmatrix} 0_{(n-m) \times m}\\ g(E+X_d) \end{bmatrix}. \end{equation*} Note that the above dynamics is non-autonomous\footnote{The dynamic system has a direct dependence on time through $X_d$ and its time derivative.} and non-stationary\footnote{The equilibrium point of the dynamic system does not lie at origin.} at origin with respect to its states $E$. Even though error dynamics and objective function in the form of \cref{eqn_cedyn,eq:ctotcost} are commonly used, there are three disadvantages with formulating the problem in this way. First, the optimal tracking controller is not locally asymptotically stabilizing in general (see \cref{appendixexample}). The reason is that the reference trajectory generally is not an invariant set of the system dynamics, which is needed for optimal control to be asymptotically stabilizing. This shows itself as a steady state error\footnote{This steady state error is because of the analytical construction of the controller, not from disturbance and\textbackslash or uncertainties.}. Second, the presence of the discounting factor $\rho$ means that just a limited part of the horizon is important to the controller. This will lead to a higher steady state error and worsens the effects of the first problem. Third, the resulting optimal control can only follow\footnote{Assuming that the application tolerates the first and the second mentioned problems.} the reference trajectory that is solved for. This means that for new reference trajectories, the problem should be re-solved.
These issues have motivated some authors to use a modified objective function and to revisit components of the error dynamics by decomposing the control input into \textit{steady} and \textit{transient} parts. However, the third issue is not solved in any of the ADP related literature yet, to the best of the authors' knowledge. Furthermore, interpretation of such control decomposition with respect to optimality is not done in the referenced works, to the best of our knowledge. In a tracking problem, the evolution of the system can be categorized in two phases: the transient and the steady state. This gives an idea of decomposing the control to a steady state control plus a correction term, when it is possible. For a system in the form of \cref{eq:csys}, the steady state control, that is $u_s$, can be defined to satisfy the following equation \begin{equation}\label{eqn_cpresscont} x_d^{(p)} = f(X_d)+g(E+X_d)u_s, \end{equation} this form of steady state control is used in \cite{dierks2009optimal} for a discrete-time system, and its main advantage over other forms in literature is that it eliminates trajectory dynamics from error dynamics of \cref{eqn_cedyn}. A controllable plant is assumed, therefore $g(E+X_d) = g(X)$ is invertible. Then $u_s$ can be calculated as \begin{equation}\label{eqn_csscont} u_s = g^{-1}(E+X_d)(x_d^{(p)}-f(X_d)). \end{equation} At any instance, if error equals zero, that is $E=0$, then applying $u_s$ leads to perfect tracking. The total control is the sum of steady state control, that is $u_s$, and a corrective term, that is $\Delta u$, so it is defined as \begin{equation} u = u_s + \Delta u. 
\label{eq:ctotcont} \end{equation} By substituting \cref{eq:ctotcont,eqn_csscont} in \cref{eqn_cedyn}, the error dynamics equation, (\cref{eqn_cedyn}) can be rewritten as \begin{equation}\label{eqn_cedynred} e^{(p)} = f(E+X_d)-f(X_d)+g(E+X_d)\Delta u, \end{equation} furthermore, this equation can be written in state space form as \begin{equation}\label{eqn_csedynred} \dot{E} = F(E+X_d)-F(X_d)+G(E+X_d)\Delta u. \end{equation} The above error dynamics is stationary at origin. Now, one can define modified total cost based on the corrective term $\Delta u$ (instead of total control $u$) as \begin{equation} V = \int_{0}^{\infty}(E^\intercal QE+{\Delta u}^\intercal R{\Delta u})d\tau, \label{eq:ctotcostred} \end{equation} where optimal transient control will be calculated by optimizing the above total cost. This cost function is commonly used in this category of solutions to the optimal tracking problem. Decomposing the control and redefining the total cost resolves the first mentioned problem. Assuming that the system is controllable, the above modified total cost is bounded. The reason is that $\Delta u$ vanishes as the transient phase finishes. This cost function minimizes the error and the corrective control term, so it brings the system to the steady state tracking phase asymptotically. The reason is that, by imposing the steady state control and optimizing modified total cost, the optimal problem is converted to an optimal regulation problem which is asymptotically stable (see for example \cite{lyashevskiy1995control} for asymptotic stability of optimal regulation problem). Furthermore, since the boundedness of modified total cost, that is \cref{eq:ctotcostred}, is achieved without introducing a discounting factor, there is no risk of associated steady state error. Therefore, the second issue is also resolved. For any particular reference trajectory in time, modified total cost, that is \cref{eq:ctotcostred}, only depends on the initial error, that is $E_0$. 
This is a key point in this analysis that also reduces dimensionality of the value function and therefore mitigates the curse of dimensionality further. However, the issue is that the trajectories are not known ahead of time. If one writes the HJB\footnote{Hamilton--Jacobi--Bellman} equation for \cref{eq:ctotcostred}, it can be seen (see \cref{Theoretical background}) that knowledge of trajectory is needed for calculating the modified optimal cost function. One solution to this issue is using expectation of total cost instead of its exact value for a specific trajectory in time. The reason is that one can consider the desired trajectory, that is $X_d(t)$, as a parameter with uniform distribution in ROI\footnote{Region of interest}. This leads to the main contribution of this paper. The expected value of modified total cost, that is \cref{eq:ctotcostred}, can be written in the following form, \begin{equation} \overline{V}(E_0) = \underset{X_d\in ROI}{\mathbb{E}} \left\{\int_{0}^{\infty}(E^\intercal QE+{\Delta u}^\intercal R{\Delta u})d\tau\right\}, \label{eq:cstochtotcostred} \end{equation} where $\mathbb{E}$ denotes mathematical expected value. For every specific trajectory and the defined problem, optimal modified total cost exists uniquely \cite[pp. 284--291]{athans2013optimal} and is twice differentiable \cite{strulovici2015smoothness}. Therefore, its expected value (that is simply an average over all possible trajectories in the present case) also exists, is unique and is twice differentiable. By following the procedure of \cite[pp.
131--136]{bryson1975applied} and taking time derivative of \cref{eq:cstochtotcostred} and using the error dynamics from \cref{eqn_csedynred}, the non-optimal HJB equation can be derived as the following \begin{equation} \underset{X_d\in ROI}{\mathbb{E}}\left\{\overline{V}^\intercal_E(F-F_d+G\Delta u)+E^\intercal QE+\Delta u^{\intercal}R \Delta u \right\}= 0, \label{eq:ctotcostrecredhjb} \end{equation} where $\overline{V}_E = \frac{d\overline{V}(E)}{dE}$, $F = F(E+X_d) = F(X)$, $F_d = F(X_d)$, and $G = G(E+X_d) = G(X)$. Optimal transient control, that is $\Delta u^*$, is the minimizer of LHS\footnote{Left hand side} of \cref{eq:ctotcostrecredhjb}, and can be calculated as \begin{equation} {\Delta u}^* = -\frac{1}{2}R^{-1} G^\intercal \overline{V}_E^*, \label{eq:coptcontred} \end{equation} where $\overline{V}_E^*(E)$ is gradient of expectation of optimal modified total cost. There are several ways in the literature to solve the resulting HJB equation, including PI\footnote{Policy iteration} algorithm \cite{leake1967construction,saridis1979approximation}, integral PI \cite{beard1997galerkin}, integral VI\footnote{Value iteration} \cite{bian2016value}, projection technique \cite{kompas2010comparison}, perturbation method \cite{kompas2010comparison}, and parametric linear programming technique \cite{kompas2010comparison}. Among these methods, integral VI is chosen. The reason is that it gives a good understanding of underlying theory and it does not need an initial admissible policy as needed in other iterative methods based on PI. One can rewrite \cref{eq:cstochtotcostred} in the following form and apply Bellman principle of optimality \cite{Bellman:1957} as \begin{multline} \overline{V}^*(E(t)) =\underset{X_d\in ROI}{\mathbb{E}} \left\{\int_{t}^{t+\Delta T}(E^\intercal QE+{\Delta u^*}^\intercal R{\Delta u^*})d\tau \right.\\ \left.+ \overline{V}^*(E(t+\Delta T))\right\}, \label{eq:cstochtotcostredrec} \end{multline} where $\Delta T \rightarrow 0$.
This equation can be used in the so-called integral value iteration to learn the expectation of modified optimal cost (in other words, the value function), from the following iterative procedure, \begin{multline} \overline{V}_{i+1}(E(t)) =\\ \underset{X_d\in ROI}{\mathbb{E}} \left\{\int_{t}^{t+\Delta T}(E^\intercal QE+{\Delta u_{i+1}^*}^\intercal R{\Delta u_{i+1}^*})d\tau \right.\\\left.+ \overline{V}_i(E(t+\Delta T))\right\}, \label{eq:cstochvfredrec} \end{multline} where $\Delta u_{i+1}^*$ is defined as \begin{equation} {\Delta u}_{i+1}^* = -\frac{1}{2}R^{-1} G^\intercal \overline{V}^*_{i_E}. \label{eq:coptcontredrec} \end{equation} Note that because of using the expectation of total cost, there is no need for knowing the trajectory in the training stage. This means that once the expectation of modified value function is calculated, it can be used to track every trajectory in the ROI. Therefore the third issue with existing ADP based methods is also solved for nonlinear systems with the dynamics given by \cref{eq:csys}. To calculate expected modified value function, and consequently optimal transient control, in a closed form, ADP \cite{werbos2004adp} is used here. To do this, value functions in \cref{eq:cstochtotcostredrec,eq:cstochvfredrec} are approximated with a linear (in weight) NN\footnote{Neural network} as \begin{equation} \overline{V}(E) = W^\intercal \varphi (E), \label{eq:cvfapp} \end{equation} \begin{equation} \overline{V}_i(E) = W_i^\intercal \varphi (E), \end{equation} where $\varphi (E)$ is the vector of basis functions. The procedure for training the neural network with integral value iteration can be summarized as: \begin{enumerate} \item[$a$.] Initialize some random values for $X$ and $X_d$ in the ROI. \item[$b$.] Calculate errors $E$, based on values generated in step $a$. \item[$c$.] Initialize \cref{eq:cstochvfredrec} with $\overline{V}_{0}(E) = 0$. \item[$d$.]
For every $E$ and $X_d$ pair and some small constant value of $T$, repeat the following stages: \begin{enumerate} \item[1.] Calculate ${\Delta u}_{i+1}^*(E,X_d)$ from \cref{eq:coptcontred}. \item[2.] Calculate $\overline{V}_{i+1}(E)$ from \cref{eq:cstochvfredrec}. \end{enumerate} \item[$e$.] Use values from step $d$ to update weights from the least square method \cite[pp. 302--305]{boyd2004convex}. \item[$f$.] Calculate $\delta = max(\left|W_{i+1}-W_i\right|)$ and do one of the following stages: \begin{enumerate} \item[1.] If $\delta< threshold$, terminate the iterative procedure and set $W = W_{i+1}$ and go to step g. \item[2.] If $\delta \ge threshold$ then go to step d. \end{enumerate} \item[$g$.] The optimal corrective term can be calculated as $${\Delta u}^* = -\frac{1}{2}R^{-1} G^\intercal(E+X_d) W^\intercal \Delta \varphi(E),$$ where $\Delta \varphi(E) = \frac{\partial \varphi(E)}{\partial E}$ is gradient of $\varphi(E)$. \end{enumerate} \section{Theoretical background}\label{Theoretical background} \subsection{Optimality}\label{sec:optimality} In the presented approach, the total control is calculated as a combination of the steady state control and the optimal transient control. Considering this decomposition, one may ask about the optimality of the resulted total cost with respect to the original cost function, that is \cref{eq:ctotcost}. To make the analysis easier to grasp, we will investigate the case of a specific reference trajectory to avoid getting involved in the expected values of the total costs. Let us define a new steady state control $u_r$ and a discounted version of \cref{eq:ctotcostred} as \begin{equation}\label{eqn_crrcont} u_r = g^{-1}(X_d)(x_d^{(p)}-f(X_d)), \end{equation} \begin{equation} W(E) = \int_{0}^{\infty}exp(-\rho \tau)(E^\intercal QE+{\Delta u}^\intercal R{\Delta u})d\tau. 
\label{eq:cstochtotcostreddisc} \end{equation} By substituting total control based on $u_r$, that is $u = u_r + \Delta u$, in \cref{eq:ctotcost}, the total cost can be rewritten as \begin{multline} J(E) = \int_{0}^{\infty}exp(-\rho \tau)(E^\intercal QE + \Delta u^\intercal R \Delta u \\+ 2{\Delta u}^\intercal R{u_r} + u_r^\intercal R u_r)d\tau, \label{eq:ctotcostn1} \end{multline} or \begin{equation} J(E) = W(E) + \int_{0}^{\infty}exp(-\rho \tau)(2{\Delta u}^\intercal R{u_r} + u_r^\intercal R u_r)d\tau. \label{eq:ctotcostn2} \end{equation} By minimizing \cref{eq:ctotcostn1,eq:ctotcostn2} over $\Delta u$ and eliminating $J^*(E)$ among them one can write \begin{multline} \min_{\Delta u}\left\{W(E) + \int_{0}^{\infty}exp(-\rho \tau)(2{\Delta u}^\intercal R{u_r} + u_r^\intercal R u_r)d\tau\right\}\\= \min_{\Delta u}\biggl\{\int_{0}^{\infty}exp(-\rho \tau)(E^\intercal QE + \Delta u^\intercal R \Delta u \\+ 2{\Delta u}^\intercal R{u_r} + u_r^\intercal R u_r)d\tau\biggr\}. \label{eq:ctotcostn3} \end{multline} The minimization is on the same variable on both sides of \cref{eq:ctotcostn3}, so it can be simplified as \begin{equation} \begin{split} \min_{\Delta u}\left\{\right.&\left.W(E)\right\} = W^*(E) =\\& \min_{\Delta u}\left\{\int_{0}^{\infty}exp(-\rho \tau)(E^\intercal QE + \Delta u^\intercal R \Delta u)d\tau\right\}. \end{split} \label{eq:ctotcostn4} \end{equation} The process above means that optimizing $W(E)$ is equal to optimizing $J(E)$, while imposing $u_r$ on the system. Furthermore, as $\rho \rightarrow 0$, $W(E)\rightarrow V(E)$. This means that optimizing $V(E)$ is equivalent of optimizing $W(E)$ while imposing $u_r$ on the system and $\rho \rightarrow 0$. 
Since imposing $u_r$ on the error dynamics and finding optimal transient control, for a specific reference trajectory, transforms the optimal control problem to an equivalent optimal regulation problem, so it is asymptotically stable (see \cite{lyashevskiy1995control} for asymptotic stability of optimal regulation problems). Also since $u_r$ is the exact steady state control, the resulted total control is optimal among asymptotically stabilizing controllers, however it is not the absolute optimal control (that as discussed, see \cref{appendixexample}, is not generally asymptotically stabilizing). Furthermore, because $u_s$ (that is used in the proposed method) has a slight difference with $u_r$, the proposed method has a degree of sub-optimality. Therefore, the proposed method, for a specific reference trajectory, is a near optimal control among asymptotically stabilizing tracking controllers. The other point that should be investigated is about optimality of optimal transient control, that is \cref{eq:coptcontred}, with respect to expected value of modified total cost, that is \cref{eq:cstochtotcostred}. In the presented approach, the optimization problem is defined based on the expectation of modified total cost. Introducing the expectation in the equations, a legitimate question is the optimality of the selected control, that is \cref{eq:coptcontred}. To answer this, one needs to take derivative of LHS of \cref{eq:ctotcostrecredhjb} with respect to $\Delta u$ and solve the following equation for $\Delta u$ \begin{multline} \frac{\partial}{\partial \Delta u}\biggl\{\underset{X_d\in ROI}{\mathbb{E}}\Bigl\{\overline{V}^\intercal_E(F-F_d+G\Delta u)\\+E^\intercal QE+\Delta u^{\intercal} R\Delta u \Bigr\}\biggr\} = 0. 
\label{eq:ctotcostrechjbdif} \end{multline} In the above equation, since the expectation is over $X_d$, the derivative can be interchanged with the expectation and one has \begin{multline} \underset{X_d\in ROI}{\mathbb{E}} \biggl\{\frac{\partial}{\partial \Delta u}\Bigl\{\overline{V}^\intercal_E(F-F_d+G\Delta u)\\+E^\intercal QE+\Delta u^{\intercal} R\Delta u \Bigr\}\biggr\} = 0, \label{eq:ctotcostrechjbdifin} \end{multline} note that if the following holds \begin{equation} \begin{split} \frac{\partial}{\partial \Delta u}\left\{\overline{V}^\intercal_E(F-F_d+G\Delta u)+E^\intercal QE+\Delta u^{\intercal} R\Delta u \right\} = 0, \end{split} \label{eq:ctotcostrechjbdifinred} \end{equation} then also \cref{eq:ctotcostrechjbdifin} holds, therefore the answer to the above equation, which is well known to be in the form of \cref{eq:coptcontred}, is a solution to \cref{eq:ctotcostrechjbdif}. Consequently, \cref{eq:coptcontred} is a minimizer to \cref{eq:cstochtotcostred,eq:ctotcostrecredhjb}. Therefore, optimality of \cref{eq:coptcontred} with respect to the expectation of modified total cost is proved. \subsection{Convergence} Since integral value iteration of \cref{eq:cstochvfredrec,eq:coptcontredrec} is an iterative procedure, convergence of the iterations is a concern. This concern is investigated in this subsection while neglecting approximation error of \cref{eq:cvfapp}. The procedure is adopted from \cite{bian2016value} with some changes. Let us define the following transformations, for some positive $V(E)$ with $V(0) = 0$, as \begin{multline} T^{\Delta u}(V,X_d) = \int_{t}^{t+\Delta T}(E^\intercal QE+{\Delta u}^\intercal R{\Delta u})d\tau \\+ V(E(t+\Delta T)), \label{eq:transdef} \end{multline} \begin{equation} \overline{T}^{\Delta u}(V) = \underset{X_d\in ROI}{\mathbb{E}}\left\{T^{\Delta u}(V,X_d)\right\}.
\label{eq:etransdef} \end{equation} Also let $T(V,X_d)$ be defined as $T^{\Delta u^{\circ}}(V,X_d)$, that is calculated with ${\Delta u}^{\circ} = -\frac{1}{2}R^{-1}G(E+X_d)^\intercal V_E$ which is the minimizer of the right hand side of \cref{eq:transdef} (and also \cref{eq:etransdef}, in the same way explained in \cref{sec:optimality}). Furthermore, let $\overline{T}(V)$ be the expected value of $T(V,X_d)$. In this way one can write \cref{eq:cstochtotcostredrec,eq:cstochvfredrec} in the forms of $\overline{V}^* = \overline{T}(\overline{V}^*)$ and $\overline{V}_{i+1} = \overline{T}(\overline{V}_i)$, respectively. If for some $V_l$ and $V_k$ the inequality $V_l< V_k$ holds, then $T^{\Delta u}(V_l,X_d)< T^{\Delta u}(V_k,X_d)$ also holds for all $E, X_d \in ROI \text{ and } E\neq 0$. Therefore one can conclude that $\overline{T}^{\Delta u}(V_l)< \overline{T}^{\Delta u}(V_k)$. Consequently, by this assumption, one has $\overline{T}(V_l)\leq \overline{T}^{{\Delta u}^{\circ}_k}(V_l)< \overline{T}(V_k)$. If $\overline{V}_0 = 0$ holds, by investigating \cref{eq:cstochvfredrec} one can see that $\overline{V}_1> \overline{V}_0$. Also if one assumes $\overline{V}_{i+1}>\overline{V}_i$, then $\overline{V}_{i+2} = \overline{T}(\overline{V}_{i+1})>\overline{V}_{i+1} = \overline{T}(\overline{V}_i)$. Thus, if integral value iteration starts with $\overline{V}_0 = 0$, then by induction one has $\overline{V}_{i+1}>\overline{V}_i$ for all $E, X_d \in ROI \text{ and } E\neq 0$. Since a controllable plant is assumed, there exists a non-optimal (in the sense of expectation of modified total cost) stabilizing control policy, that is $\Delta h$, whose expected modified total cost, that is $\overline{Z}_0(e)$, is greater than $\overline{V}_0$ and $\overline{V}^*$. Also note that one can write $\overline{Z}_0 = \overline{T}^{\Delta h}(\overline{Z}_0)$. Therefore $\overline{Z}_1 = \overline{T}(\overline{Z}_0)< \overline{Z}_0 = \overline{T}^{\Delta h}(\overline{Z}_0)$.
Consequently one can conclude $\overline{Z}_{i+1}< \overline{Z}_i$, in the same manner used in the previous paragraph. Furthermore, since $\overline{V}^*<\overline{Z}_0$, then $\overline{V}^* = \overline{T}(\overline{V}^*)< \overline{Z}_1 = \overline{T}(\overline{Z}_0)$. By repeating this $i$ times, one can conclude that $\overline{V}^* < \overline{Z}_i$. Since $\overline{Z}_i$ is a decreasing positive sequence\footnote{Elements of the sequence are positive.} that is lower bounded by $\overline{V}^*$, it converges to this lower bound. Moreover, since $\overline{V}_0<\overline{Z}_0$, by the same reasoning $\overline{V}_i<\overline{Z}_i$. Finally, assuming that $\overline{V}_0 = 0$, one can combine strictly monotonic convergence of $\overline{Z}_i$ to $\overline{V}^*$, strictly monotonic increase of $\overline{V}_i$, the inequality $\overline{V}_i<\overline{Z}_i$, and positiveness of these functions to conclude that $\overline{V}_i$ also converges to $\overline{V}^*$ as $i \rightarrow \infty$ for all $E, X_d \in ROI \text{ and } E\neq 0$. Furthermore, one has $\overline{V}_i(0) = \overline{Z}_i(0) = \overline{V}^*(0) = 0$ from their construction. As a result, convergence of the integral value iteration for the proposed method is guaranteed for all $E, X_d \in ROI$. \subsection{Stability} The most important aspect of a controller is its stability. We will show asymptotic stability of the proposed method through an appropriate Lyapunov function. Let us define the \textit{Hamiltonian}, for any differentiable function $V_l(E): \mathbb{R}^{n\times 1}\rightarrow \mathbb{R}$, as \begin{equation} H_{\Delta u}(V_l) := V_{l_E}^\intercal(F-F_r + G\Delta u) + E^\intercal QE + {\Delta u}^\intercal R\Delta u. \label{eq:hamiltonian} \end{equation} Furthermore, let $H(V_l)$ be optimal value of $H_{\Delta u}(V_l)$ with respect to $\Delta u$.
Moreover, let us define $Z(E)$ as the modified total cost of an admissible control\footnote{In this work, admissible controls are limited to asymptotically stabilizing controllers.}, calculated from \cref{eq:ctotcostred} for a specific reference trajectory. Based on Lemma 1 of \cite{leake1967construction}, if the following inequality holds, for any $V_l$, \begin{equation} H(Z) \leq H(V_l), \label{eq:hamil1} \end{equation} then one has \begin{equation} V_l(E) \leq Z(E). \label{eq:hamil2} \end{equation} One can write optimal value of the modified total cost of a specific reference trajectory, that is calculated from \cref{eq:ctotcostred} (here called $S^*(E)$ for clarity of notation), as \begin{equation} S^*(E) = \overline{V}^*(E) + D^*(E). \label{eq:totdec} \end{equation} Furthermore, since $S^*$ is the solution of the HJB equation, one has \begin{equation} H(S^*) = {S^*_E}^\intercal(F-F_r + G\Delta u^*_{s}) + E^\intercal QE + {\Delta u^*_{s}}^\intercal R\Delta u^*_{s} = 0, \label{eq:hamilopt} \end{equation} where $\Delta u^*_{s} = \Delta u^* + \Delta u^*_{d} = -\frac{1}{2}R^{-1}G^\intercal (\bar{V}_E^* + D^*_E)$. This equation can be rewritten as \begin{multline} {D^*_E}^\intercal(F-F_r + G(\Delta u^* + \Delta u^*_{d}))\\ + E^\intercal QE + (\Delta u^* + \Delta u^*_{d})^\intercal R(\Delta u^* + \Delta u^*_{d})\\ + {\overline{V}^*_E}^\intercal(F-F_r + G(\Delta u^* + \Delta u^*_{d}))= 0. \label{eq:rehamilopt} \end{multline} For any specific reference trajectory, there could be three cases, based on $D^*(E)$. First, assume that $D^* = 0$. In this case $\overline{V}^*(E) = V^*(E)$. So, by substituting \cref{eq:totdec} into \cref{eq:hamilopt} one has \begin{equation} \dot{\overline{V}}^* = {\overline{V}_E^*}^\intercal(F-F_r + G\Delta u^*) = - E^\intercal QE - {\Delta u^*}^\intercal R\Delta u^*. \label{eq:dl1} \end{equation} RHS of the above equation is negative definite (from definition of the problem).
Therefore, when $D^* = 0$ and the system is controlled by the proposed controller, that is $\Delta u^*$ from \cref{eq:coptcontred}, $\dot{\overline{V}}^*$ is negative definite. Second, assume that $D^*(E)<0$ for $E\neq0$. In this case, negative definiteness of the $\dot{\overline{V}}^*$ under the proposed controller can be proved by contradiction. Assume that $\dot{\overline{V}}^*$ is negative except for some $E, X_d \in ROI \textit{ and } E\neq0$. Therefore one can write $\dot{\overline{V}}^* = {\overline{V}_E^*}^\intercal(F-F_r + G\Delta u^*)\geq0$ for some $E, X_d \in ROI \textit{ and } E\neq0$, which leads to $H(\overline{V}^*)\geq H(V^*) = 0$. In this way one can conclude that $\overline{V}^*\leq V^*$ (from Lemma 1 of \cite{leake1967construction}) for some $E, X_d \in ROI \textit{ and } E\neq0$. This is contradictory with the assumption of $D^*(E)<0$, so one cannot have $\dot{\overline{V}}^*\geq 0$ for some $E, X_d \in ROI \textit{ and } E\neq0$. From the same reasoning, one also cannot have $\dot{\overline{V}}^*\geq 0$ for all $E, X_d \in ROI \textit{ and } E\neq0$. Therefore, $\dot{\overline{V}}^*$ is negative definite under the proposed controller for $D^*(E)<0$, taking into account that $\dot{\overline{V}}^* = 0$ for $E = 0$ from its construction. Third, assume that $D^*(E)>0$ for $E\neq0$. In this way, One has $H(D^*)> H(S^*) = 0$ for all $E, X_d \in ROI , E\neq0$. The reason is that assuming $H(D^*)\leq H(S^*)$ holds for some $E, X_d \in ROI \textit{ and } E\neq0$, leads to $D^*\geq S^*$, which is contradictory to $S^* = \overline{V}^* + D^*$ in the current case. Also one has $H(D^*) = 0$ for all $X_d \in ROI , E=0$, by its definition. Note that one can write \begin{equation} {D^*_E}^\intercal G\Delta u^* = {\overline{V}^*_E}^\intercal G\Delta u^*_d = -2{\Delta u^*}^\intercal R\Delta u^*_d. 
\label{eq:aux1} \end{equation} By expanding \cref{eq:rehamilopt}, one can write \begin{multline} {D^*_E}^\intercal(F-F_r + G\Delta u^*_{d})+ {D^*_E}^\intercal G\Delta u^* + E^\intercal QE \\+ {\Delta u^*}^\intercal R\Delta u^* + 2{\Delta u^*}^\intercal R\Delta u^*_d +{\Delta u^*_{d}}^\intercal R\Delta u^*_{d}\\ + {\overline{V}^*_E}^\intercal G\Delta u^*_d + {\overline{V}^*_E}^\intercal(F-F_r + G\Delta u^*)= 0. \label{eq:rehamiloptexpand} \end{multline} By substituting \cref{eq:aux1} into \cref{eq:rehamiloptexpand} and combining the terms one has \begin{multline} {D^*_E}^\intercal(F-F_r + G\Delta u^*_{d}) + E^\intercal QE \\+ (\Delta u^* - \Delta u^*_{d})^\intercal R(\Delta u^* - \Delta u^*_{d})\\+ {\overline{V}^*_E}^\intercal(F-F_r + G\Delta u^*)= 0, \label{eq:aux2} \end{multline} furthermore, by minimizing first three terms in \cref{eq:aux2}, it can be written as \begin{multline} {D^*_E}^\intercal(F-F_r + G\Delta u^*_{d}) + E^\intercal QE \\+{\Delta u^*_{d}}^\intercal R\Delta u^*_{d}+ {\overline{V}^*_E}^\intercal(F-F_r + G\Delta u^*)\leq 0. \label{eq:aux3} \end{multline} First three terms of \cref{eq:aux3} are equal $H(D^*)$, by definition. By using $H(D^*)>0$ for $E\neq0$ (as per current case) in \cref{eq:aux3}, one will conclude that for all $X_d,E\in ROI, E\neq0$, \begin{equation} {\overline{V}^*_E}^\intercal(F-F_r + G\Delta u^*)< 0. \label{eq:aux4} \end{equation} Also one has ${\overline{V}^*_E}^\intercal(F-F_r + G\Delta u^*) = 0$ for $E=0$, by definition. As a result, $\dot{\overline{V}}^*$ is negative definite under the control $\Delta u^*$ for $D^*(E)>0$. $\overline{V}^*(E)$ is positive definite by its construction and as we proved $\dot{\overline{V}}^* = {\overline{V}_E^*}^\intercal(F-F_r + G\Delta u^*)$ is negative definite in the ROI. Therefore $\overline{V}^*(E)$ is a Lyapunov function for the system under control of $\Delta u^*$, that is from \cref{eq:coptcontred}. 
Therefore, the closed loop system, from the proposed method, is locally asymptotically stable. \section{An experimental case study} To show and compare the performance of the presented approach, an experimental study is done on a developed Delta parallel robot, as depicted in \cref{fig:delta}. \begin{figure}[H] \centering \includegraphics[width=.48\textwidth]{ADP.jpg} \caption{Delta robot used in the experiments}\label{fig:delta} \end{figure} Delta robot is a parallel manipulator with three transnational DOFs\footnote{Degrees of freedom} designed by Clavel \cite{clavelphdthesis}. The dynamic model of this robot can be presented in the following form \cite{tsai1999robot} \begin{equation} M(w,q)\ddot{w} + C(w,q,\dot{w},\dot{q})\dot{w} + G(w,q) = J(w,q)^\intercal \tau, \label{eq:deldyn} \end{equation} where $M,C,G,w,q$, and $\tau$ are mass matrix, Coriolis matrix, gravitational vector, workspace coordinate vector, joint space coordinate vector, and motor torques, respectively. Different methods have been used to control the Delta robot \cite{castaneda2014robust,paccot2009review,taghirad2013parallel,codourey1998dynamic}. In the present work, computed torque method,which is usually used as a benchmark in the related literature, and sliding mode control, which is suitable benchmark of robustness, are considered for comparison. The control law based on CT\footnote{Computed torque} can be written as \cite{taghirad2013parallel} \begin{equation} \tau = J^{-\intercal}\left(C\dot{w} + G + M(\ddot{w}_d-K_d\dot{\tilde{w}}-K_p\tilde{w} )\right), \label{eq:cct} \end{equation} where $w_d,K_d$, and $K_p$ are desired position, derivative gain, and proportional gain, respectively. $\tilde{w} = w-w_d$ is also position error. 
The control law for SMC\footnote{Sliding mode control} can also be written as \cite{slotine1991applied} \begin{equation} \tau = J^{-\intercal}\left(C\dot{w} + G + M(\ddot{w}_d-\lambda \dot{\tilde{w}}-Ksat(\frac{S}{\varphi}) )\right), \label{eq:csmc} \end{equation} where $S = \dot{\tilde{w}} + \lambda \tilde{w}$, $\lambda$, $K$, and $\varphi$ are sliding surface, sliding surface parameter, sliding mode controller gain, and boundary layer, respectively. Also $sat$ denotes saturation function. To train ADP based controller, $500$ sets of randomly generated data is used and the training is done $10$ times independently with least square method. Then weights of these 10 trainings are averaged and used for the experiments. Based on our experience, the averaged weights present good repeatability, whereas in each individual training different weights may be achieved (even if higher number of data is used for an individual training). To make three controllers comparable, they are tuned so that they have similar rise time. Also, experiments are done with $500 Hz$ sampling frequency. Furthermore, no friction compensation is done in experiments. For all experiments, actuators are saturated at $5 \, N.m$. Other parameters used in the tests can be found in~\cref{appendixparams}. \subsection{Results} Two scenarios are considered to compare the performance of the methods. First, the robot is supposed to draw a circle in $z$-plane, with $x = 250\, cos(\pi t)\, (mm)$ and $y = 250\, sin(\pi t)\, (mm) $. Second, the robot is supposed to go to two different locations sequentially, i.e., $\begin{bmatrix}100& 100& 450\end{bmatrix}^\intercal \,(mm)$ and \linebreak $\begin{bmatrix}-100& -100& 600\end{bmatrix}^\intercal \,(mm)$. Moreover, to compare robustness of the controllers, both scenarios are repeated by adding a $1kg$ mass as an uncertainty to the end effector. Also all experiments started from robot's home position at $\begin{bmatrix}0& 0& 500\end{bmatrix}^\intercal \,(mm)$. 
Video of the tests can be found in \cite{testvideo}. Results related to first scenario without uncertainty are summarized in \cref{fig:circtraj,fig:excirc,fig:eycirc,fig:ezcirc,table:ms}. \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{desiredcirc.pdf} \caption{First scenario desired trajectory}\label{fig:circtraj} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorxcirc.pdf} \caption{First scenario tracking error $e_x$: without uncertainty}\label{fig:excirc} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorycirc.pdf} \caption{First scenario tracking error $e_y$: without uncertainty}\label{fig:eycirc} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorzcirc.pdf} \caption{First scenario tracking error $e_z$: without uncertainty}\label{fig:ezcirc} \end{figure} \begin{table}[!tp] \centering \begin{tabular}{ l c c c } {} & x& y& z\\ \hline $\mu_{\left|e\right|}\,(mm)$ & & &\\ \hline\hline CT & $2.2119$ & $2.1916$ & $0.4873$\\ ADP & $1.6669$ & $1.7063$ & $0.2387$\\ SMC & $1.4699$ & $1.4020$ & $0.2214$\\\hline $\sigma_{\left|e\right|}\,(mm)$ & & &\\ \hline\hline CT & $1.1095$ & $1.1505$ & $0.2358$\\ ADP & $0.8907$ & $0.8686$ & $0.1751$\\ SMC & $0.7393$ & $0.7247$ & $0.0861$ \end{tabular} \caption{Mean and standard deviation of steady state $\left|e\right|$,\\first scenario without uncertainty}\label{table:ms} \end{table} Results related to second scenario without uncertainty are presented in \cref{fig:steptraj,fig:exstep,fig:eystep,fig:ezstep}. 
\begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{desiredstep.pdf} \caption{Second scenario desired trajectory}\label{fig:steptraj} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorxstep.pdf} \caption{Second scenario tracking error $e_x$: without uncertainty}\label{fig:exstep} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorystep.pdf} \caption{Second scenario tracking error $e_y$: without uncertainty}\label{fig:eystep} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorzstep.pdf} \caption{Second scenario tracking error $e_z$: without uncertainty}\label{fig:ezstep} \end{figure} The difference between performance of the three methods (without the uncertain mass), as observed through \cref{fig:excirc,fig:eycirc,fig:ezcirc,fig:exstep,fig:eystep,fig:ezstep,table:ms}, is small. However, even these small differences are considerable in the context of robotic applications, given tight tolerances and high accuracy requirements. CT does the worst, both in step response and following a circle. The performance of the proposed method is very close to that of SMC. But except for the $y$ coordinate error of step test, SMC controller slightly does a better job. Results of experiments with a mass added as uncertainty are shown \cref{fig:excircu,fig:eycircu,fig:ezcircu,fig:tauxcircu,fig:tauycircu,fig:tauzcircu,table:msu,fig:exstepu,fig:eystepu,fig:ezstepu,fig:tauxstepu,fig:tauystepu,fig:tauzstepu}. 
\begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorxcircu.pdf} \caption{First scenario tracking error $e_x$: with uncertainty}\label{fig:excircu} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorycircu.pdf} \caption{First scenario tracking error $e_y$: with uncertainty}\label{fig:eycircu} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorzcircu.pdf} \caption{First scenario tracking error $e_z$: with uncertainty}\label{fig:ezcircu} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{tauxcircu.pdf} \caption{First scenario torque $\tau_1$: with uncertainty}\label{fig:tauxcircu} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{tauycircu.pdf} \caption{First scenario torque $\tau_2$: with uncertainty}\label{fig:tauycircu} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{tauzcircu.pdf} \caption{First scenario torque $\tau_3$: with uncertainty}\label{fig:tauzcircu} \end{figure} \begin{table}[!tp] \centering \begin{tabular}{ l c c c } {} & x& y& z\\ \hline $\mu_{\left|e\right|}\,(mm)$ & & &\\ \hline\hline CT & $2.6833$ & $2.8572$ & $2.2906$\\ ADP & $1.7528$ & $1.8637$ & $1.2390$\\ SMC & $1.6172$ & $1.7284$ & $0.8945$\\\hline $\sigma_{\left|e\right|}\,(mm)$ & & &\\ \hline\hline CT & $1.4124$ & $1.5423$ & $0.55518$\\ ADP & $0.9451$ & $0.9336$ & $0.3820$\\ SMC & $0.8496$ & $0.8882$ & $0.2250$ \end{tabular} \caption{Mean and standard deviation of steady state $\left|e\right|$,\\first scenario with uncertainty}\label{table:msu} \end{table} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorxstepu.pdf} \caption{Second scenario tracking error $e_x$: with uncertainty}\label{fig:exstepu} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorystepu.pdf} \caption{Second scenario tracking error $e_y$: with uncertainty}\label{fig:eystepu} 
\end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{errorzstepu.pdf} \caption{Second scenario tracking error $e_z$: with uncertainty}\label{fig:ezstepu} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{tauxstepu.pdf} \caption{Second scenario torque $\tau_1$: with uncertainty}\label{fig:tauxstepu} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{tauystepu.pdf} \caption{Second scenario torque $\tau_2$: with uncertainty}\label{fig:tauystepu} \end{figure} \begin{figure}[!tp] \centering \includegraphics[width=.48\textwidth]{tauzstepu.pdf} \caption{Second scenario torque $\tau_3$: with uncertainty}\label{fig:tauzstepu} \end{figure} As it can be seen in \cref{fig:excircu,fig:eycircu,fig:ezcircu,fig:exstepu,fig:eystepu,fig:ezstepu,table:msu}, adding the uncertainty to the system increases steady state errors for all three methods. While one can say the robustness of SMC is higher than other two methods, the performance of the proposed methods still remains close to that of SMC. Moreover, the computed torque controller falls much behind in comparison to the other two methods, as expected. The cost comparison of the controllers is given for first scenario, with and without uncertainty in \cref{table:cost}. This cost is evaluated based on considering the total cost in the objective function, similar to \cref{eq:ctotcost}, for first five seconds of the experiments. As expected, for the case without uncertainty, the cost of the proposed approach is lower by at least $22\%$. In the case with uncertainty, the cost of proposed method is approximately $2\%$ higher than that of computed torque method. This is logical as the superior performance of the proposed method against uncertainty, needs more control effort. 
\begin{table}[!tp] \centering \begin{tabular}{ l c c c } {} & CT& ADP& SMC\\ \hline\hline Without uncertainty& $681.65$&$558.43$&$1180.49$\\ \hline With uncertainty& $828.62$&$851.45$&$1181.44$\\\hline\hline \end{tabular} \caption{Total cost for first scenario}\label{table:cost} \end{table} Despite advantages of the proposed optimal controller, that can be seen in experiments, like every other method it has its disadvantageous too. ADP, that is used to solve the nonlinear optimal control problem in a closed form, is a numerical procedure and it can have convergence problems in practice. To be more precise, depending on the approximation error of the neural network that is used, the ADP based algorithm may not converge for all values of $Q,R$, and sampling time. Therefore, the designer should consider this in controller design stage. The other disadvantage of ADP is about choosing basis functions. Even though some works has been done about this, there is no conclusive work yet. Generally as ROI gets bigger in dimension, finding basis functions that accurately interpolate the value function, gets much harder. Consequently the convergence of the algorithm will be affected, however normalizing the data might be helpful. The other issue is the so called curse of dimensionality \cite{Bellman:1957} in dynamic programming. Despite mitigation of this problem by implementing ADP, and also in the proposed method through reduction of value function parameters by introducing expectation of value function (expectation of value function depends only on $E$ instead of $E$ and $X_d$), the problem still exists. The other point to be mentioned is about chattering. This is usually considered a problem related to SMC. However, this phenomenon can also happen for the other two methods, because of control discontinuity resulted from digital implementation. 
In tuning all of the controllers, it was observed that there is an upper-bound on the aggressiveness of each of the controllers, that can be achieved without chattering. \section{Conclusion} In this paper, a new framework is introduced for optimal tracking problem of a class of nonlinear systems. In contrast to previous works on optimal control, the presented approach can track any trajectories (of course in the ROI) after one training. Also using expectation of total cost, number of parameters decreased. This mitigates curse of dimensionality. The presented method is then applied to a relatively complex nonlinear system and its performance is shown experimentally. The current work addressed asymptotic optimal tracking problem for nonlinear systems in canonical form. Future work can focus on the same problem for general input affine systems with well defined relative degree. \section{Acknowledgments} This research was partially supported by the United States National Science Foundation through Grant 1745212. \crefalias{section}{appendix} \section*{Appendix} \setcounter{section}{0} \renewcommand*{\theHsection}{Appendix.\the\value{section}} \renewcommand\thesubsection{\Alph{subsection}} \crefalias{subsection}{appendix} \subsection{}\label{appendixexample} Consider the following optimal tracking system that is defined based on exact total cost \begin{equation*} \dot{x} = x + u, \end{equation*} \begin{equation*} J(x_0) = \int_{0}^{\infty} exp(-\rho t)(q(x-r)^2 + u^2) dt, \end{equation*} where $\rho>0$ and $q = 1$. Let assume that the desired trajectory is $r(t) = 2$. In this case if the initial condition is $x_0 = 1$. Then the optimal control intuitively becomes $u^*(t) = -1$, this can be easily verified from the standard LQT solution. Applying the optimal control, the state time history becomes $x(t) = 1$. Therefore, the optimal tracking control based on exact total cost is not asymptotically stabilizing. 
This happens due to the fact that the reference trajectory is not within invariant sets of the system (this makes error dynamics non-stationary at origin), which is needed for asymptotic stability of the closed loop system under optimal control. To be more precise the optimal tracking controller based on the total cost can only asymptotically track reference trajectories that are among invariant sets of the system. This solution is acceptable in an economical optimization problem, however in control context, asymptotic stability is favored. To achieve general asymptotic stability from optimal control resulted from optimizing exact total cost one typically needs $q \rightarrow \infty$. \subsection{Experimental parameters}\label{appendixparams} Here, control parameters and geometrical and inertial parameters of the robot used in the experiments are given. \noindent \newline Computed torque parameters: \begin{equation*} \begin{array}{lr} K_p = 1600 & \text{proportional gain}\\ K_d = 100 & \text{derivative gain} \end{array} \end{equation*} \noindent \newline Proposed controller parameters: \begin{equation*} \begin{array}{ll} Q = D^{\intercal} D & \text{state penalizing matrix} \end{array} \end{equation*} \begin{equation*} \begin{array}{ll} \text{where} & D = \begin{bmatrix} 20 & 0 & 0 & 1 & 0 & 0\\ 0 & 20 & 0 & 0 & 1 & 0\\ 0 & 0 & 20 & 0 & 0 & 1 \end{bmatrix} \end{array} \end{equation*} \begin{equation*} \begin{array}{ll} R = 0.001I_{3 \times 3} & \text{control penalizing matrix}\\ \end{array} \end{equation*} \begin{equation*} \begin{array}{ll} \begin{split} \varphi(X) = \left[x_1x_2 \quad x_1x_3 \quad x_1x_4 \quad \dotso \right.\\ \qquad x_2x_3 \quad x_1x_5 \quad x_2x_4 \\ \qquad x_1x_6 \quad x_2x_5 \quad x_3x_4 \\ \qquad x_2x_6 \quad x_3x_5 \quad x_3x_6 \\ \qquad x_4x_5 \quad x_4x_6 \quad x_5x_6 \\ \qquad {x_1}^2 \quad {x_2}^2 \quad {x_3}^2 \\ \left. 
\qquad {x_4}^2 \quad {x_5}^2 \quad {x_6}^2 \right]^{\intercal}\end{split} & \text{basis function} \end{array} \end{equation*} where \begin{multline*} \begin{bmatrix} x_1 \quad x_2 \quad x_3 \end{bmatrix} = \begin{bmatrix} e_x \quad e_y \quad e_z \end{bmatrix} \text{\qquad and}\\ \begin{bmatrix} x_4 \quad x_5 \quad x_6 \end{bmatrix} = \begin{bmatrix} \dot{e}_x \quad \dot{e}_y \quad \dot{e}_z \end{bmatrix} \end{multline*} \begin{multline*} \begin{split} W = \left[0.0025 \quad -0.1939 \qquad 0.0330 \qquad \dotso \right.\\ \quad -0.2257 \qquad 0.0026 \quad -0.0009 \\ \qquad 0.0008 \qquad 0.0317 \quad -0.0026 \\ \qquad 0.0002 \quad -0.0055 \qquad 0.0507 \\ \quad -0.0001 \quad -0.0002 \quad -0.0002 \\ \qquad 1.8550 \qquad 1.8911 \qquad 1.9928 \\ \left. \qquad 0.0012 \qquad 0.0012 \qquad 0.0016 \right]^{\intercal}\end{split} \\ \text{optimal weights} \end{multline*} \noindent \newline Sliding mode parameters: \begin{equation*} \begin{array}{ll} K = 70 & \text{sliding mode gain}\\ \lambda = 20 & \text{sliding surface parameter}\\ \varphi = 0.35 & \text{boundary layer} \end{array} \end{equation*} Geometrical parameters: \begin{equation*} \begin{array}{ll} r_b = 0.2 \: m & \text{fixed platform radius}\\ r_a = 0.05 \: m & \text{moving platform radius}\\ \varphi_1 = \pi & \text{1st leg position angle}\\ \varphi_2 = -\frac{\pi}{3} & \text{2nd leg position angle}\\ \varphi_3 = \frac{\pi}{3} & \text{3rd leg position angle}\\ l_L = 0.2 \: m & \text{motor leg length}\\ r_L = 0.3933 & \text{motor leg COM lenght ratio}\\ l_K = .52 \: m & \text{intermediary leg length}\\ r_K = .5 & \text{intermediary leg COM length ratio}\\ R_b = \begin{bmatrix} 0 & r_b & 0 \end{bmatrix}^\intercal\\ R_a = \begin{bmatrix} 0 & r_a & 0 \end{bmatrix}^\intercal \end{array} . 
\end{equation*} Inertial parameters: \newline \begin{tabular}{ll} $m_P = 1.055 \: kg$ & moving platform mass\\ $I_{mo} = 0.0465475 \: kg.m^2$ & motor inertia\\ $m_L = 0.116 \: kg$ & motor leg mass\\ $m_K = 2\times 0.05788 \: kg$ & intermediary leg mass\\ $I_L = 6.4345319\times 10^{-4} \: kg.m^2$ & motor leg inertia\\ \multirow{ 3}{*}{$d_K = 0.08 \: m$} & intermediary leg distance\\ &from each other at every\\ & joint\\ \multirow{ 3}{*}{$I_K = 5.74769459 \times 10^{-3} \: kg.m^2$} & intermediary leg pairs\\ &dominant inertia\\ \end{tabular} \bibliographystyle{imaiai}
1,108,101,565,888
arxiv
\section{Introduction} Most of the matter in the universe exists in the plasma state and plasma also plays an important role in gas dynamics in astrophysics. When magnetic field are present, the dynamics of an electrically conducting plasma is sensitive to magnetic forces; as a result, magnetohydrodynamics(MHD) is used to understand the dynamical evolution of astrophysical fluids. The ideal limit of MHD poses a new class of problems in dissipative processes. In ideal hydrodynamics, irreversible processes, such as shock waves and vorticity reconnection, occur at dynamical speeds, independent of microscopic viscosity parameters. Weak solutions describe these irreversible discontinuous solutions of the Euler equations. While smooth flows conserve entropy and vorticity, the infinitesimal discontinuity surfaces generate entropy and reconnect vorticity. This can also be understood as a limiting case starting with finite viscosity, where these surfaces have a finite width. With magnetic fields, a more dramatic problem emerges. If two opposing field lines sit nearby, a state of higher entropy can be reached by reconnecting the field lines, and converting their magnetic energy into fluid entropy. In the presence of resistivity, this process occurs on a resistive time scale for some relevant scale. This exaggerates the problem somewhat. Extensive theoretical research on magnetic reconnection(\cite{2000mrp..book.....B}, \cite{2000mare.book.....P}) has shown that scales intermediate between the size of a system and resistive scales can be important. Nevertheless, in many astrophysical settings, simple models for reconnection give time scales that are very long, and reconnection is observed or inferred to occur on much shorter time scales, e.g. for solar flares, more than $10^{10}$ times faster than the theory\cite{1991JGR....96.9399D}. 
This has led to the suggestion that magnetic reconnection in the limit of vanishing resistivity might also go to a weak (discontinuous) solution, occuring at a finite speed which is insensitive to the value of the resistivity. The problem is best illustrated by the Sweet-Parker configuration (\cite{1958IAUS....6..123S}, \cite{1957JGR....62..509P}), where opposing magnetic fields interact in a thin current sheet, the reconnection layer. This unmagnetized layer becomes a barrier to further reconnection. In a finite reconnection region, fluid can escape the reconnection region at alfvenic speeds. Because the reconnection region is thin, the reconnection speed is reduced from the alfven speed by a factor of the ratio of the current sheet width to the transverse system size. In the Sweet-Park model this factor is the inverse of the square root of the Lundquist number ($V_A L/\eta$). The predicted sheet widths are typically extremely thin. Petschek proposed a fast magnetic reconnection solution (\cite{1964NASSP..50..425P}) based on the idea that magnetic reconnection happens in a much smaller diffusive region, called the X-point, instead of a thin sheet. The global structure is determined by the log of the Lundquist number, and stationary shocks allow the fluid to convert magnetic energy to entropy. However, Biskamp's simulations (\cite{1986mrt..conf...19B}) showed that Petschek's solution is unstable when Ohmic resistivity becomes very small. In their two dimensional incompressible resistive MHD simulations, they injected and ejected plasma and magnetic flux across the boundary. They also changed the boundary condition during the simulation to eliminate the boundary current layer. However, considering the current sheet formed in their simulation, the computation domain may not be big enough. 
After reproducing different scaling simulations results(\cite{1986mrt..conf...19B}, \cite{1986JGR....91.6807L}), Priest and Forbes \cite{1992JGR....9716757P} pointed out that it is the boundary conditions that determine what happens (including Biskamp's unstable Petscheck's simulation) and that sufficiently free boundary conditions can make fast reconnection happen. However, there is no self-consistent simulation of fast reconnection reported, except with artificially enhanced local resistivity\cite{1989JGR....94.8805S}. To reconcile the observed fast reconnection with its absence in simulations leads to two possible resolutions: 1) ideal MHD are not the correct equations, and long range collisionless effects are required, or 2) assumptions about the reconnection regions are too restrictive. This includes the 2-dimensionality and the boundary conditions. In exploring of the first possibility, it was found that when integrating with the Hall term in the MHD equations, or using a kinetic description(\cite{2001JGR...106.3715B}), it was possible to find fast reconnection. However, this still didn't offer any help to the collisional system, which still has fast magnetic reconnection no matter whether Hall term is present or not; and also the increase of local resistivity is not generic in astrophysical environments, which mostly has highly conducting fluids. For the second possibility, we note that Lazarian \& Vishniac (LV99) \cite{1999ApJ...517..700L} proposed a model of fast magnetic reconnection with low amplitude turbulence. Subsequent simulation results \cite{2009arXiv0903.2052K} support this model. They found that the reconnection rate depends on the amplitude of the fluctuations and the injection scale, and that Ohmic resistivity and anomalous resistivity do not affect the reconnection rate. The result that only the characteristics of turbulence determine the reconnection speed provides a good fit for reconnection in astrophysical systems. 
LV99 offered a solution to fast magnetic reconnection in collisional systems with turbulence. In this paper, we consider a different problem, whether we could still have fast reconnection without turbulence. We present an example of fast magnetic reconnection in ideal three dimensional MHD simulation in the absence of turbulence. Here we explore a different aspect: 3-D effects and boundary conditions. Traditionally, simulations have searched for stationary 2-D solutions, or scaling solutions. In the case of fast reconnection, the geometry changes on an alfvenic time, so these assumptions might not be applicable. Specifically, we bypass the choice of boundary condition by using a periodic box. The primary constructive fast reconnection solution, the Petscheck solution, has some peculiar aspects. The global geometry of the flow, and the reconnection speed, depend on the details of a microscopic X-point. This X-point actually interacts infinitesimal matter and energy, so it seems rather surprising that this tiny volume could affect the global flow. Instead, one might worry about the global flow of the system, which dominates the energy. We will see that this is particularly important in our simulations. \section{Simulation setup} \subsection{Physical setup} The purpose of the simulation is to study magnetic reconnection and its dynamics. We start by dividing the volume in two, with each subvolume containing a uniform magnetic field. In a periodic volume, this results in two current sheets where reconnection can occur. An initial perturbation is added to trigger the reconnection. \subsection{Computational implementation} Our simulations were performed on the Canadian Institute for Theoretical Astrophysics Sunnyvale cluster: 200 Dell PE1950 compute nodes; each node contains 2 quad core Intel(R) Xeon(R) E5310 @ 1.60GHz processors, 4GB of RAM, and 2 gigE network interfaces. 
The code \cite{2003ApJS..149..447P} is a second-order accurate (in space and time) high-resolution total variation diminishing (TVD) MHD parallel code. Kinetic, thermal, and magnetic energy are conserved and the divergence of the magnetic field was kept zero by flux constrained transport. There is no explicit magnetic and viscous dissipation in the code. The TVD constraints result in non-linear viscosity and resistivity on the grid scale. \subsection{Numerical setup} We have a reference setup, and vary numerical parameters relative to that. Initially the upper and lower halves of the simulation volume are filled with uniform magnetic fields whose directions differ by 135 degrees (Figure \ref{figure:setup}). The magnitude of the magnetic field is the same for every cell, and $\beta$, the ratio of gas pressure to magnetic pressure, is set to one. There is a rotational perturbation on the interface of the magnetic field, at the center of the box, inside a sphere of radius 0.05, relative to the box size. The rotational axis is nearly along the X axis, with a small deviation, which is used to break any residual symmetry. We use constant specific angular momentum at the equator, with solid body rotation on shells, which comes from the same initial condition generator as \cite{2003ApJ...596L.207P}. The rotational speed is set to equal to the sound speed at a radius of 0.02, and 0.4 sound speed at the sphere's equatorial surface We also tried adding a localized magnetic field perturbation: a random Gaussian magnetic field, with ($\beta=1$) and correlation length is half of the box, was added in the same region as the rotational perturbation. Since the only dissipation is numerical, on the grid scale, a translational velocity \cite{2004NewA....9..443T} was added to the simulation to increase the numerical diffusion for all the cells in the box. 
The reference value of the translational velocity is equal to the sound speed and we measure the time (unit in CT) by box size divided by the initial sound speed. Varying this by a factor of 2 up or down does not change the results. At the beginning the Alfven speed is the same as the sound speed. Different resolutions were tested, from $50^3$ cells to $800^3$ cells. \begin{figure} \centering \includegraphics[scale=0.3]{setup.eps} \caption{numerical setup: the sphere in the center of the box represent the area of the rotational perturbation. up-left is the rotational perturbation looked from YZ plane.} \label{figure:setup} \end{figure} \begin{figure} \centering \includegraphics[scale=0.4]{log10B2_vs_time_why_FRC.eps} \caption{fast reconnection for different initial conditions} \label{figure:why_FRC} \end{figure} \begin{figure} \centering \includegraphics[scale=0.4]{log10B2_vs_time_reso.eps} \caption{fast reconnection for different resolutions} \label{figure:reso_FRC} \end{figure} \begin{figure} \centering \includegraphics[scale=0.45]{100cells_rough_current_42.eps} \caption{2D snapshot during reconnection. current as background color} \label{figure:100cells_rough_FRC} \end{figure} \section{Simulation results} \subsection{Global fast magnetic reconnection} We use the total magnetic energy as a global diagnostic of the system. Figure \ref{figure:why_FRC} shows the evolution of the magnetic energy. The generic feature is the sudden drop of magnetic energy, which occurs on an alfvenic box crossing time, during which much of the magnetic energy is dissipated. The onset of this event depends on numerical parameters. Due to symmetries in the code, an absence of any initial perturbations would maintain the initial conditions indefinitely. We can see that when there is no forced diffusion and no initial perturbation, the magnetic energy is almost stationary. When diffusion is added, the magnetic energy decays gradually throughout the simulation. 
When explicit velocity perturbations are present, all the simulations show a sudden decrease of magnetic energy, which indicates fast magnetic reconnection. The common property is that they all have some initial perturbation, either rotational or a strong localized field perturbation; and the background diffusion only affects how early reconnection happens. In order to make sure this fast reconnection is not related to resolution, we simulate different resolutions, from a $50^3$ box, to a $800^3$ box, in Figure \ref{figure:reso_FRC}. All show fast reconnection and the resolution only affects the time elapsed before fast reconnection happens, though the details of how the delay depends on resolution are still unclear. Figure \ref{figure:100cells_rough_FRC} shows a rough two dimensional snapshot of the current ($\propto\nabla\times\mathbf{B}$) during fast reconnection, with color representing the current magnitude. It is clear that there are some regions that have reconnection (i.e., high current values) and we will use higher resolution to analyze them later. So, how fast is reconnection here? Since the magnetic energy is $\sim$ 50\% at the onset of fast reconnection, the Alfven time is also $\sim$ CT and in the simulation, we find that nearly 50\% of magnetic energy was released in one Alfven time during magnetic reconnection. This is clearly fast reconnection by any reasonable criteria. \subsection{What happens on the current sheet?} We can see there are some regions that have large currents, and the reconnection should happen there. Now we use high resolution (e.g. 800 cells) to investigate what exactly happens there. We want to show a snapshot close to the current sheet to see how flow evolves and what the magnetic field geometry looks like near the current sheet. We subtract the average value for both magnetic field and velocity in the region close to the current sheet. This places us in the frame comoving with the fluid.
The mean magnetic field does not participate in the dynamics of reconnection, so its removal allows us to see the dynamics more clearly. We present snapshots of three different times during the reconnection: one at the beginning, one at the middle and one at the end. Each time step snapshot contains three graphs: the upper left one shows the current magnitude as the background color with white lines representing magnetic field lines; the lower left one is a snapshot of both the magnetic field (blue dash) and the velocity field (red solid); and the right one is the corresponding magnetic energy plot. Figure \ref{figure:and1_800_185} is the beginning; Figure \ref{figure:and1_800_195} is the middle; Figure \ref{figure:and1_800_205} is the end. It is easy to see that the snapshot of both magnetic field lines and velocity field lines in Figure \ref{figure:and1_800_185} looks like Figure \ref{figure:petscheck} \cite{1964NASSP..50..425P}, which is the geometry of Petschek's solution for fast magnetic reconnection. The X-point, which is the reconnection region, is small and at the center. The tangent of the angle $\alpha$ represents the ratio of inflow to outflow.
\begin{figure} \centering \includegraphics[scale=0.45]{petscheck.eps} \caption{Geometry of the Petschek solution} \label{figure:petscheck} \end{figure} \begin{sidewaysfigure} \centering \includegraphics[scale=1.2]{and1_800_185.eps} \caption{Snapshot of magnetic field lines on the background of current, snapshot of both magnetic and velocity field lines, and $B^2$ at 37 CT} \label{figure:and1_800_185} \end{sidewaysfigure} \begin{sidewaysfigure} \centering \includegraphics[scale=1.2]{and1_800_195.eps} \caption{Snapshot of magnetic field lines on the background of current, snapshot of both magnetic and velocity field lines, and $B^2$ at 39 CT} \label{figure:and1_800_195} \end{sidewaysfigure} \begin{sidewaysfigure} \centering \includegraphics[scale=1.2]{and1_800_205.eps} \caption{Snapshot of magnetic field lines on the background of current, snapshot of both magnetic and velocity field lines, and $B^2$ at 41 CT} \label{figure:and1_800_205} \end{sidewaysfigure} \subsection{What happens globally?} We show the long term and global 2D evolution of both velocity field lines and magnetic fields for the $400^3$ simulations, starting from the beginning until reconnection completes. These plots are analogous to the plots in the previous section: the left one is the snapshot of both magnetic and velocity field lines; the center one is the snapshot of magnetic field lines with current as the background color; and the corresponding magnetic energy is also included on the right. At the beginning, the magnetic field lines are opposite and there is no velocity field. Then the initial rotational perturbation induces two reconnected regions with closed magnetic field loops, one at each interface. The closed loops are fed by a slow X-point at each interface. Noting that there is a mean field perpendicular to the plotted surface, these loops are actually twists in the perpendicular magnetic field.
In the bulk region between the interfaces, the parallel magnetic fields are not yet disturbed much by the perturbation. In Figure \ref{figure:and1_400_38} we can see the loops move into the X-point of the opposing loop, and strong interactions occur. The fluid forms two large circular cells, offset from the magnetic loops. The energy to drive the fluid flow comes from the reconnection energy of the magnetic field. This flow pattern enhances the reconnection by driving the fluid through the X point. We illustrate the fast reconnection flows in Figure \ref{figure:global_c}. Blue dash circles with arrows represent the magnetic loops. The red field lines with arrows represent the velocity field. There are two big black X's in the global frame, which represent the X point for reconnection. Because we are using periodic boundary conditions, we extend the simulation box picture in two other directions, to make the global flow easier to understand. Red solid lines represent the velocity field in the real box, and red dash-dot lines represent the field lines in the extended box. Reconnection is a local process in the global flow field. To see that, we need to boost into the comoving frame. Let us take the right magnetic twist for example: in the global frame, the flow on the right all moves downwards, with the magnetic twist moving at the highest speed. The X-point is like a saddle point for the flow: the fluid converges vertically, and diverges horizontally. In the X-point frame, setting the velocity at B to zero, A will move down and C will move up, which supports the conditions for reconnection.
\begin{figure} \centering \includegraphics[scale=0.5]{global_configuration.eps} \caption{geometry of global configuration} \label{figure:global_c} \end{figure} \begin{sidewaysfigure} \includegraphics[scale=1]{and1_400_0.eps} \caption{snapshot of magnetic field line on the background of current, and snapshot of both magnetic and velocity field line, and $B^2$ at 0 CT for 400 cells} \label{figure:and1_400_0} \end{sidewaysfigure} \begin{sidewaysfigure} \includegraphics[scale=1]{and1_400_10.eps} \caption{snapshot of magnetic field line on the background of current, and snapshot of both magnetic and velocity field line, and $B^2$ at 10 CT for 400 cells} \label{figure:and1_400_10} \end{sidewaysfigure} \begin{sidewaysfigure} \includegraphics[scale=1]{and1_400_38.eps} \caption{snapshot of magnetic field line on the background of current, and snapshot of both magnetic and velocity field line, and $B^2$ at 38 CT for 400 cells} \label{figure:and1_400_38} \end{sidewaysfigure} \begin{sidewaysfigure} \includegraphics[scale=1]{and1_400_40.eps} \caption{snapshot of magnetic field line on the background of current, and snapshot of both magnetic and velocity field line, and $B^2$ at 40 CT for 400 cells} \label{figure:and1_400_40} \end{sidewaysfigure} \begin{sidewaysfigure} \includegraphics[scale=1]{and1_400_42.eps} \caption{snapshot of magnetic field line on the background of current, and snapshot of both magnetic and velocity field line, and $B^2$ at 42 CT for 400 cells} \label{figure:and1_400_42} \end{sidewaysfigure} \section{Discussion} To summarize, we have found a global flow pattern which reinforces X-point reconnection, and the resulting fast reconnection in turn drives the global flow pattern. The basic picture is two dimensional. We did find that a pure 2-D simulation does not show this fast reconnection. This is easy to understand, since the reconnected field loops are loaded with matter, and would require resistivity to dissipate. 
In 3-D, these loops are twists which are unstable to a range of instabilities, allowing the field loops to collapse. So three basic ingredients are needed: 1. A global flow which keeps the field lines outside the X-point at a large opening angle to allow the reconnected fluid to escape, and avoid the Sweet-Parker time scale. 2. The reconnection energy drives this global flow. 3. A three dimensional instability allows closed (reconnected) field lines to collapse, releasing all the energy stored in the field. The problem described here has two geometric dimensionless parameters: the 2 axis ratios of the periodic box. In addition, there are a number of numerical parameters. We have varied them to study their effects. Extending the box in the Y direction (separation between reconnection regions) shuts off this instability, which might be expected: there are no global flows possible if the two interaction regions are too far separated. We found the threshold to be $Y < 1.2Z$. In the other direction, there appears to be no limit to make $Y << Z$. Increasing the size of the Z dimension does not diminish this instability. There is also a dependence on X (extent along the field symmetry axis). Shortening it to one grid cell protects the topology of field loops, and reconnection is not observed in 2-D simulations. We changed the initial conditions to see whether the fast reconnection is sensitive to the initial setup. After changing the angle between the opposite magnetic fields (from beyond 90 degrees to 180 degrees), the strength of the rotational perturbation, and the axis of the rotational perturbation, we found that the fast reconnection still appeared. The boundary condition is kept periodic and we found that the evolution of the fluid dynamics for different initial conditions is similar. It can be seen that the fast reconnection happens at the two interfaces of the straight magnetic field at the same time, with a magnetic twist moving towards it on each side.
The collision of the two magnetic twists is not head-on; they are slightly separated in the transverse direction. This special geometry helps the magnetic reconnection happen fast: as each magnetic twist pushes the field lines, it also affects the velocity field on the other side, which helps to increase the outflow speed. If we look back to Sweet-Parker's solution (\cite{1958IAUS....6..123S}, \cite{1957JGR....62..509P}), the main problem is that the current sheet is so thin, that even if one accelerates the outflow to Alfven speed, the mass of outflow is still small, which slows down the speed of the reconnection. Petschek's configuration \cite{1964NASSP..50..425P} can resolve this problem with a small reconnection region and finite opening angle for the outflow. In our simulation the speed of the outflow is further increased by the feedback between the two reconnection regions. The solar flare reconnection time scale is about the Alfven time scale \cite{1991JGR....96.9399D}, which is of the order of seconds to minutes. If there is only magnetic diffusivity ($\eta$) present, the diffusive time is $\tau_{D}=L^2/\eta$, where $L$ is the characteristic length. Taking the values from \cite{1991JGR....96.9399D}, $L=1000\,\mathrm{km}$ and $\eta=10^{-3}\,\mathrm{m^2\,s^{-1}}$, $\tau_{D}$ is $10^{15}\,\mathrm{s}$. Sweet-Parker's thin current sheet gives a reconnection time $\tau_{SP}=L/(\upsilon_{Ai}/R_{mi}^{1/2})$, with $R_{mi}=L\upsilon_{Ai}/\eta$. This makes the reconnection time about $10^5$ Alfven times. Petschek's configuration has a reconnection time $\tau_{P}=L/(\alpha\upsilon_{A})$, where $\alpha$ is between 0.01 and 0.1; with an Alfven speed $\sim 100\,\mathrm{km\,s^{-1}}$, this makes the time scale $100-1000\,\mathrm{s}$. Our fast reconnection time is of the order of the Alfven time scale, $\tau_{A}=L/\upsilon_{A}$, which is the same order as the observed time scales of $20-60\,\mathrm{s}$ \cite{1991JGR....96.9399D}. Furthermore, compared to LV99, no turbulence is needed or added in our simulations.
Our fast magnetic reconnection time scale is qualitatively similar to the energy release time scale for solar flares. \section{Summary} We present evidence for fast magnetic reconnection in a global three dimensional ideal magnetohydrodynamics simulation without any sustained external driving. These global simulations are self-contained, and do not rely on specified boundary conditions. We have quantified ranges in parameter space where fast reconnection is generic. The reconnection is Petschek-like, and fast, meaning that nearly half of the magnetic energy is released in one Alfven time. This example of fast reconnection relies on two interacting reconnection regions in a periodic box. It is an intrinsically three dimensional effect. Our interpretation is that the Petschek-like X-point angles are not determined by microscopic properties at an infinitesimal boundary where no energy is present, but rather by the global flow far away from the X-point. Whether or not such configurations are natural in an open system remains to be seen. \begin{acknowledgments} We would like to thank Christopher D. Matzner for helpful comments. The computations were performed on CITA's Sunnyvale clusters which are funded by the Canada Foundation for Innovation, the Ontario Innovation Trust, and the Ontario Research Fund. The work of ETV and UP is supported by the National Science and Engineering Research Council of Canada. \end{acknowledgments}
1,108,101,565,889
arxiv
\section{Introduction} \label{sec:introduction} In information technology, random number generators (RNGs) refer in general to devices that output numbers distributed in a certain range uniformly. If one wishes to use them for information security purposes in particular, their outputs must be secret \cite{Shannon1949}, in addition to being uniformly distributed. Furthermore, in order for the RNG to be usable by anyone, these properties need to be guaranteed by some objective evidence. Suppose, for instance, that one buys a dice from a not-necessarily-reliable vendor and throws it alone in a closed room. For this process to generate a uniform distribution, he must be sure with an evidence that the dice is fair. As for the secrecy, another evidence is necessary to ensure that the outputs are unpredictable and unknown to outside; {\it e.g.}, even to the vendor or the manufacturer who had all the chances to tamper with the dice such that the outputs follow a certain pattern. But how can one find an objective basis of secrecy that anyone can agree with? Arguably, the most convincing basis of secrecy would be the laws of nature, that is, if nature assures the secrecy by law, then nothing can be utilized to predict the outputs. This is precisely what we will adopt when we ensure the secrecy of our novel RNG proposed in this paper, under a reasonable set of assumptions which can in practice be verified without much difficulty. In what follows, if the output of a given RNG is rigorously proven to be secret, we call it a {\it secure} RNG. Throughout the paper we focus on secure RNGs. The formal definition of the security, the so-called {\it universally composable security} \cite{BHLMO05}, will be given in Section \ref{sec:definition_security_LHL}; this is the most strict definition known at the present. 
The secure RNG based on the laws of quantum mechanics is indeed possible \cite{Acin2016,Ma2016,Herrero-Collantes2017,Bierhorst2018,Stefanov2000,Rarity1994,Dynes2008,Ma2005a,Nie2014,Wayne2009,Wahl2011,Yan2014,Ren2011,Applegate2015,Furst2010}. For example, RNGs using photons have been studied for a long time, and some of them have been strictly proven to be secure. A common method of the single photon RNG is to use two complementary bases $+, \times$ of the polarization: The legitimate user (henceforth, Alice) generates a single photon state having a polarization in one basis, say, the vertical polarization state $\ket{\updownarrow}$ belonging to basis $+$, and then measures it in the other, diagonally slanted $\times$ basis. Alice adopts the measurement result as the random bits. The major concern here is that the vendor of the light source may be an eavesdropper (henceforth, Eve). In such a case, Eve could have tampered with the source to retain correlation with her own device, and may have access to the random bits as a result. The security against such eavesdropper can still be argued as follows. Being a pure state, the initial state $\ket{\updownarrow}$ cannot be entangled with outside, and thus has no correlation with Eve's device. When the state is measured in the complementary basis $\times$, each measurement result, \rotatebox[origin=c]{45}{$\updownarrow$} or \rotatebox[origin=c]{-45}{$\updownarrow$}, occurs exactly with probability one half. Thus the random bits are distributed uniformly, and they are uncorrelated with Eve. Unfortunately, the single photon RNGs have practical disadvantages because the energy of the photon is minute and, accordingly, the detector must be highly sensitive. For this reason, the single photon RNGs suffer constraints for reduction both in their size and cost. 
Besides single photon RNGs, there is another type of RNG methods which also exploit quantum phenomena, {\it i.e.}, those using radiations from nuclear decays \cite{Walker2001,Alkassar2005}. In these radioactive RNG methods one detects radiations and adopts the timings of the detections as random numbers. These methods were already studied half a century ago \cite{doi:10.1063/1.1658698}, and is actually older than the single photon RNG mentioned above. The advantage of the radioactive RNG is that their device can be made smaller and simpler than that of single photon RNGs. A sufficient sensitivity to the radiation can be achieved even with a small detector, since the energies of radiations are much larger than those of photons. Indeed, radioactive RNG chips of a few square millimeters are already manufactured \cite{quantaglionWeb,quantaglionPatent}. \begin{figure}[ht] \begin{center} \includegraphics[bb=0 30 310 310, width=0.3\linewidth]{fig1.pdf} \caption{The device setup for the radioactive random number generator (radioactive RNG) consists of a radiation source and a detector. We will denote the detector by $D$. With this setup, Alice (the legitimate user) obtains raw data $\vec{i}$ by executing the procedure, steps (i), (ii) of Section \ref{sec:RNG_method}. } \label{fig-actual-setup} \end{center} \end{figure} However, to the best of our knowledge, there is no security proof of the radioactive RNG, despite that it has been shown that they can generate a uniform distribution \cite{doi:10.1063/1.1658698}. We find this dissatisfying, even though the concept of the composable security, which is essential for the proof, is relatively new \cite{BHLMO05}. Here we present a new method of the radioactive RNG which admits a rigorous security proof. The required security is ensured by the parity (space inversion) symmetry arising in the device, which is available generically for a nuclide which decays by parity-conserving interactions. 
The device structure is as simple as before, consisting only of a radiation source and a detector. The only difference is the two conditions newly imposed on the device -- which are readily realized in practice -- that allow us to make use of the parity symmetry for ensuring security; see conditions (a) and (b) mentioned below. The outline of our security proof is as follows. On one hand, in the actual implementation, we use detection timings as the origin of randomness. On the other hand, in the security analysis, we instead analyze the absence/presence (denoted by $z_i=0,1$) of detection in each time bin $i$, since they are merely two different formats of the same measurement results (Fig.\,\ref{fig-correspondence1}). Then by temporarily limiting ourselves to an ideal situation (Section \ref{sec:ideal_situation} and Fig.\,\ref{fig-virtual-setup} (B)), we show that variables $z_i$ correspond to measuring the direction, up or down, of the radiation (Fig.\,\ref{fig-virtual-setup} (C)). Hence measuring a parity symmetric radiation in this setting means measuring a parity invariant state using a pair of projectors which interchange with each other under the parity operation. Then values $z_i=0,1$ occur with an equal probability, and in addition, the resulting (sub-normalized) states on Eve's side remain fixed, irrespective of $z_i$; {\it i.e.}, Eve can gain no information of $z_i$ by any measurement. The security in non-ideal situations can also be shown by essentially the same argument (Section \ref{sec:realistic_situations} and Fig.\,\ref{fig-virtual-setup} (A), (D)). \section{Main result} \subsection{RNG method} \label{sec:RNG_method} We consider the following type of the radioactive RNG method.
By using a device consisting of a radiation source and a detector $D$ (Fig.\,\ref{fig-actual-setup}), Alice executes the following procedure (Fig.\,\ref{fig-randomness-extraction}): Alice chooses integer parameters $N$ and $n_{\rm fin}$ such that they satisfy $0\le n_{\rm fin}\le N$. She also selects a function $f_s$ randomly from a predetermined set of functions ${\cal F}=\{f_s\}$, each of which outputs an $n_{\rm fin}$ bit string. Then she repeats the following steps. \begin{description} \item[Radioactive RNG] \begin{itemize} \item[(i)] {\bf Measurement of decay timings:} Alice measures radiations from the source, using detector $D$, in time bins $i=1,\dots,N$. She then records the measurement result as the list of time bins where a detection occurred; {\it i.e.} as $\vec{i}=(i_1,\dots,i_{n_{\rm det}})$, with $n_{\rm det}$ being the number of detections, and $i_j$ being in the increasing order, $1\le i_1<i_2<\cdots<i_{n_{\rm det}}\le N$. If there was no detection, she lets $\vec{i}=(0)$, {\it i.e.}, $n_{\rm det}=1$, $i_1=0$. \item[(ii)]\label{step-PA} {\bf Randomness extraction:} Alice calculates the final bits $\vec{r}=f_s(\vec{i})$ of length $n_{\rm fin}$. \end{itemize} \end{description} The purpose of each step is as follows (Fig.\,\ref{fig-randomness-extraction}). Step (i) generates raw data $\vec{i}$ to be used as the source of the final bits $\vec{r}$. For $\vec{r}$ to be secure, not all, but a certain fraction of $\vec{i}$ need to be unknown to Eve. The standard theoretical results say that the size of this unknown fraction equals a quantity called the {\it smooth conditional min-entropy} $H_{\rm min}^{\delta}(\vec{I}|E)$, which is a function of the joint state $\rho_{\vec{I}E}$ of variable $\vec{i}$ and Eve (see Section \ref{sec:definition_security_LHL} and Ref. \cite{RennerPhD} for the rigorous definitions). 
In step (ii) she extracts these $H^\delta_{\rm min}(\vec{I}|E)$ bits that are unknown, and generates $\vec{r}$, which is completely unknown to Eve (Section \ref{sec:definition_security_LHL} and Ref. \cite{RennerPhD}). We denote the width of one time bin by $\Delta t$. In order to simplify later presentations, without loss of generality, we assume that in every time bin, Alice starts her measurement at the beginning of the time bin and finishes it in a finite time $\le \Delta t$. \begin{figure}[t] \begin{center} \includegraphics[bb=0 0 960 500, width=\linewidth]{fig_RE.pdf} \caption{ The purpose of randomness extraction is to extract from a measurement result $\vec{i}$, which may be partially known to Eve, random bits $\vec{r}$ that are completely unknown to Eve. In the above picture, $\vec{i}$ being partially known to Eve is expressed by its being a mixture of black (unknown) and white (known) elements. The number of unknown bits equals the smooth conditional min-entropy $H_{\rm min}^\delta(\vec{I}|E)$, a function of $\rho_{\vec{I}E}$. } \label{fig-randomness-extraction} \end{center} \end{figure} \subsection{Conditions on the device} \label{sec:assumptions} Hence the security analysis is reduced to lower bounding $H^\delta_{\rm min}(\vec{I}|E)$. We are concerned with the possibility that the radiation source to be measured in step (i) may be entangled with Eve, and through that entanglement Eve may access $\vec{i}$; {\it i.e.}, $H^\delta_{\rm min}(\vec{I}|E)$ may become too small to guarantee the security of $\vec{r}$ (cf. 2nd and 7th paragraphs of Section \ref{sec:introduction}). The goal of this paper is to nullify such an eavesdropping strategy by making use of the parity symmetry. \subsubsection{Statement of conditions} \label{sec:statement_conditions} To this end, we assume the following three conditions on the device. The first two of them, (a) and (b), in particular, are introduced in order to realize the parity symmetry in the device.
\begin{itemize} \item[(a)] {\bf Radiation source:} At the beginning of each time bin ({\it i.e.}, immediately before Alice's measurement), the state of radiations is parity invariant. \item[(b)] {\bf Detector:} Detector $D$ is housed within one hemisphere around the source (Fig.\,\ref{fig-virtual-setup} (A)). \item[(c)] {\bf Effect on radiations by measurements:} Effect on radiations in the vicinity of $D$, caused by Alice's measurement of a time bin $i$, is washed away by the beginning of the next time bin $i+1$. \end{itemize} In addition, we introduce the following notions for later convenience. \begin{itemize} \item[(d)] {\bf Detections, multi-particle emissions and dark counts:} Except with probability $\delta$, there are at least $n_{\rm thr}$ detections, at most $n_{\rm multi}$ time bins where multiple particles are emitted, and at most $n_{\rm dark}$ time bins where dark counts occur. \end{itemize} The statement of condition (a) requires some explanation, which we give now. Let $\mathcal{H}_A$ be the Hilbert space describing radiated particles in the vicinity of detector $D$. Also, let ${\cal H}_E$ be that describing all degree of freedom of Eve (cf. Fig.\,\ref{fig-randomness-extraction}). We assume that in $\mathcal{H}_A$ the parity (space inversion) operator $P_A$ is well defined and satisfies $P_A^2=1$. (Throughout the paper, we use the convention of omitting the identity operators included in a tensor product; hence {\it e.g.} $P_A$ is an abbreviation of $P_A\otimes 1_{E}$.) Under this setup, we say that the joint state $\rho_{AE}(t)$ of ${\cal H}_{A}$ and ${\cal H}_{E}$ at time $t$ is parity invariant, if it satisfies \begin{equation} P_A\rho_{AE}(t)P_A = \rho_{AE}(t). \label{eq:parity_invariant_state} \end{equation} Condition (a) says that the parity invariance (\ref{eq:parity_invariant_state}) holds at the beginning of each time bin, {\it i.e.} at $t=0,\Delta t, \dots,(N-1)\Delta t$. 
\subsubsection{Feasibility of the conditions} \label{sec:justification} Next we discuss the feasibility of the conditions above. First, condition (a) can basically be satisfied by choosing a nuclide which decays by parity-conserving interactions ({\it e.g.} strong and electromagnetic interactions, as in the $\alpha$- and the $\gamma$-decays), since such sources will always emit radiations with a constant parity eigenvalue. However, as we deal here with an RNG, we must be aware of one scenario where such choice may not be sufficient for guaranteeing condition (a). That is, the nuclide could have been tampered with by Eve, before purchased by Alice (cf. the second and seventh paragraphs of Section \ref{sec:introduction}), to the extent of destroying the parity invariance. We point out that, even in such scenario, Alice can still verify condition (a) by performing a test on the source at hand, prior to executing the radioactive RNG. E.g., she measures the radiation from the source and checks if the results, such as the energy spectrum and the angular distribution, are always consistent with condition (a). If this verification succeeds she then executes the radioactive RNG; otherwise she aborts. Second, condition (b) can always be verified visually. Third, condition (c) is a pure assumption. However, this assumption is in fact implicit in most literature of quantum key distribution and physical random number generators (including the single photon RNG mentioned in Introduction). Finally, condition (d) can be verified by statistically estimating parameters $n_{\rm thr}$, $n_{\rm multi}$ and $n_{\rm dark}$ with a significant level $\delta$, prior to executing the radioactive RNG. \subsection{Security of measurement result $\vec{i}$} Under these conditions, the security of measurement result $\vec{i}$ can be guaranteed as follows. 
\begin{Thr} \label{thr_min_entropy} The smooth min-entropy $H_{\rm min}^{\delta}(\vec{I}|E)$ of $\vec{i}$, conditioned on Eve's degree of freedom $E$, is bounded as \begin{equation} H_{\rm min}^{\delta}(\vec{I}|E)\ge n_{\rm thr}-n_{\rm multi}-2n_{\rm dark}. \label{eq:H_min_delta_lowerbound} \end{equation} \end{Thr} This means that the final bits $\vec{r}$ are secure, if Alice chooses their length $n_{\rm fin}$ to be roughly equal to $n_{\rm thr}-n_{\rm multi}-2n_{\rm dark}$ (see Lemma \ref{lmm:rigorous_security_statement} of Section \ref{sec:definition_security_LHL} for a more rigorous interpretation of the bound (\ref{eq:H_min_delta_lowerbound})). \section{Proof of Theorem \ref{thr_min_entropy}} In order to simplify the analysis, we use the {\it virtual protocol} approach (also known as {\it game transform} in modern cryptography). In this approach, instead of analyzing the actual RNG directly, one modifies it and constructs a {\it virtual} RNG, as well as a quantity $H'$ arising there which lower bounds $H^\delta_{\rm min}(\vec{I}|E)$. Then analyzing the virtual RNG, one obtains a lower bound on $H'$, which also lower bounds $H^\delta_{\rm min} (\vec{I}|E)$ by definition. With the virtual RNG and $H'$ designed properly, this allows one to obtain a lower bound on $H^\delta_{\rm min} (\vec{I}|E)$ by a simpler analysis. We stress that virtual RNGs will only be used for simplifying the theoretical analysis, and never need to be implemented in practice. As the first example of such virtual RNGs, we consider the case where Alice records the measurement result $\vec{i}$ in a different format $\vec{z}=(z_1,\dots, z_{N})$ where $z_i=0$ ($z_i=1$) indicates the absence (presence) of a detection in time bin $i$ (Fig.\,\ref{fig-correspondence1}). In other words, Alice records measurement results $z_i$ of all time bins $i=1,\dots,N$, instead of timings $\vec{i}$ where a detection occurs.
It is straightforward to see that $\vec{i}$ and $\vec{z}$ are in a one-to-one correspondence, and are thus equally unknown to Eve, \begin{equation} H^\delta_{\rm min}(\vec{I}|E)=H^\delta_{\rm min}(\vec{Z}|E). \label{eq:equality_I_Z} \end{equation} Thus to lower bound $H^\delta_{\rm min}(\vec{I}|E)$, it suffices to bound $H^\delta_{\rm min}(\vec{Z}|E)$; this is an example of the quantity $H'$, mentioned in the second paragraph of this subsection. \begin{figure}[t] \includegraphics[clip, bb=0 0 900 250, width=0.8\linewidth]{fig_correspondence.pdf} \caption{One-to-one correspondence between detection timings $\vec{i}=(i_1,\dots,i_{n_{\rm det}})$, and measurement results of all time bins $\vec{z}=(z_1,\dots,z_N)$.} \label{fig-correspondence1} \end{figure} Next we will modify this virtual RNG outputting $\vec{z}$ further, such that the parity transform $P_A$, described in Section \ref{sec:assumptions}, is related to bit flips of $z_i$. Then we will make use of this relation to lower bound $H^\delta_{\rm min}(\vec{Z}|E)$. \subsection{Ideal situation} \label{sec:ideal_situation} To elucidate this relation with a simplified situation, we temporarily idealize conditions (a) and (b) as follows. \begin{itemize} \item[(a')] At the beginning of each time bin, the state of radiations is parity invariant and consists of exactly one particle. \item[(b')] Detector $D$ is perfect ({\it i.e.}, with a unit efficiency and no dark counts) and covers exactly the entire lower hemisphere (Fig.\,\ref{fig-virtual-setup}, (B)). Hence $D$ goes off iff one particle or more go downward. \end{itemize} Then we can modify our radioactive RNG further such that bit flips of $z_i$ and $P_A$ become equivalent. To see this, first note that detector $D$ alone can determine whether the particle went upward or downward. Indeed, if $D$ detected the particle ($z_i=1$), it means that it went down due to (b'); and if not ($z_i=0$), the two conditions together say that it went up.
These results $z_i=0,1$ can alternatively be obtained by a pair of perfect detectors, $D^\downarrow$ and $D^\uparrow$, exactly covering the lower and the upper hemispheres, respectively (Fig.\;\ref{fig-virtual-setup}, (C)).
with $D^\downarrow$ being the actual detector $D$ ($D^\downarrow=D$) and $D^\uparrow$ being the parity transformed image of $D$ (Fig.\;\ref{fig-virtual-setup}, (D)).
She then lets $z_i=g(w_i)$, using the function $g$ specified in the second row of Table \ref{table-correspondence2}.
We review the definition of the security of an RNG, as well as techniques for guaranteeing it. In Introduction, we said that the final bits $\vec{r}$ are secure when they are distributed uniformly and unknown to Eve.
Given an actual state $\rho_{\vec{R}E}$, we define the corresponding ideal state to be $\rho_{\vec{R}E}^{\rm ideal}=2^{-n_{\rm fin}}\mathbb I_{\vec{R}}\otimes\rho_E$, $\rho_E={\rm tr}_A(\rho_{AE})$, where $\vec{r}$ is distributed uniformly and is completely unknown to Eve. ${\cal H}_{\vec{R}}$ is the Hilbert space of the memory storing $\vec{r}$. However, as it is practically difficult to always guarantee this ideal situation, it is customary to relax this notion and say that $\vec{r}$ is $\varepsilon$-secure if \begin{eqnarray} \frac12\left\|\rho_{\vec{R}E}-\rho_{\vec{R}E}^{\rm ideal}\right\|_1\le \varepsilon, \label{eq:average_trace_dist_defined} \end{eqnarray} where $\|A\|_1={\rm tr}\left(\sqrt{AA^\dagger}\right)$ denotes the $L_1$-norm of an operator $A$. Intuitively, this says that the actual state cannot be discriminated from the ideal state except with probability $\varepsilon$. This notion of security using parameter $\varepsilon$ is often called the universally composable security \cite{BHLMO05}. In Section \ref{sec:RNG_method}, we stated that for the final bits $\vec{r}$ to be secure, it suffices that the smooth conditional min-entropy $H^\delta_{\rm min}(\vec{I}|E)$ of measurement results $\vec{i}$ is lower bounded. The rigorous results corresponding to this statement are as follows. The conditional min-entropy $H_{\min}(\vec{I}|E)_{\rho_{\vec{I}E}}$ of a sub-normalized state $\rho_{\vec{I}E}$ is defined to be the maximum real number $\lambda$, satisfying $2^{-\lambda} \mathbb I_{\vec{I}}\otimes \sigma_E \ge \rho_{\vec{I}E}$ for a normalized state $\sigma_E$ \cite{RennerPhD,TomamichelPhD}. We abbreviate $H_{\min}(\vec{I}|E)_{\rho_{\vec{I}E}}$ as $H_{\min}(\vec{I}|E)$, whenever the subscript $\rho_{\vec{I}E}$ is obvious from the context. 
The {\it smooth} conditional min-entropy $H_{\min}^{\delta}(\vec{I}|E)_{\rho_{\vec{I}E}}$ is the maximum value of $H_{\min}(\vec{I}|E)_{\bar{\rho}_{\vec{I}E}}$ of sub-normalized states $\bar{\rho}_{\vec{I}E}$ that are $\delta$-close to $\rho_{\vec{I}E}$ in terms of the purified distance \cite{TomamichelPhD}.
We will denote by $\bar{D}$ the measurement setup consisting of the detector pair $D^{\uparrow}, D^{\downarrow}$. We denote the four output patterns from $\bar{D}$ in one time bin by $w\in{\cal W}$, where ${\cal W}:=\{\uparrow,\downarrow, {\rm none}, {\rm both}\}$ (Table \ref{table-correspondence2}, 1st row).
where $\tilde{P}_A(\rho_{ABE}):= P_A \rho_{ABE} P_A$.
=& M^{\uparrow}_{AB}\circ \tilde{P}_{A}(\rho^{(w_1,\dots,w_{j-1})}_{ABE}((j-1)\Delta t))\\
since Eve's ambiguity never increases on receiving the extra information $h(\vec{w})$.
Because of (\ref{eq:rho_z_E_parity_invariance}), Eve's (sub-normalized) states $\rho^{\vec{w}}_E$ are equal for all these values of $\vec{w}$, and thus the corresponding entropy takes the value
Hence measurements of radiation directions $w_i$ and of parity become mathematically equivalent to those of $+$ and $\times$ bases used in the single photon RNG.
1,108,101,565,890
arxiv
Here $\prod \A{A}_n$ denotes the C*-algebra of norm-bounded sequences, and $\bigoplus \A{A}_n$ the C*-algebra of sequences converging to zero.
(See for instance~\cite{Loring.LSPP}, \cite{Blackadar.KT}, and~\cite{Blackadar.OA}.) They also have interesting behavior under certain set-theoretic hypotheses. Extensive study has been given in particular to their automorphism groups, under the assumption of the Continuum Hypothesis ($\CH$), and, alternately, the Proper Forcing Axiom ($\PFA$). (See~\cite{Rudin},~\cite{Shelah-Steprans.PFAA},~\cite{Farah.AQ},~\cite{Velickovic.OCAA},~\cite{Farah-McKenney.ZD} for the commutative case; and~\cite{Phillips-Weaver},~\cite{Farah.CO},~\cite{Farah.AC}, and~\cite{Coskey-Farah} for the noncommutative case.) Typically, $\CH$ implies that there are many automorphisms, whereas $\PFA$ implies that the only automorphisms are those present in any model of $\ZFC$. For example, $\CH$ implies that there are $2^{2^{\aleph_0}}$-many automorphisms of both $C(\beta\NN\sm\NN)$ and the Calkin algebra. (See~\cite{Rudin} and~\cite{Phillips-Weaver}, respectively). On the other hand, $\PFA$ implies that every automorphism of $C(\beta\NN\sm\NN)$ is induced by a function $e : \NN\to\NN$, and every automorphism of the Calkin algebra is inner. In~\cite{Coskey-Farah}, Coskey and Farah considered the automorphisms of a general corona algebra, and found a notion of triviality which, in the cases of $C(\beta\NN\sm\NN)$ and the Calkin algebra $\A{Q}(\A{K}(H))$, turns out to hold exactly for those automorphisms described above. Before we state their definition, recall that the \emph{strict topology} on a multiplier algebra $\A{M}(\A{A})$ is the weakest topology making the following seminorms continuous; \[ m\mapsto \norm{ma} + \norm{am} \qquad (m\in \A{M}(\A{A}),\, a\in\A{A}) \] \begin{defn} A $*$-homomorphism $\vp : \A{Q}(\A{A})\to\A{Q}(\A{B})$ is called \emph{trivial} if its \emph{graph}, \[ \Gr{\vp} = \set{(a,b)\in \A{M}(\A{A})_1 \times \A{M}(\A{B})_1}{\vp(a + \A{A}) = b + \A{B}} \] is a Borel subset of $\A{M}(\A{A})_1\times \A{M}(\A{B})_1$ when each factor is endowed with the strict topology. 
\end{defn} (We emphasize that the graph of a $*$-homomorphism $\vp : \A{Q}(\A{A})\to\A{Q}(\A{B})$ is not the graph of a function in the usual sense, but the result of pulling this set back through the quotient maps $\A{M}(\A{A})\to\A{Q}(\A{A})$ and $\A{M}(\A{B})\to\A{Q}(\A{B})$.) $\A{M}(\A{A})_1$ here refers to the unit ball of $\A{M}(\A{A})$. The following conjectures made in~\cite{Coskey-Farah} extend all currently known results on automorphisms of corona algebras; \begin{conj} \label{conj.CH} The Continuum Hypothesis implies that the corona of any separable, nonunital C*-algebra must have a nontrivial automorphism. \end{conj} \begin{conj} \label{conj.FA} Forcing axioms imply that every automorphism of the corona of a separable, nonunital C*-algebra must be trivial. \end{conj} (See also~\cite{Farah.AQ},~\cite{Farah.RC},~\cite{Just.EU} and~\cite{Farah-Shelah.TA} for work on the analogous conjectures for quotients of $\SSN{P}(\NN)$ by analytic P-ideals.) In~\cite{Coskey-Farah}, Coskey and Farah prove Conjecture~\ref{conj.CH} for a large class of C*-algebras, including simple and stable C*-algebras. In this paper, however, we will mainly be concerned with Conjecture~\ref{conj.FA}. In place of the vague term ``forcing axioms'' we will use two combinatorial consequences of the Proper Forcing Axiom, \emph{Todor\v cevi\'c's Axiom} and \emph{Martin's Axiom}, which we will abbreviate as $\TA$ and $\MA$ respectively. $\TA$ is also well-known as the \emph{Open Coloring Axiom}, $\OCA$, and can be viewed as a Ramsey-theoretic dichotomy for graphs on a set of real numbers; $\MA$ is the prototypical forcing axiom for ccc posets. These principles have no large-cardinal strength relative to $\ZFC$ and can be forced over any model of set theory. The reader is referred to~\cite{Todorcevic.PPIT},~\cite{Moore.PFA} for more information on their use in set theory and other areas of mathematics. 
The main result of this paper is a confirmation of Conjecture~\ref{conj.FA} for a certain class of corona algebras, assuming $\TA + \MA$. Before the statement we again need a definition. A (separable, unital) \emph{UHF algebra} is a C*-algebra which can be realized as a direct limit of full matrix algebras over $\CC$, with unital connecting maps. \begin{thm} \label{main.borel} Assume $\TA + \MA$, and let $\A{A}_n$ and $\A{B}_n$ ($n\in\NN$) be sequences of separable, unital UHF algebras. If $\vp$ is an isomorphism of the form \[ \prod \A{A}_n / \bigoplus \A{A}_n \simeq \prod \A{B}_n / \bigoplus \A{B}_n \] then $\Gamma_\vp$ is Borel. \end{thm} In proving Theorem~\ref{main.borel} it will be necessary to consider some stronger forms of triviality for $*$-homomorphisms between corona algebras. We say that a map $\alpha : \A{M}(\A{A})\to \A{M}(\A{B})$ is a \emph{lift} of $\vp : \A{Q}(\A{A})\to\A{Q}(\A{B})$ if the following diagram commutes; \[ \begin{tikzpicture} \matrix (m) [cdg.matrix] { \A{M}(\A{A}) & \A{M}(\A{B}) \\ \A{Q}(\A{A}) & \A{Q}(\A{B}) \\ }; \path [cdg.path] (m-1-1) edge node[auto]{$\alpha$} (m-1-2) (m-2-1) edge node[auto]{$\vp$} (m-2-2) (m-1-1) edge (m-2-1) (m-1-2) edge (m-2-2); \end{tikzpicture} \] We call $\alpha$ an \emph{algebraic lift} of $\vp$ if $\alpha$ is a $*$-homomorphism, and \emph{strict} if $\alpha$ is continuous with respect to the strict topologies. Note that if $\vp$ has a strict lift then $\Gamma_\vp$ is necessarily Borel. This conclusion does not always hold for $*$-homomorphisms with algebraic lifts, however; see Section~\ref{subsec:topologies} for further details. In the process of proving Theorem~\ref{main.borel} we demonstrate the following; \begin{thm} \label{main.cfh} Assume $\TA + \MA$. 
Let $\A{A}_n$ and $\A{B}_n$ ($n\in\NN$) be separable, unital UHF algebras, and let $\vp$ be an isomorphism of the form \[ \prod \A{A}_n / \bigoplus \A{A}_n \to \prod \A{B}_n / \bigoplus \A{B}_n \] Then for every sequence $\A{F}_n\subseteq\A{A}_n$ ($n\in\NN$) of finite-dimensional, unital C*-subalgebras, the restriction of $\vp$ to the C*-subalgebra $\prod\A{F}_n / \bigoplus \A{F}_n$ has a strict algebraic lift. \end{thm} Theorem~\ref{main.cfh} allows us to code a given isomorphism by what we call a \emph{coherent family of $*$-homomorphisms}. Coherent families appear in various forms throughout the set-theoretic literature; see, for instance, \cite{Farah.AQ}, \cite{Farah.CO}, \cite{Todorcevic.PID}, and~\cite{Dow-Simon-Vaughan}. The proof of Theorem~\ref{main.borel} is then completed with the following; \begin{thm} \label{main.cfh->borel} Assume $\TA$. Let $\A{A}_n$ ($n\in\NN$) be separable, unital UHF algebras, and let $\A{B}$ be any separable C*-algebra. Suppose $\vp$ is a $*$-homomorphism of the form \[ \prod \A{A}_n / \bigoplus \A{A}_n \to \A{M}(\A{B}) / \A{B} \] and there is a strict, algebraic lift of each restriction of $\vp$ to a unital C*-subalgebra of the form $\prod \A{F}_n / \bigoplus \A{F}_n$, with each $\A{F}_n \subseteq \A{A}_n$ finite-dimensional. Then $\Gamma_\vp$ is Borel. \end{thm} It is interesting to note that Theorem~\ref{main.cfh} already gives us a form of rigidity for this class of corona algebras; \begin{cor} \label{main.cor} Assume $\TA + \MA$. Let $\A{A}_n$ and $\A{B}_n$ ($n\in\NN$) be separable, unital UHF algebras, and suppose \[ \prod \A{A}_n / \bigoplus \A{A}_n \simeq \prod \A{B}_n / \bigoplus \A{B}_n \] Then there are cofinite sets $\SN{A},\SN{B}\subseteq\NN$ and a bijection $e : \SN{A}\to\SN{B}$ such that for all $n\in\SN{A}$, $\A{A}_n$ and $\A{B}_{e(n)}$ are isomorphic. \end{cor} Corollary~\ref{main.cor} essentially reduces the study of isomorphisms in our class of corona algebras to a study of automorphisms. 
The proof goes through Theorem~\ref{main.cfh->borel} and an appeal to Shoenfield's absoluteness theorem.
In Section~\ref{sec:fa-embeddings} we prove Theorem~\ref{main.cfh} by showing that the restriction of a given isomorphism to a subalgebra of the form $\prod\A{F}_n / \bigoplus \A{F}_n$ must have a Borel graph. The work of this section is derived from arguments in~\cite{Velickovic.OCAA} and~\cite{Farah.AQ}. In Section~\ref{sec:coherent-families} we prove Theorem~\ref{main.cfh->borel}, and discuss the question of whether all trivial automorphisms of a given corona algebra $\prod \A{A}_n / \bigoplus \A{A}_n$ have strict, algebraic lifts. \section{Preliminaries} \label{sec:preliminaries} \subsection{Set theory} \label{subsec:set theory} We will assume that the reader is familiar with the basics of modern set theory as outlined in, for instance,~\cite{Kunen.2011}. Our notation will for the most part follow the standards of the literature in set theory; in particular, we identify $\NN$ with the first infinite ordinal $\omega$, and $n\in\NN$ with $\{0,\ldots,n-1\}$. When $f$ is a function and $X$ a set we write $f[X]$ for the image of $X$ under $f$. We will also write $[X]^k$ for the set of $k$-element subsets of $X$, and $[X]^{<\omega}$ for the set of finite subsets of $X$. We will often be concerned with the ordering of \emph{eventual dominance} on $\NN^\NN$; \[ f <^* g \iff \exists m\; \forall n\ge m\quad f(n) < g(n) \] A simple diagonalization argument shows that $\NN^\NN$ is countably directed in $<^*$. It follows that if $X\subseteq\NN^\NN$ is cofinal in $<^*$ and is written as a countable union $X = \bigcup X_n$, then there is some $n$ for which $X_n$ is also cofinal in $<^*$. We also note that any $X\subseteq\NN^\NN$ which is cofinal in $<^*$ must be cofinal in $<^m$ for some $m\in\NN$, where \[ f <^m g \iff \forall n\ge m\quad f(n) < g(n) \] Similar facts hold for $\SSN{P}(\NN)$ under the ordering of \emph{almost-inclusion}; \[ \SN{A}\subseteq^* \SN{B} \iff |\SN{A}\sm\SN{B}| < \aleph_0 \] We will often use these facts without explicit reference. 
Our use of forcing axioms will be limited to two of their combinatorial consequences, \emph{Todor\v cevi\'c's Axiom} ($\TA$) and \emph{Martin's Axiom} ($\MA$). $\TA$ and $\MA$ follow from $\PFA$, but have no large cardinal strength relative to $\ZFC$ since they can be forced together over any model of $\ZFC$ (\cite{Todorcevic.PPIT}). $\TA$ is also well known as the \emph{Open Coloring Axiom} ($\OCA$). Our choice of the name $\TA$ stems from the fact that other, different axioms were introduced in~\cite{ARS}, also under the name $\OCA$. $\TA$ states; \begin{quote} Let $X$ be a separable metric space, and let $[X]^2 = K_0\cup K_1$ be a partition. Suppose $K_0$ is open, when identified with a symmetric subset of $X\times X$ minus the diagonal. Then either \begin{itemize} \item there is an uncountable $H\subseteq X$ such that $[H]^2\subseteq K_0$ ($H$ is \emph{$K_0$-homogeneous}), or \item $X$ can be written as a countable union of sets $H_n$ ($n\in\NN$) with $[H_n]^2\subseteq K_1$ ($X$ is \emph{$\sigma$-$K_1$-homogeneous}). \end{itemize} \end{quote} $\TA$ has a remarkable influence on the set theory of the real line; for instance, it implies that the least size $\bb$ of an unbounded subset of $(\NN^\NN, <^*)$ is exactly $\aleph_2$ (\cite{Todorcevic.PPIT}). We will occasionally make use of this fact. $\MA$ states; \begin{quote} Let $\PP$ be a poset with the countable chain condition, and let $\SSN{D}$ be a collection of $\aleph_1$-many dense subsets of $\PP$. Then there is a filter $G\subseteq\PP$ which meets every set in $\SSN{D}$. 
\end{quote} This notation diverges from the more standard refinement, in which $\MA_\kappa$ is written for the analogous statement with $\kappa$ replacing $\aleph_1$, and $\MA$ stands for ``for all $\kappa < 2^{\aleph_0}$, $\MA_\kappa$ holds.'' However, we will be working in models of $\TA$, where $\bb = \aleph_2$; since $\MA_\kappa$ implies $\bb > \kappa$, it follows that $\MA_{\aleph_1}$ is the strongest fragment of $\MA$ which is consistent with $\TA$, hence we will shorten it to just $\MA$. We will make frequent use of the classical results of descriptive set theory, concerning definability properties of subsets of Polish spaces. The interested reader may consult~\cite{Kechris.CDST} for proofs and more information. For now we simply quote our most-used results. \begin{fact}(Jankov-von Neumann, see~\cite[Theorem~18.1]{Kechris.CDST}) Let $X$ and $Y$ be Polish spaces, and let $A\subseteq X\times Y$ be an analytic set. Then there is a function $f$ uniformizing $A$, i.e. with domain the projection of $A$ to $X$ and with graph contained in $A$, such that $f$ is measurable with respect to the $\sigma$-algebra generated by the analytic subsets of $X$. \end{fact} \begin{fact}(\cite[\S{29}]{Kechris.CDST}) Let $X$ be a Polish space and $A\subseteq X$ an analytic set. Then $A$ is measurable with respect to any complete Borel probability measure. Moreover, $A$ has the Baire Property. \end{fact} \begin{fact}(\cite[Theorem~8.38]{Kechris.CDST}) A Baire-measurable function between Polish spaces is continuous on a dense $G_\delta$. \end{fact} \subsection{Multipliers, topologies and lifts} \label{subsec:topologies} In Section~\ref{sec:intro} we defined the multiplier algebra, up to isomorphism, by a maximality property. The multiplier algebra, like the \v Cech-Stone compactification, has many explicit realizations.
For instance, a concrete representation of $\A{M}(\A{A})$ comes with any nondegenerate representation $\rho$ of $\A{A}$ on a Hilbert space $H$, as the idealizer of $\rho[\A{A}]$ inside $\B(H)$; \[ \A{M}(\A{A}) \simeq \set{m\in\B(H)}{m\rho[\A{A}] + \rho[\A{A}]m\subseteq\rho[\A{A}]} \] It is well-known that the isomorphism type of this representation of the multiplier algebra does not depend on $\rho$. Alternatively, one can take $\A{M}(\A{A})$ to be the idealizer of $\A{A}$ inside $\A{A}^{**}$, the enveloping von Neumann algebra of $\A{A}$. Other, more abstract approaches go via Hilbert C*-modules and double centralizers (see~\cite{Lance.HM} and~\cite{Pedersen.CAAG}, respectively, for an excellent treatment of each). The \emph{strict topology} on $\A{M}(\A{A})$ is that generated by the seminorms \[ m\mapsto \norm{ma} + \norm{am} \qquad (m\in \A{M}(\A{A}), a\in \A{A}) \] $\A{M}(\A{A})$ is the strict completion of $\A{A}$ inside its enveloping von Neumann algebra $\A{A}^{**}$, and the strict topology coincides with the norm topology when restricted to $\A{A}$. Hence if $\A{A}$ is separable then $\A{M}(\A{A})$ is also separable, in the strict topology. The unit ball of $\A{M}(\A{A})$, when endowed with the strict topology, forms a Polish space. In the case of $\A{A} = \A{K}(H)$, where $\A{M}(\A{A}) = \B(H)$, the strict topology is exactly the $\sigma$-strong-$*$ topology, and when restricted to norm-bounded subsets this coincides with both the weak and strong operator topologies. Similarly, the strict topology on $\prod \A{A}_n$, when restricted to norm-bounded subsets, coincides with the product of the norm topologies. We will always denote the quotient map $\A{M}(\A{A})\to\A{Q}(\A{A})$ by $\pi$, regardless of the C*-algebra $\A{A}$. Let $\vp$ be a $*$-homomorphism between corona algebras $\A{Q}(\A{A})$ and $\A{Q}(\A{B})$. 
We say that $L$ is an \emph{$\e$-lift} of $\vp$ given that the diagram below commutes, up to a tolerance of $\e$; \[ \begin{tikzpicture} \matrix (m) [cdg.smallmatrix] { \A{M}(\A{A}) & & \A{M}(\A{B}) \\ & \e & \\ \A{Q}(\A{A}) & & \A{Q}(\A{B}) \\ }; \path [cdg.path] (m-1-1) edge node[auto]{$L$} (m-1-3) (m-3-1) edge node[below]{$\vp$} (m-3-3) (m-1-1) edge node[left]{$\pi$} (m-3-1) (m-1-3) edge node[auto]{$\pi$} (m-3-3); \end{tikzpicture} \] that is, $\norm{\pi(L(x)) - \vp(\pi(x))} \le \e\norm{\pi(x)}$ for all $x\in \A{M}(\A{A})$. When $\e = 0$ we call $L$ simply a \emph{lift} of $\vp$. In general we make no assumptions on the algebraic properties of $L$, or its definability; often we will work with a lift of $\vp$ given to us by a choice of representatives. If $L$ is in fact a $*$-homomorphism we will call $L$ an \emph{algebraic lift}. We will also often be concerned with lifts which are bicontinuous with respect to the strict topology; such maps we will call \emph{strict}. Suppose now that $\A{A}_n$ ($n\in\NN$) is a sequence of unital C*-algebras, $\A{B}$ is a C*-algebra, and $\alpha : \prod\A{A}_n \to \A{M}(\A{B})$ is a strict $*$-homomorphism taking $\bigoplus\A{A}_n$ into $\A{B}$. Let $j_n : \A{A}_n\to \bigoplus\A{A}_m$ be the canonical embedding; then the sequence $\alpha_n = \alpha\circ j_n$ completely determines $\alpha$. In particular, if each $\A{A}_n$ and $\A{B}$ is separable, then we may identify $\alpha$ with a member of the separable metric space \[ \prod \Hom(\A{A}_n, \A{B}) \] where $\Hom(\A{A}_n,\A{B})$, the space of $*$-homomorphisms from $\A{A}_n$ to $\A{B}$, is given the point-norm topology. It will be important to know when a given sequence in the above space determines a strict $*$-homomorphism $\prod\A{A}_n \to \A{M}(\A{B})$, i.e. when the above identification can be reversed. For this we have the following; \begin{prop} \label{hom.reverse} Let a sequence $(\alpha_n)\in \prod \Hom(\A{A}_n,\A{B})$ be given. 
Suppose that the projections $p_n = \alpha_n(1_{\A{A}_n})$ ($n\in\NN$) are pairwise-orthogonal, and that their sums $e_m = \sum_{n\le m} p_n$ converge strictly in $\A{M}(\A{B})$. Then there is a unique strict $*$-homomorphism \[ \alpha : \prod \A{A}_n \to \A{M}(\A{B}) \] such that $\alpha\circ j_n = \alpha_n$ for each $n$. \end{prop} \begin{proof} Since the projections $p_n$ are pairwise-orthogonal, we may define a $*$-homomorphism $\alpha : \bigoplus \A{A}_n \to \A{B}$ with $\alpha\circ j_n = \alpha_n$. The projections $e_m$ are the image of an approximate unit for $\bigoplus \A{A}_n$ under $\alpha$. Thus we are in the situation of~\cite[Proposition~5.8]{Lance.HM}, and the conclusion is immediate. \end{proof} \subsection{Reduced products of UHF algebras} A \emph{UHF algebra} is a C*-algebra $\A{A}$ such that for all $x_1,\ldots,x_n\in\A{A}$ and $\e > 0$, there is a C*-subalgebra of $\A{A}$, isomorphic to a full matrix algebra over $\CC$, with elements $y_1,\ldots,y_n$ satisfying \[ \forall i\le n\quad \norm{x_i - y_i} < \e \] When a UHF algebra $\A{A}$ is separable, it may be realized as a (C*-)direct limit of full matrix algebras $M_{k_n}(\CC)$. In case $\A{A}$ is also unital, such a representing sequence may be chosen so that the connecting maps are unital. We will be concerned exclusively with UHF algebras which are both separable and unital, and will therefore drop these two adjectives in all further discussions with hope that the result will be more readable. We only note here that other formulations of UHF algebras, while equivalent in the separable case, are often not in the nonseparable case; the interested reader is referred to~\cite{Farah-Katsura.UHFI}. Since we are so often concerned with the sequence of matrix algebras which makes up a UHF algebra, we set aside a term for it; \begin{defn} Let $\A{A}$ be a UHF algebra. 
A sequence $\A{A}_n$ ($n\in\NN$) of C*-subalgebras of $\A{A}$ is called \emph{suitable} if, for all $n\in\NN$, we have \begin{itemize} \item $1_{\A{A}} \in \A{A}_n$, \item $\A{A}_n \subseteq \A{A}_{n+1}$, and \item $\A{A}_n \simeq M_{k_n}(\CC)$ for some $k_n$. \end{itemize} \end{defn} Now let $\A{A}$ be a UHF algebra with suitable sequence $\A{A}_n \simeq M_{k_n}(\CC)$. It follows that $k_n \mid k_{n+1}$ for each $n$. The \emph{supernatural number} associated to $\A{A}$ is the formal product of all primes which eventually divide $k_n$, with (possibly infinite) multiplicity; e.g. when $k_n = 2^n$ the supernatural number associated to the resulting algebra is written $2^\infty$. A classical theorem of Glimm (\cite{Glimm.UHF}) shows that the supernatural number associated to a UHF algebra is a complete invariant; moreover if $\A{A}$ and $\A{B}$ are UHF algebras with associated supernatural numbers $s$ and $t$, respectively, then $\A{A}$ embeds into $\A{B}$ if and only if $s \mid t$, i.e. every prime in the formal product $s$ appears in $t$, with multiplicity at least that of its copy in $s$. It is well-known, and easy to prove, that UHF algebras are simple. Hence, there is never a nonzero $*$-homomorphism $\A{A}\to\A{A}_n$ (unless the sequence $\A{A}_n$ is eventually constant, in which case $\A{A}$ is finite-dimensional). We can, however, get close; \begin{defn} \label{ce.def} Let $\A{A}$ be a C*-algebra and $\A{B}$ a C*-subalgebra. A map $\theta : \A{A}\to\A{B}$ is called a \emph{conditional expectation} if the following hold; \begin{enumerate} \item\label{ce.linear} $\theta$ is linear, \item\label{ce.cpc} $\theta$ is a completely-positive contraction (see~\cite{Blackadar.OA} for a definition), \item\label{ce.ext} $\theta(b) = b$ for all $b\in\A{B}$, and \item\label{ce.mod} $\theta(ba) = b\theta(a)$ and $\theta(ab) = \theta(a)b$ for all $a\in\A{A}$ and $b\in\A{B}$. 
\end{enumerate} \end{defn} \begin{fact} \label{ce.fact} If $\A{A}$ is a UHF algebra with suitable sequence $\A{A}_n$ ($n\in\NN$), then there is a family of conditional expectations $\theta_n : \A{A}\to\A{A}_n$ satisfying $\theta_n \circ \theta_m = \theta_n$ for all $n \le m$. In particular, we have $\theta_n(a) \to a$ for all $a\in\A{A}$. \end{fact} \begin{proof}[Proof sketch] Given $n < m$, consider $\A{A}_n'\cap\A{A}_m$. This is a finite-dimensional, unital C*-subalgebra of $\A{A}$. Let $\U$ be its unitary group; then $\U$ is compact, and hence has a bi-invariant Haar measure $\mu$. Define $\theta_{n,m} : \A{A}_m\to\A{A}_m$ by \[ \theta_{n,m}(a) = \int uau^* \,d\mu(u) \] where the integral above is defined weakly, i.e., entrywise for some matrix representation of $\A{A}_m$. Then $\theta_{n,m}$ maps into $\A{A}_n$. Moreover, $\theta_{n,m}\circ\theta_{m,p} = \theta_{n,p}$ whenever $n < m < p$. It follows that there is a map $\theta_n : \A{A}\to\A{A}_n$ satisfying $\theta_n(a) = \theta_{n,m}(a)$ whenever $a\in\A{A}_m$. The conditions on $\theta_n$ are easily checked. \end{proof} We will be concerned primarily with corona algebras of the form $\prod \A{A}_n / \bigoplus \A{A}_n$, which we call \emph{reduced products}. The following proposition describes some of the structure of reduced products of UHF algebras, and will play an important part in the results to follow. In particular, it leads to our definition of a coherent family of $*$-homomorphisms, which we state afterwards. \begin{prop} \label{stratification} Let $\A{A}_n$ ($n\in\NN$) be a sequence of UHF algebras, and for each $n\in\NN$ let $\A{A}_{n,k}$ ($k\in\NN$) be a suitable sequence of subalgebras of $\A{A}_n$. 
For each $\xi\in\NN^\NN$, let \[ \Q_\xi = \prod \A{A}_{n,\xi(n)} / \bigoplus \A{A}_{n,\xi(n)} \subseteq \prod \A{A}_n / \bigoplus \A{A}_n \] Then $\Q_\xi \subseteq \Q_\eta$ if and only if $\xi <^* \eta$, and if $X$ is any cofinal subset of $(\NN^\NN, <^*)$, then \[ \prod \A{A}_n / \bigoplus \A{A}_n = \bigcup_{\xi\in X} \Q_\xi \] \end{prop} \begin{proof} Let $x\in\prod \A{A}_n$ be given. For each $n\in\NN$ we may choose some $\xi(n)\in\NN$ large enough that \[ \norm{\proj{n}{x} - \A{A}_{n,\xi(n)}} \le \frac{1}{n+1} \] Hence, there is a sequence $\bar{x}\in \prod \A{A}_{n,\xi(n)}$ such that $\norm{\proj{n}{x} - \proj{n}{\bar{x}}} \to 0$ as $n\to\infty$, and so $\pi(x) = \pi(\bar{x}) \in \Q_\xi$. The rest is straightforward. \end{proof} \begin{rmk} By Fact~\ref{ce.fact}, the $\bar{x}$ in the above proof may be chosen in a canonical way, namely we may choose $\proj{n}{\bar{x}} = \theta_{n,\xi(n)}(\proj{n}{x})$ ($n\in\NN$) where $\theta_{n,k} : \A{A}_n\to \A{A}_{n,k}$ ($k\in\NN$) is a sequence of conditional expectations fixed in advance. \end{rmk} \begin{defn} Let $\A{A}_n$ ($n\in\NN$) be a sequence of UHF algebras, and let $\A{A}_{n,k}$ ($k\in\NN$) be a suitable sequence for $\A{A}_n$, for each $n\in\NN$. Let $\A{B}$ be a C*-algebra. A family of $*$-homomorphisms $\alpha^\xi_n : \A{A}_{n,\xi(n)} \to \A{B}$ ($\xi\in\NN^\NN$, $n\in\NN$) is called \emph{coherent} relative to the sequences $\A{A}_{n,k}$, if for each $\xi <^* \eta$, \[ \lim_n \norm{\alpha^\eta_n\rs\A{A}_{n,\xi(n)} - \alpha^\xi_n} = 0 \] \end{defn} The following Proposition is now immediate from Propositions~\ref{hom.reverse} and~\ref{stratification}. \begin{prop} \label{limit-homo} Suppose $\alpha^\xi_n$ ($\xi\in\NN^\NN$, $n\in\NN$) is a coherent family of $*$-homomorphisms relative to suitable sequences $\A{A}_{n,k}$, all mapping into a C*-algebra $\A{B}$. 
Suppose moreover that, for each $\xi$, the projections $p^\xi_n = \alpha^\xi_n(1_{\A{A}_n})$ ($n\in\NN$) are pairwise-orthogonal and have sums $\sum_{n\le m} p^\xi_n$ which converge strictly in $\A{M}(\A{B})$. Then for each $\xi$ there is a unique $*$-homomorphism $\alpha^\xi : \prod \A{A}_{n,\xi(n)} \to \A{M}(\A{B})$ such that $\alpha^\xi\circ j_n = \alpha^\xi_n$ for every $n\in\NN$. Moreover, if $\vp^\xi : \A{Q}_\xi \to \A{Q}(\A{B})$ is the $*$-homomorphism induced by $\alpha^\xi$, then $\vp^\eta$ extends $\vp^\xi$ whenever $\xi <^* \eta$, and there is a unique $\vp : \prod \A{A}_n / \bigoplus \A{A}_n \to \A{M}(\A{B}) / \A{B}$ which extends every $\vp^\xi$. \end{prop} The $*$-homomorphism $\vp$ above is said to be \emph{determined} by the coherent family $\alpha^\xi_n$. We can now rephrase Theorem~\ref{main.cfh} as follows; $\TA + \MA$ implies that every isomorphism of the form $\prod \A{A}_n / \bigoplus \A{A}_n \simeq \prod \A{B}_n / \bigoplus\A{B}_n$, where each $\A{A}_n$ and $\A{B}_n$ is a UHF algebra, is determined by a coherent family of $*$-homomorphisms. To end this section we will prove Corollary~\ref{main.cor} from Theorem~\ref{main.cfh}. Before starting we will need one more structural result on reduced products of UHF algebras. \begin{prop} \label{central.sequences} Let $\A{A}_n$ ($n\in\NN$) be a sequence of UHF algebras. Then the center of $\prod \A{A}_n / \bigoplus \A{A}_n$ is canonically isomorphic to $\ell^\infty / c_0$. \end{prop} \begin{proof} Define $i : \ell^\infty \to \prod \A{A}_n$ by $\proj{n}{i(x)} = \proj{n}{x} 1_{\A{A}_n}$ for $n\in\NN$ and $x\in\ell^\infty$. Since the center of a UHF algebra is trivial, it follows that the image of $i$ is exactly the center of $\prod\A{A}_n$. Moreover, $i$ maps $c_0$ into $\bigoplus \A{A}_n$, and hence induces an injective map \[ j : \ell^\infty / c_0 \to \Z\left(\prod\A{A}_n / \bigoplus \A{A}_n\right) \] It suffices to show that $j$ is surjective. 
Suppose $x\in\prod\A{A}_n$ and $\pi(x)$ is central in $\prod\A{A}_n / \bigoplus \A{A}_n$. Fix a suitable sequence $\A{A}_{n,k}$ ($k\in\NN$) of subalgebras for each $\A{A}_n$. By Proposition~\ref{stratification} above, we may assume without loss of generality that for some $\xi\in\NN^\NN$, we have $\proj{n}{x}\in\A{A}_{n,\xi(n)}$ for all $n\in\NN$. Since each $\A{A}_{n,\xi(n)}$ is finite-dimensional, its unitary group has a bi-invariant Haar measure $\mu_n$. Then let \[ \proj{n}{z} = \int u \proj{n}{x} u^* \,d\mu_n(u) \] (cf. the sketch of Fact~\ref{ce.fact} above.) It is straightforward to show that each $\proj{n}{z}$ is scalar, and $\norm{\proj{n}{x} - \proj{n}{z}} \to 0$ as $n\to\infty$, and this completes the proof. \end{proof} By Proposition~\ref{central.sequences}, every isomorphism $\vp$ between reduced products of UHF algebras must restrict to an automorphism of $\ell^\infty / c_0$. We will call this automorphism the \emph{central automorphism induced by $\vp$}. \begin{proof}[Proof of Corollary~\ref{main.cor}] Let $\vp$ be an isomorphism \[ \prod \A{A}_n / \bigoplus \A{A}_n \to \prod \A{B}_n / \bigoplus \A{B}_n \] and let $\sigma$ be the central automorphism induced by $\vp$. Then by $\TA + \MA$ and the main result of~\cite{Velickovic.OCAA}, there are cofinite sets $\SN{A},\SN{B}\subseteq\NN$ and a bijection $e : \SN{A}\to\SN{B}$ such that $x\mapsto x\circ e^{-1}$ is a lift of $\sigma$. We claim now that for all but finitely many $n\in\SN{A}$, $\A{A}_n \simeq \A{B}_{e(n)}$. Suppose otherwise; then there is some infinite $\SN{I}\subseteq\SN{A}$ such that for all $n\in\SN{I}$, $\A{A}_n$ and $\A{B}_{e(n)}$ are not isomorphic. Let $s_n$ and $t_n$ be the supernatural numbers associated to $\A{A}_n$ and $\A{B}_{e(n)}$, respectively; then for each $n\in\SN{I}$ there is some prime $p_n$ which divides one of $s_n,t_n$, but not the other. Without loss of generality, $p_n\mid s_n$ and $p_n\nmid t_n$ for all $n\in\SN{I}$. For $n\not\in\SN{I}$ let $p_n = 1$. 
Now let $\A{F}_n$ be a C*-subalgebra of $\A{A}_n$ isomorphic to $M_{p_n}(\CC)$, with $1_{\A{A}_n}\in \A{F}_n$, for each $n$; by Theorem~\ref{main.cfh} there is a strict algebraic lift $\alpha$ of the restriction of $\vp$ to the C*-subalgebra \[ \prod \A{F}_n / \bigoplus \A{F}_n \] Let $\alpha_n : \A{F}_n\to \bigoplus \A{B}_k$ be the coordinate $*$-homomorphisms. Notice that $\alpha_n(1_{\A{A}_n}) - 1_{\A{B}_{e(n)}}$ tends to zero as $n\to\infty$. It follows that $\alpha_n(1_{\A{A}_n}) = 1_{\A{B}_{e(n)}}$ for all but finitely many $n\in\NN$. Hence, for all but finitely many $n\in\NN$, there is a unital embedding of $M_{p_n}(\CC)$ into $\A{B}_{e(n)}$, i.e. $p_n \mid t_n$. This contradicts the previous assumption. \end{proof} \section{Definable embeddings} \label{sec:definable-embeddings} Let $\rho : \A{A}\to \A{B}$ be a map between C*-algebras. The \emph{defect} of $\rho$ is defined to be the supremum, over all $a,b$ in the unit ball of $\A{A}$ and $t\in\CC$ with $|t|\le 1$, of the maximum of the following quantities: \begin{gather*} \norm{\rho(ab) - \rho(a)\rho(b)} \\ \norm{\rho(a+b) - (\rho(a)+\rho(b))} \\ \norm{\rho(a^*) - \rho(a)^*} \\ \norm{\rho(ta) - t\rho(a)} \\ |\norm{a} - \norm{\rho(a)}| \end{gather*} The defect of $\rho$ thus measures how far $\rho$ is from being a $*$-homomorphism. \begin{thm}(Farah,~\cite[Theorem 5.1]{Farah.CO}) \label{ulam.fd} There is a universal constant $K_{FD}$ such that for any two finite-dimensional C*-algebras $\A{A}$ and $\A{B}$, and any Borel-measurable map $\rho : \A{A}\to \A{B}$, if the defect $\delta$ of $\rho$ is less than $1/1000$ then there is a $*$-homomorphism $\vp : \A{A}\to \A{B}$ such that $\norm{\rho - \vp} \le K_{FD}\delta$.
\end{thm} \begin{prop} \label{ulam.af} There is a universal constant $K_{AF}$ such that for any finite-dimensional C*-algebra $\A{A}$ and AF algebra $\A{B}$, and any map $\rho : \A{A}\to \A{B}$, if the defect $\delta$ of $\rho$ is less than $10^{-6}$ then there is a $*$-homomorphism $\vp : \A{A}\to \A{B}$ such that $\norm{\rho - \vp} \le K_{AF}\delta$. \end{prop} \begin{proof} Let $\rho : \A{A}\to \A{B}$ be a map with defect $\delta$, and assume $\delta < 10^{-6}$. Let $X$ be a finite, $\delta$-dense subset of the unit sphere of $\A{A}$. Since $\A{B}$ is AF, there is a finite-dimensional C*-subalgebra $\A{C}$ of $\A{B}$ such that $\rho(x)$ is within $\delta$ of $\A{C}$ for each $x\in X$. For each $x\in X$, fix some $c_x\in \A{C}$ within $\delta$ of $\rho(x)$, and let $\sigma : \A{A}\to \A{C}$ be the map defined by setting $\sigma(a) = \norm{a}c_x$, where $x$ is the first member of $X$ which is within $\delta$ of $a/\norm{a}$, in some fixed ordering of $X$. It follows that $\norm{\sigma - \rho} \le 2\delta$, and hence $\sigma$ has small defect; in particular, the defect of $\sigma$ is less than $1/1000$. $\sigma$ is also, clearly, Borel-measurable. Hence by Theorem~\ref{ulam.fd} there is a $*$-homomorphism $\vp : \A{A}\to \A{C}$ close to $\sigma$, and hence close to $\rho$. \end{proof} The following theorem, which is the main result of this section, has at its heart an application of Proposition~\ref{ulam.af} to a sequence of functions from finite-dimensional C*-algebras into a fixed AF algebra. The crucial detail is the independence of $K_{AF}$ from the dimension of the domain algebra. \begin{thm} \label{definable->lifts} Assume $\TA + \MA$. Let $\vp$ be an injective $*$-homomorphism of the form \[ \prod \A{F}_n / \bigoplus \A{F}_n \to \A{M}(\A{A})/\A{A} \] where $\A{A}$ is a separable AF algebra, and each $\A{F}_n$ is a finite-dimensional C*-algebra. Suppose $\vp$ has a lift which is strictly continuous on a dense $G_\delta$. Then $\vp$ has a strict algebraic lift $\alpha$.
\end{thm} \begin{proof} Let $\e_n = 2^{-n}$ and fix an increasing approximate unit $r_n$ ($n\in\NN$) of projections in $\A{A}$. Let $\PA{F} = \prod\A{F}_n$, and for $\SN{A}\subseteq\NN$, write \[ \PA{F}\rs\SN{A} = \prod_{n\in\SN{A}} \A{F}_n \] and similarly $\X\rs\SN{A} = \X\cap (\PA{F}\rs\SN{A})$ for subsets $\X$ of $\PA{F}$. In particular let $X_n$ be a finite, $\e_n$-dense subset of the unit ball of $\A{F}_n$, and let $\X = \prod X_n$. Under the strict topology, $\X$ (and each $\X\rs\SN{A}$) is homeomorphic to a perfect, compact subset of the Baire space $\NN^\NN$. Now fix a lift $L : \PA{F}\to\A{M}(\A{A})$ of $\vp$, which is (strictly) continuous on a dense $G_\delta$ set $G\subseteq\X$. By a standard argument (see~\cite{Farah.CO},~\cite{Jalali-Naini},~\cite{Talagrand.Submeasure}) we may find sequences $0 = n_0 < n_1 < \cdots$ and $t_i\in\X\rs [n_i,n_{i+1})$ such that for all $x\in\X$, if $x$ extends $t_i$ for infinitely many $i$, then $x\in G$. Now let \[ t^0 = \sum t_{2i}\qquad t^1 = \sum t_{2i+1} \] (The sums converge in the strict topology.) Also, let $\SN{A}^0 = \bigcup [n_{2i},n_{2i+1})$ and $\SN{A}^1 = \bigcup [n_{2i+1},n_{2i+2})$. It follows that the map \[ x\mapsto L(x\rs\SN{A}^0 + t^1) + L(x\rs\SN{A}^1 + t^0) - L(t^1) - L(t^0) \] lifts $\vp$ and is continuous on all of $\X$; replacing $L$ with this map, we may assume the same holds of $L$.
\begin{claim} For every $n$ and $\e > 0$ there are $k > n$ and $t\in \X\rs [n,k)$ such that for all $x,y\in\X$ extending $t$, \begin{enumerate} \item\label{stable.head} if for all $i < n$, $\proj{i}{x} = \proj{i}{y}$, then \[ \norm{(L(x) - L(y))r_n} \le \e \quad\mbox{and}\quad \norm{r_n(L(x) - L(y))} \le \e \] \item\label{stable.tail} if for all $i \ge k$, $\proj{i}{x} = \proj{i}{y}$, then \[ \norm{(L(x) - L(y))(1 - r_k)} \le \e\quad\mbox{and}\quad \norm{(1 - r_k)(L(x) - L(y))} \le \e \] \end{enumerate} \end{claim} \begin{proof} We will work towards condition~\eqref{stable.tail} first; condition~\eqref{stable.head} will then follow easily from the continuity of $L$. Fix $n$ and $\e > 0$, and for each $k > n$ define $V_k\subseteq \X\rs [n,\infty)$ by placing $x\in V_k$ if and only if there are $s,t\in\X\rs n$ with \[ \norm{(L(s + x) - L(t + x))(1 - r_k)} > \e \quad\mbox{or}\quad \norm{(1 - r_k)(L(s + x) - L(t + x))} > \e \] Then, $V_k$ is an open subset of $\X\rs [n,\infty)$, by continuity of $L$. For any given $x\in\X\rs [n,\infty)$ and $s,t\in\X\rs n$, there is some $k$ such that \[ \norm{(L(s + x) - L(t + x))(1 - r_k)}, \norm{(1 - r_k)(L(s + x) - L(t + x))} \le \e \] since the difference $L(s + x) - L(t + x)$ is a member of $\A{A}$. As $\X\rs n$ is finite, it follows that for any given $x\in\X\rs [n,\infty)$ there is some $k$ with $x\not\in V_k$. Thus $\bigcap V_k = \emptyset$. By the Baire Category Theorem, there must be some $m$ such that $V_m$ is not dense; then we may find $\ell\ge m$ and $s\in\X\rs [n,\ell)$ such that no $x\in\X\rs [n,\infty)$ extending $s$ can be in $V_m$. Condition~\eqref{stable.tail} follows with the choice of $t = s$ and $k = \ell$; to complete the proof, we use continuity of $L$ to find $k \ge \ell$ and $t\in\X\rs [n,k)$ extending $s$ which satisfies~\eqref{stable.head} as well. \end{proof} We call a $t$ as in the claim an \emph{$\e$-stabilizer} for the interval $[n,k)$. 
By the claim, we may construct sequences $0 = n_0 < n_1 < \cdots$ and $t_i\in\X\rs [n_i,n_{i+1})$ such that $t_i$ is an $\e_i$-stabilizer for the interval $[n_i,n_{i+1})$. For each $\zeta < 3$ put \begin{align*} \SN{A}_\zeta & = \bigcup\set{[n_i,n_{i+1})}{i\equiv \zeta\pmod{3}} \\ z_\zeta & = \sum \set{t_i}{i\equiv \zeta\pmod{3}} \end{align*} and define a function $L_\zeta$ by \[ L_\zeta(x) = L(x + z_{\zeta+1} + z_{\zeta+2}) - L(z_{\zeta+1} + z_{\zeta+2}) \] (Where $\zeta + 1$ and $\zeta + 2$ are computed mod $3$.) Clearly, each $L_\zeta$ lifts $\vp$. If $x\in \PA{F}_1$, let $f(x)$ be some sequence in $\X$ such that for each $n$, $\norm{\proj{n}{x} - \proj{n}{f(x)}}$ is minimal. Then in particular, $\quo{x} = \quo{f(x)}$. Let $q_i = r_{n_{i+2}} - r_{n_{i-1}}$, setting $n_{-1} = 0$. Note that $q_i \perp q_j$ if $i$ and $j$ differ by at least three, and moreover \[ \sum_{i\equiv \zeta\bmod{3}} q_i = 1 - r_{n_{\zeta - 1}} \] for $\zeta = 0,1,2$. Define maps $\rho_i : \PA{F}\rs [n_i, n_{i+1}) \to q_i \A{A} q_i$ by \[ \rho_i(x) = \norm{x} q_i L_\zeta(f(x / \norm{x})) q_i \] where $i\equiv \zeta\pmod{3}$. \begin{claim} The map \[ x\mapsto \sum_{i\equiv \zeta\bmod{3}} \rho_i(x\rs [n_i,n_{i+1})) \] lifts $\vp$ on $\SN{A}_\zeta$. \end{claim} \begin{proof} Let $x\in\X\rs\SN{A}_\zeta$. Fix an $i$ with $i\equiv\zeta\pmod{3}$, and consider $u = x\rs [n_i,\infty)$ and $v = x\rs [n_i,n_{i+1})$. Then, \begin{align*} \lVert q_i (L_\zeta(x) & - L_\zeta(v)) \rVert = \norm{q_i (L(x + z_{\zeta+1} + z_{\zeta+2}) - L(v + z_{\zeta+1} + z_{\zeta+2}))} \\ & \le \norm{(1 - r_{n_{i-1}})(L(x + z_{\zeta+1} + z_{\zeta+2}) - L(u + z_{\zeta+1} + z_{\zeta+2}))} \\ & + \norm{r_{n_{i+2}}(L(u + z_{\zeta+1} + z_{\zeta+2}) - L(v + z_{\zeta+1} + z_{\zeta+2}))} \\ & \le \e_{i-2} + \e_{i+1} \end{align*} by the properties of the stabilizers constructed above. 
Similarly, \begin{align*} \norm{(L_\zeta(v) - L_\zeta(0))(1 - q_i)} & \le \norm{(L_\zeta(v) - L_\zeta(0))(1 - r_{n_{i+2}})} + \norm{(L_\zeta(v) - L_\zeta(0))r_{n_{i-1}}} \\ & \le \e_{i+1} + \e_{i-1} \end{align*} Then, the following sums (taken over $i\equiv \zeta\pmod{3}$) converge in norm, and hence are members of $\A{A}$; \begin{align} \label{diff.rs} \sum & q_i (L_\zeta(x) - L_\zeta(x\rs [n_i,n_{i+1}))) \\ \label{diff.pr} \sum & q_i (L_\zeta(x\rs [n_i,n_{i+1})) - L_\zeta(0))(1 - q_i) \\ \sum & q_i L_\zeta(0)(1 - q_i) \end{align} Adding these together produces \[ (1 - r_{n_{\zeta-1}})L_\zeta(x) - \sum_{i\equiv \zeta\bmod{3}} q_i L_\zeta(x\rs [n_i,n_{i+1})) q_i \] Hence the desired conclusion holds for all $x\in\X\rs\SN{A}_\zeta$. The general case follows from the fact that $x - f(x)\in\A{A}$ whenever $x\in\PA{F}_1$. \end{proof} It follows that the defect $\delta_i$ of $\rho_i$ vanishes as $i$ tends to infinity. For instance, if there were sequences $a_i,b_i\in \PA{F}_1\rs [n_i,n_{i+1})$ ($i\in\NN$) satisfying \[ \limsup_i \norm{\rho_i(a_i b_i) - \rho_i(a_i)\rho_i(b_i)} > 0 \] then letting $a = \sum a_i$ and $b = \sum b_i$, we would have $\norm{\vp(\quo{ab}) - \vp(\quo{a})\vp(\quo{b})} > 0$, a contradiction. The other C*-algebra operations give analogous proofs. By Proposition~\ref{ulam.af}, for large enough $i$ there is a $*$-homomorphism $\alpha_i : \PA{F}\rs [n_i,n_{i+1})\to q_i \A{A} q_i$ such that $\norm{\rho_i - \alpha_i} \le K_{AF}\delta_i$. Then \[ \alpha^\zeta(x) = \sum\set{\alpha_i(x\rs [n_i,n_{i+1}))}{i\equiv \zeta\pmod{3}} \] lifts $\vp$ on $\SN{A}_\zeta$, and is a $*$-homomorphism. Hence $\alpha = \alpha^0 + \alpha^1 + \alpha^2$ (possibly with modifications on finitely-many coordinates) is as desired. \end{proof} \section{Embeddings under forcing axioms} \label{sec:fa-embeddings} In this section we prove the following strengthening of Theorem~\ref{main.cfh}. 
The reader can easily deduce an analogous strengthening of Corollary~\ref{main.cor} using Theorem~\ref{fa->cfh.stronger} in place of Theorem~\ref{main.cfh}. \begin{thm} \label{fa->cfh.stronger} Assume $\TA + \MA$. Let $\A{F}_n$ and $\A{B}_n$ ($n\in\NN$) be sequences of full matrix algebras and UHF algebras, respectively, and suppose \[ \vp : \prod \A{F}_n / \bigoplus \A{F}_n \to \prod \A{B}_n / \bigoplus \A{B}_n \] is an injective $*$-homomorphism which induces an automorphism of $\ell^\infty / c_0$. Then $\vp$ has a strict algebraic lift. \end{thm} \begin{rmk} Let $\A{B}_{n,k}$ ($k\in\NN$) be suitable sequences for the UHF algebras $\A{B}_n$. Then the conclusion of Theorem~\ref{fa->cfh.stronger} implies that there is some $\xi\in\NN^\NN$ such that the image of $\vp$ is contained in $\prod\A{B}_{n,\xi(n)} / \bigoplus \A{B}_{n,\xi(n)}$. Indeed, let $\alpha$ be a strict algebraic lift of $\vp$ and consider the coordinate $*$-homomorphisms $\alpha_n = \alpha\circ j_n : \A{F}_n\to \bigoplus \A{B}_m$. By a straightforward argument, we have $\alpha(1_{\A{F}_n}) - 1_{\A{B}_{e(n)}} \to 0$ for some function $e$. Then for each $n$ we may find a $\xi(e(n))$ large enough such that there is a $*$-homomorphism $\beta_n : \A{F}_n\to \A{B}_{e(n),\xi(e(n))}$ with $\norm{\alpha_n - \beta_n}$ tending to zero. The sequence $\beta_n$ then determines a strict $*$-homomorphism \[ \beta : \prod \A{F}_n \to \prod \A{B}_{n,\xi(n)} \] and $\beta$ lifts $\vp$. \end{rmk} The remainder of this section is devoted to a proof of Theorem~\ref{fa->cfh.stronger}. To this end we fix an injective $*$-homomorphism $\vp$ of the form \[ \prod \A{F}_n / \bigoplus \A{F}_n \to \prod \A{B}_n / \bigoplus \A{B}_n \] which induces an automorphism of $\ell^\infty / c_0$. We will also assume $\TA + \MA$ for the rest of the section. 
We will write $\PA{F} = \prod \A{F}_n$ and $\PA{B} = \prod\A{B}_n$, and for sets $\SN{A}\subseteq\NN$ we put \[ \PA{F}\rs\SN{A} = \prod_{n\in\SN{A}} \A{F}_n \qquad \PA{B}\rs\SN{A} = \prod_{n\in\SN{A}} \A{B}_n \] and we shorten the abominable $\vp\rs (\PA{F}\rs\SN{A})$ to just $\vp\rs\SN{A}$. Note that, by the main result of~\cite{Velickovic.OCAA}, we may fix a function $e : \NN\to\NN$ such that for each central $\zeta\in \prod\A{F}_n$, we have $\vp(\quo{\zeta}) = \quo{\zeta\circ e}$. By relabeling the C*-algebras $\A{B}_n$, we may, and will, assume that $e = \id$. As in the proof of Corollary~\ref{main.cor}, this implies that any strict algebraic lift $\alpha^{\SN{A}}$ of $\vp\rs\SN{A}$ must be determined by $*$-homomorphisms \[ \alpha_n^\SN{A} : \A{F}_n\to \A{B}_n \quad (n\in\SN{A}) \] This fact will be used often in what follows, without explicit mention. Now, fix a pointclass $\PC{\Gamma}$ and a number $\e \ge 0$. We define \begin{align*} \SN{A}\in\SSN{I}^\e & \iff \mbox{there is a strict algebraic $\e$-lift of $\vp\rs\SN{A}$} \\ \SN{A}\in\SSN{I}^\e(\PC{\Gamma}) & \iff \mbox{there is a $\PC{\Gamma}$-measurable $\e$-lift of $\vp\rs \SN{A}$} \\ \SN{A}\in\SSN{I}_\sigma^\e(\PC{\Gamma}) & \iff \mbox{there is a sequence $L_k$ ($k\in\NN$) of $\PC{\Gamma}$-measurable functions with} \\ & \qquad \forall x\in\PA{F}\rs\SN{A}\;\;\exists k\in\NN\quad \norm{\vp(\quo{x}) - \quo{L_k(x)}} \le \e\norm{\quo{x}} \end{align*} Our ultimate goal is to show that $\SSN{I}^0 = \SSN{P}(\NN)$. The pointclasses we will be concerned with are $\PC{BP},\PC{H},\PC{C}$, and $\PC{\Delta^1_1}$, consisting of those sets with the Baire property, the Haar-measurable sets, the $\PC{C}$-measurable sets (see~\cite[\S{29.D}]{Kechris.CDST}), and the Borel sets, respectively. 
\begin{prop} \label{ideals} Let $\e\ge 0$ and let $\PC{\Gamma}$ be a pointclass; then each $\SSN{I}^\e$, $\SSN{I}^\e(\PC{\Gamma})$, and $\SSN{I}_\sigma^\e(\PC{\Gamma})$ is an ideal containing the finite sets, and \[ \SSN{I}^\e(\PC{BP}) \subseteq \SSN{I}^{8\e}(\PC{\Delta}^1_1)\qquad\mbox{and}\qquad \SSN{I}_\sigma^\e(\PC{BP}) \subseteq \SSN{I}_\sigma^{8\e}(\PC{\Delta}^1_1) \] Finally, \[ \SSN{I}^0 = \SSN{I}^0(\PC{BP}) = \bigcap_{\e > 0} \SSN{I}^\e(\PC{BP}) \] \end{prop} \begin{proof} Clearly each $\SSN{I}^\e$, $\SSN{I}^\e(\PC{\Gamma})$, and $\SSN{I}_\sigma^\e(\PC{\Gamma})$ is hereditary and contains the finite sets. To see that e.g. $\SSN{I}_\sigma^\e(\PC{\Gamma})$ is closed under finite unions, consider $\SN{A},\SN{B}\in\SSN{I}_\sigma^\e(\PC{\Gamma})$. Let $L_m^\SN{A}$ ($m\in\NN$) and $L_n^\SN{B}$ ($n\in\NN$) be $\PC{\Gamma}$-measurable functions which witness that $\SN{A},\SN{B}\in\SSN{I}_\sigma^\e(\PC{\Gamma})$ respectively. Put, for all $x\in \PA{F}\rs (\SN{A}\cup\SN{B})$ and $m,n\in\NN$, \[ L_{mn}^{\SN{A}\cup\SN{B}}(x) = L_m^\SN{A}(x\rs\SN{A}) + L_n^\SN{B}(x\rs(\SN{B}\sm\SN{A})) \] Then this family of functions witnesses $\SN{A}\cup\SN{B}\in\SSN{I}_\sigma^\e(\PC{\Gamma})$. To see that $\SSN{I}^\e(\PC{BP}) \subseteq \SSN{I}^{8\e}(\PC{\Delta}^1_1)$, let $\SN{A}\in\SSN{I}^\e(\PC{BP})$ and fix a Baire-measurable $\e$-lift $L^C$ of $\vp$ on $\SN{A}$. Recall that the unitary group $\U$ of $\PA{F}\rs\SN{A}$ is a Polish group; hence as $L^C$ is Baire-measurable, there is a dense $G_\delta$ set $\X\subseteq\U$ on which $L^C$ is continuous. Let \[ \R = \set{(u,v)\in\U\times\U}{v\in\X\cap u^*\X} \] Then $\R$ is Borel, and has comeager sections, hence by~\cite[Theorem~8.6]{Kechris.CDST} it has a Borel-measurable uniformization $S$. It follows that the function \[ u\mapsto L^C(uS(u))L^C(S(u)^*) \] is Borel-measurable, and a $2\e$-lift of $\vp$ on $\U$. 
Now, it is a standard fact that there are continuous functions $T_1,T_2,T_3,T_4 : \PA{F}\rs\SN{A}\to\U$ such that \[ \sum_i T_i(x) = x \] for all $x\in\PA{F}\rs\SN{A}$. Composing these maps with the function on $\U$ defined above, we obtain an $8\e$-lift of $\vp$ on all of $\PA{F}\rs\SN{A}$. The inclusion $\SSN{I}_\sigma^\e(\PC{BP}) \subseteq \SSN{I}_\sigma^{8\e}(\PC{\Delta}^1_1)$ follows from similar reasoning. The equality $\SSN{I}^0 = \SSN{I}^0(\PC{BP})$ follows from Theorem~\ref{definable->lifts} and the fact that a Baire-measurable function is continuous on a dense $G_\delta$. Clearly, \[ \SSN{I}^0(\PC{BP}) \subseteq \bigcap_{\e > 0} \SSN{I}^\e(\PC{BP}) \] Now to see the other inclusion, note by the above that \[ \bigcap_{\e > 0} \SSN{I}^\e(\PC{BP}) = \bigcap_{\e > 0} \SSN{I}^\e(\PC{\Delta}^1_1) \] So, suppose that for each $\e > 0$, $\SN{A}\in\SSN{I}^\e(\PC{\Delta}^1_1)$, and let $L^\e$ be a Borel-measurable function witnessing this. Put \[ \Lambda = \set{(x,y)\in (\PA{F}_1\rs \SN{A})\times (\PA{B}_1\rs\SN{A})}{\forall \e > 0 \; \norm{\quo{L^\e(x) - y}}\le \e} \] Then, $\Lambda$ is Borel, and hence has a $\PC{C}$-measurable uniformization by the Jankov-von Neumann theorem. This uniformization is clearly a lift of $\vp$ on $\SN{A}$. Thus $\SN{A}\in\SSN{I}^0(\PC{BP})$ as required. \end{proof} The two results below are simple modifications of \cite[Lemma~7.6]{Farah.CO} and~\cite[Proposition~7.7]{Farah.CO}, respectively; we include proofs here for completeness, but we make no claims to their originality. \begin{lemma} \label{haar.expansion} Suppose $\vp\rs\SN{A}$ has a Borel-measurable $\e$-lift on $S$, where \[ S\subseteq \prod_{n\in\SN{A}} \U(\A{F}_n) = \U \] is some set with positive Haar measure. Then $\vp$ has a Borel-measurable $2\e$-lift on all of $\U$. \end{lemma} \begin{proof} Let $L : S\to \PA{B}\rs\SN{A}$ be a Borel-measurable $\e$-lift of $\vp$. By Luzin's theorem, we may assume that $S$ is compact and $L$ is continuous on $S$. 
Let $U$ be a basic open subset of $\U$ such that $\mu(S\cap U) > \mu(U) / 2$. Then there are $k\in\NN$ and a finite $F$ contained in \[ \prod_{n\in\SN{A}\cap k} \U(\A{F}_n) \] such that $FU = \U$. It follows that $\mu(FS) > 1/2$. Now define $L' : FS \to \PA{B}\rs\SN{A}$ by letting $L'(u) = L(v^* u)$ whenever $v$ is the first member of $F$ such that $v^*u \in S$. Then $L'$ is continuous and an $\e$-lift of $\vp$ on $FS$ (noting that for each $v\in F$, $\pi(v) = \pi(1)$). Now let \[ \Lambda = \set{(u,v)\in \U\times FS}{ uv^*\in FS } \] Then the section of $\Lambda$ over a given $u\in\U$ is exactly $FS\cap (FS)^* u$, which has positive Haar measure since $\mu(FS) > 1/2$. By \cite[Theorem~8.6]{Kechris.CDST}, it follows that $\Lambda$ has a Borel-measurable uniformization $T : \U\to FS$. Then the map \[ u\mapsto L'(u T(u)^*) L'(T(u)) \] defines a $2\e$-lift of $\vp$ on all of $\U$. \end{proof} \begin{prop} \label{sigma->one} Let $\e > 0$ be given. If $\SN{A}\in \SSN{I}_\sigma^\e(\PC{H})$ is infinite and $\SN{A} = \bigcup_k \SN{A}_k$ is a partition of $\SN{A}$ into infinite sets, then there is some $k$ for which $\SN{A}_k\in\SSN{I}^{4\e}(\PC{\Delta}^1_1)$. \end{prop} \begin{proof} Let $U_n$ be the unitary group of $\A{F}_n$. Then \[ \U_k = \prod_{n\in\SN{A}_k} U_n\quad\mbox{and}\quad \W_k = \prod_{\ell\ge k} \U_\ell \] are compact groups, and clearly \[ \U \simeq \prod_k \U_k = \W_0 \] We thus view each $\U_k$ and $\W_k$ as a compact subgroup of $\U$. Fix Borel functions $L_i$ $(i\in\NN)$ witnessing $\SN{A}\in\SSN{I}_\sigma^\e(\PC{H})$. Assume, for sake of contradiction, that no $\SN{A}_k$ is a member of $\SSN{I}^{4\e}(\PC{\Delta}^1_1)$. 
We will construct compact sets $\V_k\subseteq\W_k$ of positive measure (using the normalized Haar measure $\mu_k$ on $\W_k$), and elements $u_k$ of $\U_k$, such that \begin{enumerate} \item $u_k \V_{k+1} \subseteq \V_k$, and \item for all $v\in \V_{k+1}$, \[ \norm{\quo{(L_k (u_0\cdots u_k v) - L(u_k))\rs \SN{A}_k}} > \e \] \end{enumerate} Given such sequences, note that $\V_k' = u_0\cdots u_{k-1} \V_k$ is a decreasing sequence of nonempty, compact sets in $\U$. Thus $u_\infty = \prod_k u_k$ is a member of their intersection. Since $u_\infty \in \PA{F}\rs\SN{A}$, there is some $k$ such that \[ \norm{\quo{L_k(u_\infty) - L(u_\infty)}} \le \e \] But we have \[ u_\infty = u_0\cdots u_k \prod_{\ell > k} u_\ell \] and $\prod_{\ell > k} u_\ell \in \V_{k+1}$, so by the construction, \[ \norm{\quo{(L_k(u_\infty) - L(u_k))\rs \SN{A}_k}} > \e \] Since $\quo{L(u_\infty)\rs \SN{A}_k} = \quo{L(u_k)\rs\SN{A}_k}$ this provides the necessary contradiction. Now we proceed to the construction of $u_k$ and $\V_{k+1}$. Suppose we are given $u_0,\ldots,u_{k-1}$ and $\V_k$ satisfying the above conditions. Since $\V_k$ has positive Haar measure, we may find compact sets $S\subseteq \U_k$ and $T\subseteq \W_{k+1}$, each with positive measure (under their respective Haar measures), such that \[ \forall x\in S\quad \mu_{k+1}\set{y\in T}{(x,y)\in\V_k} > \mu_{k+1}(T)/2 \] Define $\Xi\subseteq \U_k\times\W_{k+1}\times (\PA{B}_1\rs\SN{A}_k)$ and $\Lambda\subseteq S\times \prod \U(\A{B}_n)$ by \begin{gather*} (x,y,z)\in\Xi \iff \norm{\quo{L_k(u_0\cdots u_{k-1}\cdot x\cdot y)\rs \SN{A}_k - z}} \le \e \\ (x,z)\in \Lambda\iff \mu_{k+1}\set{y\in T}{(x,y,z)\in \Xi} > \mu_{k+1}(T)/2 \end{gather*} Then $\Xi$ and $\Lambda$ are both Borel. Suppose first that for every $x\in S$ there is some $z$ with $(x,z)\in \Lambda$. Then by the Jankov-von Neumann uniformization theorem there is a $\PC{C}$-measurable function $f : S\to \prod \U(\A{B}_n)$ uniformizing $\Lambda$. 
Since $S$ has positive measure, by Lemma~\ref{haar.expansion} $f$ cannot be a $2\e$-lift of $\vp$ on $S$, since then $\vp$ would have a (Borel-measurable) $4\e$-lift on $\SN{A}_k$, contradicting our starting assumption. So we may find some $u_k\in S$ such that there is no $z$ with $(u_k,z)\in\Lambda$, and in particular $(u_k,L(u_k))\not\in\Lambda$. It follows that the set \[ R = \set{y\in T}{(u_k,y,L(u_k))\not\in \Xi\;\land\; (u_k,y)\in \V_k} \] has positive measure. Taking $\V_{k+1}$ to be some compact subset of $R$ with positive measure finishes the construction. \end{proof} Recall that a family $\SSN{A}\subseteq\SSN{P}(\NN)$ is \emph{almost-disjoint} (or \emph{a.d.}) if for all distinct $\SN{A},\SN{B}\in\SSN{A}$, $\SN{A}\cap\SN{B}$ is finite. An a.d. family $\SSN{A}$ is \emph{treelike} if there is a bijection $t : \NN\to 2^{<\omega}$ such that for all $\SN{A}\in\SSN{A}$, and all $n,m\in\SN{A}$, $t(n)\subseteq t(m)$ or $t(m) \subseteq t(n)$. Treelike families are called \emph{neat} in~\cite{Velickovic.OCAA}. \begin{lemma} \label{can.has.trees} If $\SSN{A}$ is a treelike, a.d. family, then $\SSN{A}\sm\SSN{I}_\sigma^\e(\PC{C})$ is countable for each $\e > 0$. \end{lemma} \begin{proof} Fix $\e > 0$, and let $X$ consist of the pairs $(\SN{A},x)$ where $\SN{A}$ is an infinite subset of some member $\tau(\SN{A})$ of $\SSN{A}$, and $x$ is in the unit ball of $\PA{F}\rs\SN{A}$. Notice that $\tau(\SN{A})$ is unique, and $\tau$ as a map $\SSN{A}\to 2^\omega$ is continuous, since $\SSN{A}$ is treelike. We define a coloring $[X]^2 = K_0\cup K_1$ by placing $\{(\SN{A},x),(\SN{\bar{A}},\bar{x})\}\in K_0$ if and only if \begin{enumerate} \item $\tau(\SN{A})\neq \tau(\SN{\bar{A}})$, \item for all $n\in \SN{A}\cap\SN{\bar{A}}$, $\norm{\proj{n}{x} - \proj{n}{\bar{x}}} < 1 / (n+1)$, and \item there is an $n\in \SN{A}\cap\SN{\bar{A}}$ such that $\norm{\proj{n}{L(x)} - \proj{n}{L(\bar{x})}} > \e/2$. 
\end{enumerate} It follows that $K_0$ is open in the topology on $X$ obtained by identifying $(\SN{A},x)\in X$ with $(\SN{A},x,L(x))$, a member of the Polish space \[ \pow(\NN)\times \PA{F}_1\times \PA{B}_1 \] \begin{claim} There is no uncountable $Y\subseteq X$ such that $[Y]^2\subseteq K_0$. \end{claim} \begin{proof} Suppose for sake of contradiction that $Y$ is uncountable and $[Y]^2\subseteq K_0$. Let $\SN{D} = \bigcup\set{\SN{A}}{(\SN{A},x)\in Y}$, and choose $y\in\PA{F}\rs \SN{D}$ such that for all $n\in \SN{D}$ there is some $(\SN{A},x)\in Y$ with $n\in \SN{A}$ and $\proj{n}{y} = \proj{n}{x}$. Since $Y$ is $K_0$-homogeneous, it follows that for all $(\SN{A},x)\in Y$, \[ \forall n\in \SN{A}\quad \norm{\proj{n}{x} - \proj{n}{y}} < \frac{1}{n+1} \] In particular, $\quo{x} = \quo{y\rs \SN{A}}$ for all $(\SN{A},x)\in Y$, and hence $\quo{L(x)} = \quo{L(y\rs \SN{A})}$ for all $(\SN{A},x)\in Y$. Since $Y$ is uncountable we may find an $\bar{n}\in\NN$ such that for uncountably many $(\SN{A},x)\in Y$, we have \[ \forall n\in \SN{A}\sm\bar{n}\quad \norm{\proj{n}{L(x)} - \proj{n}{L(y)}} \le \e / 2 \] By the separability of $\A{B}_n$ for $n < \bar{n}$, there are distinct $(\SN{A},x),(\SN{\bar{A}},\bar{x})\in Y$, both satisfying the above, such that \[ \forall n < \bar{n} \quad \norm{\proj{n}{L(x)} - \proj{n}{L(\bar{x})}} \le \e \] Then $\{(\SN{A},x),(\SN{\bar{A}},\bar{x})\}\in K_1$, a contradiction. \end{proof} By $\TA$, there is a countable cover $X_p$ ($p\in\NN$) of $X$ by $K_1$-homogeneous sets. Let $D_p$ be a countable, dense subset of $X_p$ for each $p$, and let \[ \SSN{D} = \set{\tau(\SN{\bar{A}})}{p\in\NN\land (\SN{\bar{A}},\bar{x})\in D_p} \] To prove the lemma it will suffice to show that $\SSN{A}\sm\SSN{D}\subseteq\SSN{I}_\sigma^\e(\PC{C})$. \begin{claim} Let $\SN{C}\in\SSN{A}\sm\SSN{D}$. 
Then there is a partition $\SN{C} = \SN{C}_0\cup \SN{C}_1$ such that for all $p\in\NN$ and all $(\SN{A},x)\in X_p$, if $\SN{A}\subseteq \SN{C}_i$ for some $i$ then for all $k$ there is $(\SN{\bar{A}},\bar{x})\in D_p$ with \begin{enumerate} \item\label{close.a} $\SN{A}\cap k = \SN{\bar{A}}\cap k$, \item\label{close.x} for all $n\in \SN{A}\cap\SN{\bar{A}}$, $\norm{\proj{n}{x} - \proj{n}{\bar{x}}} < 1 / (n+1)$. \end{enumerate} \end{claim} \begin{proof} For each $k\in\NN$, let $E_k$ be a finite subset of $X$ such that for all $p < k$ and $(\SN{A},x)\in X_p$, there is some $(\SN{\bar{A}},\bar{x})\in D_p\cap E_k$ satisfying~\eqref{close.a} and the following restricted form of~\eqref{close.x}; \[ \forall n\in \SN{A}\cap\SN{\bar{A}}\cap k\quad \norm{\proj{n}{x} - \proj{n}{\bar{x}}} < \frac{1}{n+1} \] This is possible by density of $D_p$ in $X_p$ and the fact that $\PA{F}\rs k$ is finite-dimensional (and hence has a totally-bounded unit ball). Note that for each $(\SN{\bar{A}},\bar{x})\in E_k$, the set $\SN{C}\cap\SN{\bar{A}}$ is finite. Let $k^+$ be minimal such that for all $(\SN{\bar{A}},\bar{x})\in E_k$, $\SN{C}\cap\SN{\bar{A}}\subseteq k^+$. Set $k_0 = 0$ and $k_{i+1} = k_i^+$ for each $i$, and \[ \SN{C}_0 = \bigcup_i \SN{C}\cap [k_{2i},k_{2i+1})\qquad \SN{C}_1 = \bigcup_i \SN{C}\cap [k_{2i+1},k_{2i+2}) \] The claim follows. \end{proof} Define a set $\Lambda_p \subseteq (\PA{F}\rs \SN{C}_0) \times (\PA{B}\rs \SN{C}_0)$ by placing $(x,y)\in\Lambda_p$ if and only if for every $k\in\NN$ there is some $(\SN{\bar{A}},\bar{x})\in D_p$ such that conditions~\eqref{close.a} and~\eqref{close.x} hold (with $\SN{C}_0$ replacing $\SN{A}$), and moreover \[ \forall n < k\quad \norm{\proj{n}{y} - \proj{n}{L(\bar{x})}} \le \e/2 \] Clearly, $\Lambda_p$ is Borel (in the usual topology), and if $(\SN{C}_0,x)\in X_p$ then by the above claim and the $K_1$-homogeneity of $X_p$, $(x,L(x))\in \Lambda_p$. 
Moreover, if $(x,y)\in \Lambda_p$, then \[ \forall n\in \SN{C}_0\quad \norm{\proj{n}{L(x)} - \proj{n}{y}} \le \e \] Let $L_p$ be a $\PC{C}$-measurable uniformization of $\Lambda_p$ for each $p\in\NN$. Then, since $X = \bigcup_p X_p$, the sequence $L_p$ ($p\in\NN$) is a witness to the fact that $\SN{C}_0\in\SSN{I}_\sigma^\e(\PC{C})$. Similarly, $\SN{C}_1$ is in $\SSN{I}_\sigma^\e(\PC{C})$, and hence so is $\SN{C} = \SN{C}_0\cup \SN{C}_1$. \end{proof} \begin{prop} \label{ccc/fin} Every uncountable, a.d. family $\SSN{B}\subseteq\SSN{P}(\NN)$ meets $\SSN{I}^0$. \end{prop} \begin{proof} Suppose for sake of contradiction that $\SSN{B}$ is an uncountable a.d. family disjoint from $\SSN{I}^0$. By Proposition~\ref{ideals}, we may assume that for some $\e > 0$, $\SSN{B}$ is disjoint from $\SSN{I}^\e(\PC{\Delta}^1_1)$. By a standard application of $\MA$, we may find an uncountable, a.d. family $\SSN{A}'$ such that every $\SN{A}\in\SSN{A}'$ almost-contains infinitely many members of $\SSN{B}$. Moreover, using $\MA$ with~\cite[Lemma~2.3]{Velickovic.OCAA}, there is an uncountable $\SSN{A} \subseteq \SSN{A}'$ and, for each $\SN{A}\in\SSN{A}$, a partition $\SN{A} = \SN{A}_0\cup \SN{A}_1$, such that for each $i < 2$ the family \[ \SSN{A}_i = \set{\SN{A}_i}{\SN{A}\in\SSN{A}} \] is treelike. By Lemma~\ref{can.has.trees}, there are uncountably many $\SN{A}\in\SSN{A}$ such that $\SN{A}_0\in\SSN{I}_\sigma^{\e/4}(\PC{C})$; and by another application of Lemma~\ref{can.has.trees}, there is then some $\SN{A}\in\SSN{A}$ such that both $\SN{A}_0$ and $\SN{A}_1$ are members of $\SSN{I}_\sigma^{\e/4}(\PC{C})$, and hence their union $\SN{A}$ is also a member of $\SSN{I}_\sigma^{\e/4}(\PC{C})$. By Proposition~\ref{sigma->one}, since $\SN{A}$ almost-contains infinitely many members of $\SSN{B}$, there must be some $\SN{B}\in\SSN{B}\cap\SSN{I}^\e(\PC{\Delta}^1_1)$. This contradicts our assumption. \end{proof} \begin{lemma} $\SSN{I}^0$ is a dense $P$-ideal. 
\end{lemma} \begin{proof} That $\SSN{I}^0$ is dense follows easily from Proposition~\ref{ccc/fin}. To prove it's a $P$-ideal, we will first show that given any infinite sequence $\SN{A}_k$ ($k\in\NN$) of sets in $\SSN{I}^\e$, where $\e > 0$, there is some $\SN{B}\in\SSN{I}^{3\e}$ such that $\SN{A}_k\subseteq^* \SN{B}$ for all $k$. So fix a sequence $\SN{A}_k$ ($k\in\NN$) of sets in $\SSN{I}^\e$, where $\e > 0$. We may assume that the $\SN{A}_k$'s are pairwise disjoint. Assume for sake of contradiction that there is no $\SN{B}\in\SSN{I}^{3\e}$ which almost-includes every $\SN{A}_k$, and for each $f : \NN\to\NN$ let \[ \SN{B}_f = \bigcup\set{\SN{A}_k\cap f(k)}{k\in\NN} \] Then for every $f\in\NN^\NN$ and $k\in\NN$, $\SN{B}_f\cap \SN{A}_k$ is finite, and if $f <^* g$ then $\SN{B}_f\subseteq^* \SN{B}_g$. We will prove that for every $f\in\NN^\NN$ there is some $g\in\NN^\NN$ such that $f <^* g$ and $\SN{B}_g\sm \SN{B}_f\not\in\SSN{I}^\e$. By a simple recursion we may then construct a $<^*$-increasing sequence $f_\gamma\in\NN^\NN$, for $\gamma < \omega_1$, with $\SN{B}_{f_{\gamma+1}}\sm \SN{B}_{f_\gamma} \not\in\SSN{I}^\e$ for each $\gamma$. Thus the sets $\SN{B}_{f_{\gamma+1}}\sm \SN{B}_{f_\gamma}$ form an uncountable almost-disjoint family which is disjoint from $\SSN{I}^\e$, contradicting Proposition~\ref{ccc/fin}. For simplicity we will assume that $f(k) = 0$ for all $k\in\NN$, and show that for some $g\in\NN^\NN$, $\SN{B}_g\not\in\SSN{I}^\e$. For sake of contradiction, suppose that this is not so. Define a coloring $[\NN^\NN]^2 = K_0\cup K_1$ by \[ \{g,h\}\in K_0 \iff \exists n\in \SN{B}_g\cap \SN{B}_h \quad \norm{\alpha_n^{\SN{B}_g} - \alpha_n^{\SN{B}_h}} > 2\e \] where for each $\SN{B}\in\SSN{I}^\e$, we have fixed a sequence of $*$-homomorphisms $\alpha_n^{\SN{B}} : \A{F}_n\to\A{B}_n$ (with $\alpha_n^{\SN{B}} = 0$ when $n\not\in\SN{B}$) according to the definition of $\SSN{I}^\e$. 
It follows that $K_0$ is open when $\NN^\NN$ is given the topology obtained by identifying $g$ with $(g,\alpha^{\SN{B}_g})$, a member of the Polish space \[ \NN^\NN\times \prod_n \Hom(\A{F}_n, \A{B}_n) \] \begin{claim} There is no uncountable, $K_0$-homogeneous subset of $\NN^\NN$. \end{claim} \begin{proof} Suppose $H$ is such and $|H| = \aleph_1$. Since $\bb > \aleph_1$, there is some $\bar{h}\in\NN^\NN$ such that for every $h\in H$, $h <^* \bar{h}$. By refining $H$ to an uncountable subset $\bar{H}$, we may assume that for some $\bar{k}\in\NN$ and some sequence of $*$-homomorphisms $\zeta_k : \A{F}_k\to \A{B}_k$ ($k < \bar{k}$), we have for all $h\in\bar{H}$ that \begin{enumerate} \item for all $k\ge \bar{k}$, $h(k) < \bar{h}(k)$, \item\label{close.tail} for all $k\ge \bar{k}$, $\norm{\alpha_k^{\SN{B}_h} - \alpha_k^{\SN{B}_{\bar{h}}}} \le \e$. \item\label{close.head} for all $k < \bar{k}$, $\norm{\alpha_k^{\SN{B}_h} - \zeta_k} \le \e$. \end{enumerate} Now, clearly, $\bar{H}$ is $K_1$-homogeneous, and this is a contradiction. \end{proof} By $\TA$, $\NN^\NN$ must be $\sigma$-$K_1$-homogeneous. Since $\NN^\NN$ is countably directed under $<^*$, there must be some $K_1$-homogeneous set $H$ which is $<^*$-cofinal in $\NN^\NN$. It follows that for some $\bar{k}\in\NN$, $H$ is $<^{\bar{k}}$-cofinal in $\NN^\NN$, and hence \[ \SN{C} = \bigcup_{h\in H} \SN{B}_h \supseteq \bigcup_{k=\bar{k}}^\infty \SN{A}_k \] For each $n\in \SN{C}$, choose a $*$-homomorphism $\alpha_n : \A{F}_n\to \A{B}_n$ from the set $\set{\alpha_n^{\SN{B}_h}}{h\in H\land n\in \SN{B}_h}$. By the $K_1$-homogeneity of $H$, then, for any $h\in H$ we have \[ \forall n\in \SN{B}_h\quad \norm{\alpha_n - \alpha_n^{\SN{B}_h}} \le 2\e \] \begin{claim} There is some $\ell$ such that the sequence $\alpha_n$ ($n\in \SN{C}$) forms a $3\e$-lift of $\vp$ on $\SN{A}_k$ for all $k\ge \ell$. 
\end{claim} \begin{proof} Suppose not; then there are infinitely many $k\ge\bar{k}$ such that for some $x_k\in\PA{F}_1\rs\SN{A}_k$, \[ \limsup_{n\in \SN{A}_k} \norm{\alpha_n(\proj{n}{x_k}) - \proj{n}{L(x_k)}} > 3\e \] For simplicity we assume that this occurs for all $k\ge \bar{k}$. Define $x\in\PA{F}\rs\bigcup_k\SN{A}_k$ by $x\rs\SN{A}_k = x_k$ for each $k\ge\bar{k}$ and $x\rs\SN{A}_k = 0$ for $k < \bar{k}$. Then $\quo{L(x)\rs \SN{A}_k} = \quo{L(x_k)}$ for each $k\ge\bar{k}$. Hence for each $k\ge\bar{k}$, we may choose some $n_k\in \SN{A}_k$ large enough that \[ \norm{\alpha_{n_k}(\proj{n_k}{x}) - \proj{n_k}{L(x)}} > 3\e \] Define $h\in\NN^\NN$ by $h(k) = n_k + 1$ and let $y = x\rs \SN{B}_h$. Then $\quo{L(y)} = \quo{L(x)\rs \SN{B}_h}$, and so for any $k\ge\bar{k}$ large enough, \[ \norm{\alpha_{n_k}(\proj{n_k}{y}) - \proj{n_k}{L(y)}} > 3\e \] But $\SN{B}_h\in \SSN{I}^\e$, and $\norm{\alpha_n - \alpha_n^{\SN{B}_h}} \le 2\e$ for all $n\in \SN{B}_h$. This is a contradiction. \end{proof} \begin{claim} The sequence $\alpha_n$ ($n\in \SN{C}$) forms a $3\e$-lift of $\vp$ on $\bigcup\set{\SN{A}_k}{k\ge\ell}$. \end{claim} \begin{proof} This follows from the fact that the ideal generated by $\set{\SN{B}_f}{f\in\NN^\NN}$ and $\set{\SN{A}_k}{k\ge\ell}$ is dense in $\pow(\bigcup\set{\SN{A}_k}{k\ge\ell})$. \end{proof} Since $\SN{A}_0,\ldots,\SN{A}_{\ell-1}\in\SSN{I}^\e$, and $\bigcup\set{\SN{A}_k}{k\ge\ell}\in\SSN{I}^{3\e}$, it follows that their union is in $\SSN{I}^{3\e}$. This clearly contradicts our assumption on the sequence $\SN{A}_k$. Now assume we have been given a sequence $\SN{A}_k$ ($k\in\NN$) from $\SSN{I}^0$. Then for each $\ell\in\NN$ we may choose some $\SN{B}_\ell\in\SSN{I}^{1/(\ell+1)}$ such that $\SN{A}_k\subseteq^* \SN{B}_\ell$ for all $k\in\NN$. Then we may find $\SN{C}$ such that $\SN{A}_k \subseteq^* \SN{C} \subseteq^* \SN{B}_\ell$ for all $k,\ell\in\NN$. It follows that $\SN{C}\in\SSN{I}^0$, hence $\SSN{I}^0$ is a $P$-ideal. 
\end{proof} Finally, we are ready to prove Theorem~\ref{fa->cfh.stronger}. \begin{proof} For each $\SN{A}\in\SSN{I}^0$, fix a sequence $\alpha_n^{\SN{A}} : \A{F}_n\to\A{B}_n$ of $*$-homomorphisms, witnessing that $\SN{A}\in\SSN{I}^0$. For each $\e > 0$ define a coloring $[\SSN{I}^0]^2 = K_0^\e\cup K_1^\e$ by \[ \{\SN{A},\SN{B}\}\in K_0^\e\iff \exists n\in\SN{A}\cap\SN{B}\quad \norm{\alpha_n^{\SN{A}} - \alpha_n^{\SN{B}}} > \e \] Then $K_0^\e$ is open when $\SN{A}\in\SSN{I}^0$ is identified with $(\SN{A},\alpha^{\SN{A}})$. \begin{claim} There is no uncountable, $K_0^\e$-homogeneous subset of $\SSN{I}^0$, for any $\e > 0$. \end{claim} \begin{proof} Suppose $H$ is $K_0^\e$-homogeneous, and has size $\aleph_1$. Since $\SSN{I}^0$ is a P-ideal, we may form a subset $\bar{H}$ of $\SSN{I}^0$ which, under the $\subseteq^*$ ordering, is an $\omega_1$-chain dominating $H$. By (a weakening of) $\TA$, we may assume (by going to a cofinal subset of $\bar{H}$) that $\bar{H}$ is either $K_0^{\e/2}$- or $K_1^{\e/2}$-homogeneous. Assume the latter holds; by refining $H$ to an uncountable subset, we may assume there is some $\bar{n}$ such that for all $\SN{A}\in H$, there is some $\SN{\bar{A}}\in\bar{H}$, for which \[ \SN{A}\sm\SN{\bar{A}}\subseteq \bar{n}\quad\mbox{and}\quad\forall n\in\SN{A}\sm\bar{n}\quad \norm{\alpha_n^{\SN{A}} - \alpha_n^{\SN{\bar{A}}}} \le \e/4 \] But then any pair $\{\SN{A},\SN{B}\}\in [H]^2$ with \[ \forall n < \bar{n}\quad \norm{\alpha_n^{\SN{A}} - \alpha_n^{\SN{B}}} \le \e \] is in $K_1^\e$, a contradiction; so $\bar{H}$ is $K_0^{\e/2}$-homogeneous. Replacing $H$ with $\bar{H}$ and $\e$ with $\e / 2$, we may assume without loss of generality that $H$ is an increasing $\omega_1$-chain with respect to $\subseteq^*$. Define a forcing notion $\PP$ as follows. 
The conditions of $\PP$ are taken to be triples $p = (\ell_p, x_p, H_p)$, where \begin{enumerate} \item $\ell_p\in\NN$, $x_p\in\PA{F}_1\rs \ell_p$, and $H_p\in [H]^{<\omega}$, \item\label{condition.spread} for all distinct $\SN{A},\SN{B}\in H_p$, there is some $n\in\SN{A}\cap\SN{B}\cap \ell_p$ with \[ \norm{\alpha_n^\SN{A}(\proj{n}{x_p}) - \alpha_n^\SN{B}(\proj{n}{x_p})} > \e/2 \] \end{enumerate} Put $p\le q$ if and only if $\ell_p\ge\ell_q$, $H_p\supseteq H_q$, and $\norm{x_p\rs \ell_q - x_q} < \e/4$. We will argue that $\PP$ is ccc. Suppose $\T{A}\subseteq\PP$ is uncountable. For each $p\in \T{A}$, let $\SN{A}_p$ be the minimal member of $H_p$ with respect to $\subseteq^*$, and choose $m_p\ge\ell_p$ large enough and $\delta_p > 0$ small enough that \begin{itemize} \item for all $\SN{A}\in H_p$, $\SN{A}_p\sm \SN{A} \subseteq m_p$ and \[ \forall n\ge m_p\quad \norm{\alpha_n^\SN{A} - \alpha_n^{\SN{A}_p}} \le \e / 4 \] \item for all distinct $\SN{A},\SN{B}\in H_p$, there is some $n\in\SN{A}\cap\SN{B}\cap \ell_p$ with \[ \norm{\alpha_n^\SN{A}(\proj{n}{x_p}) - \alpha_n^\SN{B}(\proj{n}{x_p})} > \e/2 + \delta_p \] \end{itemize} By thinning out $\T{A}$ we may assume that there are $k,\ell,m\in\NN$ and $\delta > 0$ such that for all $p\in \T{A}$, we have $|H_p| = k$, $\ell_p = \ell$, $m_p = m$ and $\delta_p \ge \delta$. Finally, by further thinning $\T{A}$ we may assume that for all distinct $p,q\in \T{A}$, \begin{itemize} \item $\norm{x_p - x_q} < \delta / 2$, \item for all $n < m$, $\norm{\alpha_n^{\SN{A}_p} - \alpha_n^{\SN{A}_q}} < \e/2$, and \item $H_p\cap H_q = \emptyset$. \end{itemize} Now let $p,q\in \T{A}$ be given. Since $\{\SN{A}_p,\SN{A}_q\}\in K_0^\e$, there is some $n\in\SN{A}_p\cap\SN{A}_q$ such that $\norm{\alpha_n^{\SN{A}_p} - \alpha_n^{\SN{A}_q}} > \e$. By the above it must be that $n\ge m$. 
Choose $x\in\PA{F}_1\rs (n + 1)$ with $x\rs \ell = x_p$ and \[ \norm{\alpha_n^{\SN{A}_p}(\proj{n}{x}) - \alpha_n^{\SN{A}_q}(\proj{n}{x})} > \e/2 \] and put $r = (n + 1, x, H_p\cup H_q)$. We claim that $r\in\PP$ and $r$ extends both $p$ and $q$. The only thing to check is that $r$ satisfies~\eqref{condition.spread}; the rest is clear. Let $\SN{A},\SN{B}\in H_p\cup H_q$ be given. In the case where both $\SN{A}$ and $\SN{B}$ are in $H_p$, \eqref{condition.spread} holds simply because $x\rs \ell = x_p$; in the case of $\SN{A},\SN{B}\in H_q$,~\eqref{condition.spread} holds since $\norm{x_p - x_q} < \delta / 2$. Finally, if $\SN{A}\in H_p$ and $\SN{B}\in H_q$, then since $n\ge m$, \[ \norm{\alpha_n^\SN{A}(\proj{n}{x}) - \alpha_n^\SN{B}(\proj{n}{x})} > \norm{\alpha_n^{\SN{A}_p}(\proj{n}{x}) - \alpha_n^{\SN{A}_q}(\proj{n}{x})} - (\e/4 + \e/4) > \e/2 \] and so~\eqref{condition.spread} is satisfied. By $\MA$, we may find an $x\in\PA{F}_1$ and an uncountable $\hat{H}\subseteq H$ such that for all distinct $\SN{A},\SN{B}\in\hat{H}$, \[ \exists n\in\SN{A}\cap\SN{B} \quad \norm{\alpha_n^\SN{A}(\proj{n}{x}) - \alpha_n^\SN{B}(\proj{n}{x})} > \e / 2 \] By our choice of $*$-homomorphisms $\alpha_n^\SN{A}$, we have for all $\SN{A}\in\hat{H}$ \[ \limsup_{n\in\SN{A}} \norm{\alpha_n^\SN{A}(\proj{n}{x}) - \proj{n}{L(x)}} = 0 \] The usual pigeonhole argument shows that this is a contradiction. \end{proof} We have shown that the first alternative of $\TA$ fails for each of the partitions $[\SSN{I}^0]^2 = K_0^\e\cup K_1^\e$; hence $\SSN{I}^0$ is $\sigma$-$K_1^\e$-homogeneous for every $\e > 0$. Let $\e_k = 2^{-k}$ for each $k\in\NN$; then, since $\SSN{I}^0$ is a P-ideal, we may find a decreasing sequence of sets \[ \SSN{I}^0 \supseteq X_0 \supseteq X_1 \supseteq \cdots \] such that each $X_k$ is $K_1^{\e_k}$-homogeneous and cofinal in $\SSN{I}^0$ in the ordering $\subseteq^*$. 
By density of $\SSN{I}^0$, for each $k$ the set $\bigcup X_k$ must be cofinite; say $[m_k,\infty) \subseteq \bigcup X_k$, and $m_k < m_{k+1}$ for each $k$. Choose any sequence of $*$-homomorphisms $\alpha_n$, $n\ge m_0$, satisfying \[ \alpha_n \in \set{\alpha_n^\SN{A}}{m_k \le n < m_{k+1}\land n\in \SN{A}\in X_k} \] It follows that, for any $\SN{A}\in\SSN{I}^0$, \[ \limsup_{n\in\SN{A}} \norm{\alpha_n - \alpha_n^\SN{A}} = 0 \] Moreover, by density of $\SSN{I}^0$, this proves that the sequence $\alpha_n$ ($n\ge m_0$) makes up a lift of $\vp$ on $[m_0,\infty)$. \end{proof} \section{Coherent families of $*$-homomorphisms} \label{sec:coherent-families} \begin{thm} \label{cfh->borel} Assume $\TA$ and let $\A{A}_n$ ($n\in\NN$) be a sequence of UHF algebras. Let $\A{B}$ be a separable C*-algebra, and suppose \[ \vp : \prod\A{A}_n / \bigoplus \A{A}_n \to \A{M}(\A{B}) / \A{B} \] is determined by a coherent family of $*$-homomorphisms. Then $\Gr{\vp}$ is Borel. \end{thm} \begin{proof} Let $\A{A}_{n,k}$ ($k\in\NN$) be a suitable sequence of subalgebras of $\A{A}_n$, and suppose \[ \alpha^\xi_n : \A{A}_{n,\xi(n)}\to \A{B}\qquad (\xi\in\NN^\NN, n\in\NN) \] is a coherent family of $*$-homomorphisms which determines $\vp$. Define colorings $[\NN^\NN]^2 = K_0^\e \cup K_1^\e$, for each $\e > 0$, by placing $\{\xi,\eta\}\in K_0^\e$ if and only if \[ \exists n\in\NN\;\;\exists x\in\A{A}_{n,\xi(n)}\cap\A{A}_{n,\eta(n)}\;\;\norm{\alpha_n^\xi(x) - \alpha_n^\eta(x)} > \e\norm{x} \] Note that $\A{A}_{n,\xi(n)}\cap \A{A}_{n,\eta(n)} = \A{A}_{n,\min(\xi(n),\eta(n))}$. Given $\xi\in\NN^\NN$, let $\beta^\xi$ denote the sequence \[ \beta^\xi_{n,m} = \left\{\begin{array}{ll} \alpha_n^\xi\rs\A{A}_{n,m} & m\le\xi(n) \\ 0 & m > \xi(n) \end{array}\right. \] Each $K_0^\e$ is then open in the topology on $\NN^\NN$ obtained by identifying $\xi$ with \[ (\xi,\beta^\xi) \in \NN^\NN\times \prod_{n,m} \Hom(\A{A}_{n,m},\A{B}) \] where we use the point-norm topology on $\Hom(\A{A}_{n,m},\A{B})$. 
We claim, as usual, that there are no uncountable $K_0^\e$-homogeneous subsets of $\NN^\NN$, for any $\e > 0$. To see this, fix some $H\subseteq\NN^\NN$ of size $\aleph_1$. Since $\bb > \aleph_1$ we may find some $\bar{\xi}\in\NN^\NN$ such that $\xi <^* \bar{\xi}$ for all $\xi\in H$, and by refining $H$ to an uncountable subset we may assume that for some $\bar{n}$, we have for all $\xi\in H$ that \begin{itemize} \item for all $n\ge\bar{n}$, $\xi(n) < \bar{\xi}(n)$ and \item for all $n\ge\bar{n}$, $\norm{\alpha_n^{\bar{\xi}}\rs\A{A}_{n,\xi(n)} - \alpha_n^\xi} \le \e/2$. \end{itemize} By the uncountability of $H$ we may find distinct $\xi,\eta\in H$ with $\xi\rs\bar{n} = \eta\rs\bar{n}$ and for all $n < \bar{n}$, \[ \norm{\alpha_n^\xi - \alpha_n^\eta} \le \e \] hence $\{\xi,\eta\}\in K_1^\e$ and so $H$ cannot be $K_0^\e$-homogeneous. Let $\e_k$ ($k\in\NN$) be some sequence in $\RR^+$ converging to zero. By $\TA$ and the $\sigma$-directedness of $\NN^\NN$ under $<^*$, we may find sets \[ \NN^\NN \supseteq X_0\supseteq X_1 \supseteq \cdots \] where each $X_k$ is $K_1^{\e_k}$-homogeneous, and cofinal in $\NN^\NN$ with respect to $<^*$ (cf. the argument near the end of Theorem~\ref{fa->cfh.stronger}). Then we may construct an increasing sequence $n_k\in\NN$ ($k\in\NN$) such that each $X_k$ is $<^{n_k}$-cofinal in $\NN^\NN$. For each $n\in [n_k,n_{k+1})$ and $i\in\NN$, choose a function $\xi_{n,i}\in X_k$ such that $\xi_{n,i}(n) \ge i$. For each $n\in\NN$ let $\theta_{n,i} : \A{A}_n\to\A{A}_{n,i}$ ($i\in\NN$) be a commuting system of conditional expectation maps, and define $\theta^\xi : \prod \A{A}_n \to \prod \A{A}_{n,\xi(n)}$ by \[ \proj{n}{\theta^\xi(a)} = \theta_{n,\xi(n)}(\proj{n}{a}) \] We also let $p_n = \alpha^{\bar{0}}_n(1_{\A{A}_n})$ ($n\in\NN$) where $\bar{0}$ is the function with constant value $0$. 
Note that, by coherence, if $\eta\in\NN^\NN$ then \[ p_n - \alpha^\eta_n(1_{\A{A}_n}) \to 0 \] Moreover, if $p = \sum p_n$ then $\pi(p) = \pi(\alpha^\eta(1))$ for all $\eta$. \begin{claim} Let $(a,b)\in\prod(\A{A}_n)_1\times \A{M}(\A{B})_1$ be given. Then $(a,b)\in\Gr{\vp}$ if and only if $\quo{b} = \quo{\sum p_n b p_n}$ and \[ \lim_n \limsup_{i\to\infty} \norm{p_n b p_n - \alpha_n^{\xi_{n,i}}(\theta_{n,i}(\proj{n}{a}))} = 0 \tag{$*$} \] \end{claim} \begin{proof} Suppose that $(a,b)\in\Gr{\vp}$. Clearly $\quo{b} = \quo{pbp}$. Find some $\eta\in \NN^\NN$ such that $\quo{a} = \quo{\theta^\eta(a)}$. Then $\quo{b} = \quo{\alpha^\eta(\theta^\eta(a))}$, so \[ \lim_{n\to\infty} \norm{\left(\sum_{m\ge n} p_m\right) \left(pbp - \sum_m \alpha^\eta_m(\theta_{m,\eta(m)}(\proj{m}{a}))\right)\left(\sum_{m \ge n} p_m\right)} = 0 \] Since $p_m \alpha^\eta_n(x) p_k = 0$ whenever $m \neq k$, it follows that \[ \lim_{n\to\infty} \sup_{m\neq k,\, m,k\ge n} \norm{ p_m b p_k} = 0 \] and this implies $\pi(b) = \pi(\sum p_n b p_n)$. Now fix $k\in\NN$; since $X_k$ is $<^*$-cofinal in $\NN^\NN$ we may choose $\eta$ as above with $\eta\in X_k$. Then, for large enough $m\ge n_k$, we have for all $n\ge m$, \begin{align} \label{close.dom} \norm{\proj{n}{a} - \theta_{n,\eta(n)}(\proj{n}{a})} & \le \e_k \\ \label{close.ran} \norm{p_n b p_n - \alpha_n^\eta(\theta_{n,\eta(n)}(\proj{n}{a}))} & \le \e_k \end{align} Now fix $n\ge m$ and $i\ge\eta(n)$. Then, \begin{equation} \label{close.xieta} \norm{\alpha_n^{\xi_{n,i}}(\theta_{n,\eta(n)}(\proj{n}{a})) - \alpha_n^\eta(\theta_{n,\eta(n)}(\proj{n}{a}))} \le \e_k \end{equation} since $\xi_{n,i}$ and $\eta$ are both members of $X_k$. 
Finally, note that by~\eqref{close.dom}, \begin{equation} \label{close.ieta} \norm{\theta_{n,i}(\proj{n}{a}) - \theta_{n,\eta(n)}(\proj{n}{a})} \le \e_k \end{equation} Together the inequalities~\eqref{close.ran},~\eqref{close.xieta}, and~\eqref{close.ieta} imply \[ \norm{p_n b p_n - \alpha_n^{\xi_{n,i}}(\theta_{n,i}(\proj{n}{a}))} \le 3\e_k \] for any $n\ge m$ and $i\ge\eta(n)$, as required. Now assume that $(*)$ holds. Fix $k$, and choose $\eta\in X_k$ such that $\quo{a} = \quo{\theta^\eta(a)}$. By $(*)$ and the $K_1^{\e_k}$-homogeneity of $X_k$, for all large enough $n$ and $i$ we have \begin{align} \norm{p_n b p_n - \alpha_n^{\xi_{n,i}}(\theta_{n,i}(\proj{n}{a}))} & \le \e_k \\ \norm{\theta_{n,i}(\proj{n}{a}) - \theta_{n,\eta(n)}(\proj{n}{a})} & \le \e_k \\ \norm{\alpha_n^{\xi_{n,i}}(\theta_{n,\eta(n)}(\proj{n}{a})) - \alpha_n^\eta(\theta_{n,\eta(n)}(\proj{n}{a}))} & \le \e_k \end{align} Then, \[ \limsup_{n\to\infty} \norm{p_n b p_n - \alpha_n^\eta(\theta_{n,\eta(n)}(\proj{n}{a}))} \le 3\e_k \] Since $\alpha^\eta(\theta^\eta(a))$ is a representative of $\vp(\quo{a})$, it follows that for every $k$, \[ \norm{\quo{\sum p_n b p_n} - \vp(\quo{a})} \le 3\e_k \] and since $\quo{\sum p_n b p_n} = \quo{b}$, we have $(a,b)\in\Gr{\vp}$. \end{proof} The claim provides a Borel definition of $\Gr{\vp}$, hence the proof is complete. \end{proof} \begin{defn} Let $\A{A}$ be a C*-algebra, and suppose $\A{A} = \lim\A{A}_n$. 
We say that $\A{A}$ has the \emph{$(\delta,\e)$-intertwining property} with respect to the sequence $\A{A}_n$ ($n\in\NN$) if, for every sequence of $*$-homomorphisms $\alpha_n : \A{A}_{2n}\to \A{A}_{2n+1}$, $\beta_n : \A{A}_{2n+1}\to\A{A}_{2n+2}$ ($n\in\NN$) satisfying, for any $n\le m$, \[ \begin{tikzpicture} \matrix (m) [cdg.smallmatrix] { \A{A}_{2m+1} & & \A{A}_{2m+2} \\ & \delta & \\ \A{A}_{2n} & & \A{A}_{2n+1} \\ }; \path [cdg.path] (m-1-3) edge node[above]{$\beta_m$} (m-1-1) (m-3-1) edge node[below]{$\alpha_n$} (m-3-3) (m-3-1) edge (m-1-1) (m-3-3) edge (m-1-3); \end{tikzpicture} \qquad \begin{tikzpicture} \matrix (m) [cdg.smallmatrix] { \A{A}_{2m+2} & & \A{A}_{2m+3} \\ & \delta & \\ \A{A}_{2n+2} & & \A{A}_{2n+1} \\ }; \path [cdg.path] (m-1-1) edge node[above]{$\alpha_m$} (m-1-3) (m-3-3) edge node[below]{$\beta_n$} (m-3-1) (m-3-1) edge (m-1-1) (m-3-3) edge (m-1-3); \end{tikzpicture} \] there are $*$-homomorphisms $\alpha,\beta: \A{A}\to\A{A}$ such that for all $n\in\NN$, \[ \begin{tikzpicture} \matrix (m) [cdg.smallmatrix] { \A{A} & & \A{A} \\ & \e & \\ \A{A}_{2n} & & \A{A}_{2n+1} \\ }; \path [cdg.path] (m-1-3) edge node[above]{$\beta$} (m-1-1) (m-3-1) edge node[below]{$\alpha_n$} (m-3-3) (m-3-1) edge (m-1-1) (m-3-3) edge (m-1-3); \end{tikzpicture} \qquad \begin{tikzpicture} \matrix (m) [cdg.smallmatrix] { \A{A} & & \A{A} \\ & \e & \\ \A{A}_{2n+2}& & \A{A}_{2n+1} \\ }; \path [cdg.path] (m-1-1) edge node[above]{$\alpha$} (m-1-3) (m-3-3) edge node[below]{$\beta_n$} (m-3-1) (m-3-1) edge (m-1-1) (m-3-3) edge (m-1-3); \end{tikzpicture} \] \end{defn} \begin{rmk} The diagrams above imply that for any $n < m$ we have \[ \norm{\alpha_m\rs\A{A}_{2n} - \alpha_n},\norm{\beta_m\rs\A{A}_{2m+1} - \beta_n} \le 2\delta \] Moreover if $\alpha,\beta$ are as in the conclusion, then \[ \norm{\alpha\rs\A{A}_{2n} - \alpha_n}, \norm{\beta\rs\A{A}_{2n+1} - \beta} \le 2\e \] and \[ \norm{\alpha\circ\beta - \id}, \norm{\beta\circ\alpha - \id} \le 2\e \] \end{rmk} \begin{prop} Let 
$\A{A}_n$ ($n\in\NN$) be UHF algebras. Then the following are equivalent. \begin{enumerate} \item\label{tfae.borel} Every automorphism of $\prod\A{A}_n / \bigoplus \A{A}_n$ with Borel graph has a strict, algebraic lift. \item\label{tfae.intertwining} For every $\e > 0$ there is some $\delta > 0$ such that for all large enough $n$, $\A{A}_n$ has the $(\delta,\e)$-intertwining property with respect to any suitable sequence $\A{A}_{n,k}$ ($k\in\NN$). \end{enumerate} \end{prop} \begin{proof} We first prove that $\lnot\eqref{tfae.intertwining}$ implies $\lnot\eqref{tfae.borel}$. Fix a sequence $\delta_n$ ($n\in\NN$) of positive reals tending to zero. Assuming $\lnot\eqref{tfae.intertwining}$, we may construct, by a straightforward recursion, an infinite set $\SN{I}\subseteq\NN$ and for every $n\in\SN{I}$, \begin{enumerate} \item a suitable sequence $\A{A}_{n,k}$ ($k\in\NN$) of subalgebras of $\A{A}_n$, and \item $*$-homomorphisms $\alpha_{n,k} : \A{A}_{n,2k}\to \A{A}_{n,2k+1}$ and $\beta_{n,k} : \A{A}_{n,2k+1}\to \A{A}_{n,2k+2}$ \end{enumerate} such that for all $k \le \ell$, we have \[ \begin{tikzpicture} \matrix (m) [cdg.smallmatrix] { \A{A}_{n,2\ell+2} & & \A{A}_{n,2\ell+1} \\ & \delta_n & \\ \A{A}_{n,2k} & & \A{A}_{n,2k+1} \\ }; \path [cdg.path] (m-1-3) edge node[above]{$\beta_\ell$} (m-1-1) (m-3-1) edge node[below]{$\alpha_k$} (m-3-3) (m-3-1) edge (m-1-1) (m-3-3) edge (m-1-3); \end{tikzpicture} \qquad \begin{tikzpicture} \matrix (m) [cdg.smallmatrix] { \A{A}_{n,2\ell+2} & & \A{A}_{n,2\ell+3} \\ & \delta_n & \\ \A{A}_{n,2k+2} & & \A{A}_{n,2k+1} \\ }; \path [cdg.path] (m-1-1) edge node[above]{$\alpha_\ell$} (m-1-3) (m-3-3) edge node[below]{$\beta_k$} (m-3-1) (m-3-1) edge (m-1-1) (m-3-3) edge (m-1-3); \end{tikzpicture} \] but for any pair of $*$-homomorphisms $\alpha,\beta : \A{A}_n\to \A{A}_n$ there is some $k\in\NN$ such that either $\norm{\beta\circ\alpha_{n,k} - \id} > \e$ or $\norm{\alpha\circ \beta_{n,k} - \id} > \e$, where $\e > 0$ is fixed. 
When $n\not\in\SN{I}$ we take any suitable sequence $\A{A}_{n,k}$ and let $\alpha_{n,k}$ and $\beta_{n,k}$ be the inclusion maps $\A{A}_{n,2k}\to\A{A}_{n,2k+1}$ and $\A{A}_{n,2k+1}\to\A{A}_{n,2k+2}$ respectively. Then the families \[ \alpha^\xi_n = \alpha_{n,\xi(n)}, \quad \beta^\xi_n = \beta_{n,\xi(n)} \qquad (\xi\in\NN^\NN, n\in\NN) \] are coherent, and hence determine endomorphisms $\vp$ and $\psi$ of $\prod \A{A}_n / \bigoplus \A{A}_n$ respectively. It is easy to see that $\vp$ and $\psi$ are inverses, so $\vp$ is an automorphism. Note that, for each $a,b\in \prod (\A{A}_n)_1$, $(a,b)\in\Gamma_\vp$ if and only if \begin{align*} \exists \xi\in\NN^\NN\;\; \exists x\in(\PA{A}_{2\xi})_1\quad \lim \norm{\proj{n}{a} - \proj{n}{x}} & = 0 \\ \mbox{ and } \lim \norm{\alpha_{n,\xi(n)}(\proj{n}{x}) - \proj{n}{b}} & = 0 \end{align*} if and only if \begin{align*} \forall \xi\in\NN^\NN\;\; \forall x\in(\PA{A}_{2\xi})_1\quad \mbox{if }\lim \norm{\proj{n}{a} - \proj{n}{x}} & = 0 \\ \mbox{ then } \lim \norm{\alpha_{n,\xi(n)}(\proj{n}{x}) - \proj{n}{b}} & = 0 \end{align*} so $\Gamma_\vp$ is Borel. Now suppose $\vp$ and $\psi$ have strict algebraic lifts $\alpha$ and $\beta$; let $\alpha_n$ and $\beta_n$ ($n\in\NN$) be the coordinate $*$-homomorphisms for $\alpha$ and $\beta$, respectively. For each $n\in\SN{I}$ we may choose some $\xi(n)$ such that $\norm{\alpha_n\circ\beta_{n,\xi(n)} - \id} > \e$ or $\norm{\beta_n\circ\alpha_{n,\xi(n)} - \id} > \e$; it follows that there is some $x\in\prod\A{A}_{n,\xi(n)}$ such that $\psi(\vp(\quo{x})) \neq \quo{x}$ or $\vp(\psi(\quo{x})) \neq \quo{x}$, a contradiction. Now we show that~\eqref{tfae.intertwining} implies~\eqref{tfae.borel}. Assume~\eqref{tfae.intertwining} and fix an automorphism $\vp$ of $\prod \A{A}_n / \bigoplus \A{A}_n$ with Borel graph.
Notice that the statement ``$\vp$ has a strict algebraic lift'' is $\PC{\Sigma}^1_2$, and condition~\eqref{tfae.intertwining} is $\PC{\Pi}^1_2$; hence both are absolute between the ground model and any forcing extension, and so we may assume $\TA$ without any loss of generality. Theorem~\ref{definable->lifts} implies that both $\vp$ and $\vp^{-1}$ are determined by coherent families of $*$-homomorphisms, say \[ \alpha^\xi_n : \A{A}_{n,\xi(n)} \to \bigoplus \A{A}_n \quad \beta^\xi_n : \A{A}_{n,\xi(n)} \to \bigoplus\A{A}_n \qquad (\xi\in\NN^\NN, n\in\NN) \] respectively. Let $[\NN^\NN]^2 = K_0^\e\cup K_1^\e$ and $[\NN^\NN]^2 = L_0^\e \cup L_1^\e$ be the colorings defined in the proof of Theorem~\ref{cfh->borel}, given the coherent families $\alpha^\xi_n$ and $\beta^\xi_n$ respectively. Let $M_0^\e = K_0^\e\cup L_0^\e$; then $M_0^\e$ is open in an appropriate separable metrizable topology, and $\NN^\NN$ has no uncountable, $M_0^\e$-homogeneous subsets, for any $\e > 0$. Arguing as in Theorem~\ref{cfh->borel}, we may find $*$-homomorphisms \[ \alpha_{n,k} : \A{A}_{n,k} \to \bigoplus \A{A}_n \qquad \beta_{n,k} : \A{A}_{n,k} \to \bigoplus \A{A}_n \] such that for some sequence $\delta_n\to 0$ and all $\xi$, we have \[ \norm{\alpha^\xi_n - \alpha_{n,\xi(n)}}, \norm{\beta^\xi_n - \beta_{n,\xi(n)}} \le \delta_n \] Applying the main result of~\cite{Velickovic.OCAA} to the central automorphism induced by $\vp$, we may find a function $e : \NN\to\NN$ such that $\vp(\quo{\zeta}) = \quo{\zeta\circ e}$ for all central $\zeta$. Then for all but finitely many $n$, and all $k$, $\alpha_{n,k}$ maps into $\A{A}_{e(n)}$, and $\beta_{n,k}$ maps into $\A{A}_{e^{-1}(n)}$. Moreover, arguing as in the proof of Corollary~\ref{main.cor}, for all but finitely many $n$ we have that $\A{A}_n\simeq \A{A}_{e(n)}$. By composing with this isomorphism, or its inverse, we may assume that each $\alpha_{n,k}$ and $\beta_{n,k}$ maps into $\A{A}_n$. 
Finally, by perturbing each $\alpha_{n,k}$ and $\beta_{n,k}$ by an amount tending to zero as $n\to\infty$, we may assume that $\alpha_{n,k}$ maps into $\A{A}_{n,k'}$, and $\beta_{n,k}$ into $\A{A}_{n,k'}$, for some large enough $k'$ depending on $k$. We are now in a situation where we can apply condition~\eqref{tfae.intertwining}; choose $\alpha_n : \A{A}_n\to\A{A}_n$ and $\beta_n : \A{A}_n\to\A{A}_n$ such that \[ \norm{\alpha_n\circ \beta_{n,k} - \id},\norm{\beta_n\circ \alpha_{n,k} - \id} \le \e_n \] It follows that the sequence $\alpha_n$ ($n\in\NN$) determines a strict algebraic lift of $\vp$. \end{proof}
1,108,101,565,891
arxiv
\section{Introduction} \label{intro} Let $(\Omega,\mathscr{A},\mu)$ be a measure space, $P$ be a finite index set and $\{A_p\}_{p\in P}\subseteq \mathscr{A}$ be a family of measurable sets. The formula \begin{equation} \mu\left(\bigcap_{p\in P}{\overline{A}_p}\right)=\sum_{I\subseteq P}(-1)^{|I|}\mu\left(\bigcap_{i\in I}A_i\right) \end{equation} is known as the principle of inclusion-exclusion, where $\overline{A}_p$ denotes the complement of $A_p$. The principle of inclusion-exclusion is a classic counting technique in combinatorics and has been extensively studied \cite{Dohmen02,Dohmen01,Narushima01,Riordan01,Rota01,Whitney01}. Since the sum on the right side of Eq.~(1) ranges over a large number of terms, it is natural to ask whether fewer terms would give the same result, that is, is it possible to reduce the number of terms by predicted cancellation? Many answers to this question have been given by several authors. A well-known example is the one given by Whitney \cite{Whitney01} in 1932 for the chromatic polynomial of a graph, which states that the calculation of a chromatic polynomial can be restricted to the collection of those sets of edges which do not include any broken circuit as a subset. Various cancellations for the inclusion-exclusion principle were given from the perspective of both combinatorics and graph theory in the literature. In \cite{Narushima01}, Narushima presented a cancellation for the inclusion-exclusion principle, depending on a prescribed ordering on the index set $P$. This result was later improved by Dohmen \cite{Dohmen02}. Using the same technique, Dohmen \cite{Dohmen01} also established an abstraction of Whitney's broken circuit theorem, which not only applies to the chromatic polynomial, but also to other graph polynomials, see \cite{Dohmen05,Dohmen08,Dohmen01,Liao01,Trinks} for details.
So far, the known cancellation methods for the inclusion-exclusion principle strongly depend on the prescribed (linear or partial) ordering on the index set $P$. In this article we establish a new cancellation method, which does not require any ordering on $P$. Our method extends all the `ordering-based' methods given in the previous literature and in general may reduce more terms. As examples, we use our `ordering-free' method to improve the relevant results on the chromatic polynomial of hypergraphs, the independence polynomial and domination polynomial of graphs. \section{Inclusion-exclusion by predicted cancellations} For a subset $B$ of a poset (partially ordered set) $P$, let $B'$ denote the set of upper bounds of $B$ which are not in $B$, that is, \begin{equation*} B'=\{p\in P:p>b\ \ {\rm for\ all}\ \ b\in B\}. \end{equation*} In \cite{Narushima01}, Narushima presented a cancellation for the inclusion-exclusion principle on semilattices. This result was later extended to many forms. The following one was given by Dohmen \cite{Dohmen02}: \begin{theorem}\label{thm1} \cite{Dohmen02} Let $(\Omega,\mathscr{A},\mu)$ be a measure space, $P$ be a poset and $\{A_p\}_{p\in P}\subseteq \mathscr{A}$ be a family of measurable sets. If $\mathfrak{X}$ is a class of subsets of $P$ such that \begin{equation} \bigcap_{p\in B}A_p\subseteq \bigcup_{p\in B'}A_{p} \end{equation} for each $B\in\mathfrak{X}$, then \begin{equation}\label{main1} \mu\left(\bigcap_{p\in P}{\overline{A}_p}\right)=\sum_{I\in 2^{P}\setminus\mathfrak{I}}(-1)^{|I|}\mu\left(\bigcap_{i\in I}A_i\right), \end{equation} where $2^P$ is the power set of $P$ and $\mathfrak{I}=\{I\subseteq P:I\supseteq B\ \ {\rm for\ some}\ \ B\in\mathfrak{X}\}.$ \end{theorem} Let $\{B_1,B^*_1\},\{B_2,B^*_2\},\cdots,\{B_k,B^*_k\}$ be pairs of subsets of $P$ with $B_i\cap B^*_i=\emptyset$ for every $i\in\{1,2,\cdots,k\}$.
Denote $$\mathscr{B}_i=\{I\subseteq P: I\supseteq B_i, I\nsupseteq B_j\setminus B^*_i\ {\rm for}\ j<i\}$$ and \begin{equation} \mathscr{B}=\mathscr{B}_1\cup\mathscr{B}_2\cup\cdots\cup\mathscr{B}_k. \end{equation} We note that $\mathscr{B}_i$ is empty when $B_j\setminus B^*_i\subseteq B_i$ for some $j<i$ since no $I$ satisfies the requirement. We now give our main result which does not require any ordering on $P$. \begin{theorem}\label{thm2} Let $(\Omega,\mathscr{A},\mu)$ be a measure space, $P$ be a set and $\{A_p\}_{p\in P}\subseteq \mathscr{A}$ be a family of measurable sets. Let $\{B_1,B^*_1\},\{B_2,B^*_2\},\cdots,\{B_k,B^*_k\}$ be pairs of subsets of $P$. If $B_i\cap B^*_i=\emptyset$ and \begin{equation} \bigcap_{p\in B_i}A_p\subseteq \bigcup_{p\in B^*_i}A_p \end{equation} for every $i\in\{1,2,\cdots,k\}$, then \begin{equation}\label{main} \mu\left(\bigcap_{p\in P}{\overline{A}_p}\right)=\sum_{I\in 2^P\setminus \mathscr{B}}(-1)^{|I|}\mu\left(\bigcap_{i\in I}A_i\right). \end{equation} \end{theorem} \begin{proof} Let $I\in\mathscr{B}$. Then $I\in\mathscr{B}_i$ for some $i\in\{1,2,\cdots,k\}$. We claim that such $\mathscr{B}_i$ is unique. In fact, suppose to the contrary that $I\in\mathscr{B}_j$ and, with no loss of generality, that $j<i$. Then by the definition of $\mathscr{B}_i$, $I\nsupseteq B_j$. This contradicts that $I\in \mathscr{B}_j$. As a result, $$\mathscr{B}_1,\mathscr{B}_2,\cdots,\mathscr{B}_k$$ are pairwise disjoint and therefore, (4) is a partition of $\mathscr{B}$. For $I\in \mathscr{B}_i$, let $I^*=I\setminus B^*_i$. Since $I\supseteq B_i$ and $B_i\cap B^*_i=\emptyset$, we have $I^*\supseteq B_i$. We claim that $I^*\cup D^*_i\in\mathscr{B}_i$ for any $D^*_i\subseteq B^*_i$. Suppose to the contrary that $I^*\cup D^*_i\notin \mathscr{B}_i$ for some $D^*_i\subseteq B^*_i$. Since $I^*\cup D^*_i\supseteq I^*\supseteq B_i$, by the definition of $\mathscr{B}_i$, $I^*\cup D^*_i\supseteq B_j\setminus B^*_i$ for some $j<i$.
Thus, $I^*\supseteq B_j\setminus B^*_i$ since $D^*_i\subseteq B^*_i$. Therefore, $$I\supseteq I^*\supseteq B_j\setminus B^*_i.$$ This is a contradiction because $I\in \mathscr{B}_i$, i.e., $I\nsupseteq B_j\setminus B^*_i$. Our claim follows. For $I\in \mathscr{B}_i$, let $$\langle I\rangle=\{I^*\cup D^{*}_i:D^*_i\subseteq B^*_i\}.$$ Then $$\sum_{J\in \langle I\rangle}(-1)^{|J|}\mu\left(\bigcap_{p\in J}A_p\right)=\sum_{D^*_i\subseteq B^*_i}(-1)^{|I^*\cup D^*_i|}\mu\left(\bigcap_{p\in I^*}A_p\cap\bigcap_{p\in D^*_i}A_p\right)$$ $$=(-1)^{|I^*|}\sum_{D^*_i\subseteq B^*_i}(-1)^{|D^*_i|}\mu\left(\bigcap_{p\in I^*}A_p\cap\bigcap_{p\in D^*_i}A_p\right)$$ $$=(-1)^{|I^*|}\mu\left(\bigcap_{p\in I^*}A_p\cap\bigcap_{p\in B^*_i}\overline{A}_p\right),$$ where the last equality holds by the principle of inclusion-exclusion. Notice that $\bigcap_{p\in B^*_i}\overline{A}_p$ is the complement of $\bigcup_{p\in B^*_i}A_p$. So by (5), $$\bigcap_{p\in I^*}A_p\cap\bigcap_{p\in B^*_i}\overline{A}_p=\emptyset$$ since $I^*\supseteq B_i$. Therefore, \begin{equation} \sum_{J\in \langle I\rangle}(-1)^{|J|}\mu\left(\bigcap_{p\in J}A_p\right)=0. \end{equation} Finally, for any $I,J\in \mathscr{B}_i$, by the definition of $I^*$ we can see that either $\langle J\rangle\cap \langle I\rangle=\emptyset$ or $\langle J\rangle=\langle I\rangle$. In other words, $\bigcup_{I\in\mathscr{B}_i}\langle I\rangle$ is a partition of $\mathscr{B}_i$, written by $$\mathscr{B}_i=\langle I_1\rangle\cup\langle I_2\rangle\cup\cdots\cup\langle I_t\rangle.$$ Thus, $$\sum_{I\in \mathscr{B}}(-1)^{|I|}\mu\left(\bigcap_{i\in I}A_i\right)=\sum_{i=1}^k\sum_{I\in \mathscr{B}_i}(-1)^{|I|}\mu\left(\bigcap_{i\in I}A_i\right)$$ $$=\sum_{i=1}^k\sum_{j=1}^t\sum_{I\in\langle I_j\rangle}(-1)^{|I|}\mu\left(\bigcap_{i\in I}A_i\right)=0.$$ So (6) follows directly, which completes our proof. \end{proof} \noindent{\bf Remark}. 
Theorem \ref{thm2} is an extension of Theorem \ref{thm1} and may reduce more terms: Firstly, let $\mathfrak{X}$ be defined as in Theorem \ref{thm1}. Set $\{B_1,B_2,\cdots,B_k\}=\mathfrak{X}$ and, for $i\in\{1,2,\cdots,k\}$, set $B^*_i=B'_i$ and let $b_i=\min B'_i$ (the minimum element in $B'_i$). Without loss of generality, we may assume that $b_1\leq b_2\leq \cdots \leq b_k$. If $I\in \mathfrak{I}$, say $I$ contains exactly $B_{i_1},B_{i_2},\cdots,B_{i_p}$ with $p>0$ and $i_1<i_2<\cdots<i_p$, then we claim that $I\in\mathscr{B}_{i_1}$ and, therefore, $I\in\mathscr{B}$. Suppose to the contrary that $I\notin\mathscr{B}_{i_1}$. Then there is $j<i_1$ such that $I\supseteq B_j\setminus B'_{i_1}$. On the other hand, by the minimality of $i_1$, we have $I\nsupseteq B_j$ since $j<i_1$. This means that there is $b\in B'_{i_1}$ such that $b\in B_j$. Therefore, $b<b_j$ since $b_j$ is an upper bound of $B_j$. This is a contradiction since $b_j\leq b_{i_1}\leq b$. Our claim follows. Conversely, if $I\in\mathscr{B}$, say $I\in\mathscr{B}_{i}$, then we have $I\supseteq B_i$ and, therefore, $I\in \mathfrak{I}$. As a result, we have $\mathfrak{I}=\mathscr{B}$. Thus, (\ref{main}) implies (3). Secondly, suppose that $\{B,B^*\}$ is a pair such that $B$ differs from $B_1,B_2,\cdots,B_k$; $\{B,B^*\}$ satisfies (5); $\{B,B'\}$ does not satisfy (2); $B_i\setminus B^*\nsubseteq B$ for any $i=1,2,\cdots,k$. Then $\mathscr{B}$ can contain $B$ as an element while $\mathfrak{X}$ and therefore $\mathfrak{I}$ cannot contain $B$ as an element. This means that $\mathscr{B}\supsetneqq \mathfrak{I}$, that is, (6) reduces more terms than (3) does. \hfill$\square$ \section{Examples in graph polynomials} As examples, in this section we apply Theorem \ref{thm2} to the chromatic polynomial of hypergraphs, and the independence and domination polynomials of graphs. We will see that the ordering-free method reduces more terms than the ordering-based method.
Let $P(G,x)$ be a graph polynomial of a graph $G$ represented in the form of inclusion-exclusion principle, i.e., \begin{equation*} P(G,x)=\sum_{F\subseteq E(G)}(-1)^{|F|}p(F,x), \end{equation*} where $E(G)$ is the edge set of $G$ and $p(F,x)$ is a polynomial in $x$ associated with $F\subseteq E(G)$. We specialize the index set $P$ to be $E(G)$ and, for any $F\subseteq E(G)$, set \begin{equation} \mu\left(\bigcap_{e\in F}{A_e}\right)=p(F,x). \end{equation} For a pair $B,B^*\subseteq E(G)$ with $B\cap B^*=\emptyset$, if $B^*$ is a single-edge set, say $B^*=\{b\}$, then the condition \begin{equation*} \bigcap_{e\in B}{A_e}\subseteq \bigcup_{e\in B^*}{A_e}, \end{equation*} i.e., $\bigcap_{e\in B}{A_e}\subseteq A_b$, is equivalent to \begin{equation*} \bigcap_{e\in B}{A_e}=\bigcap_{e\in B\cup\{b\}}{A_e}. \end{equation*} Combining with (8), we have \begin{equation} p(B,x)=p(B\cup\{b\},x). \end{equation} Thus, a pair $\{B,\{b\}\}$ (viewed as $\{B_i,B_i^*\}$) satisfies the requirement of Theorem \ref{thm2} provided it satisfies (9). We refer to such pair $\{B,b\}$ as a {\it broken pair} of $P(G,x)$ and $B$ a {\it broken set} if $B$ is minimal (i.e., $B$ has no proper subset satisfying (9)). Further, given a linear ordering `<' on $E(G)$, we call $B$ a {\it broken pair with respect to} `<' if $\{b\}=B'$. By Theorem \ref{thm2} we have the following corollary immediately. \begin{corollary}\label{cor} Let $\{B_1,B^*_1\},\{B_2,B^*_2\},\cdots,\{B_k,B^*_k\}$ be broken pairs of $P(G,x)$. Then \begin{equation*} P(G,x)=\sum_{F\in 2^{E(G)}\setminus \mathscr{B}}(-1)^{|F|}p(F,x). \end{equation*} \end{corollary} \noindent{\bf Chromatic polynomial of hypergraph}. The chromatic polynomial $\chi(H,x)$ of a simple hypergraph $H$ counts the number of the vertex colorings such that each (hyper) edge of cardinality at least two has two vertices of distinct colors \cite{Berge,Dohmen01}. 
The following inclusion-exclusion expression was given in \cite{Dohmen01,Trinks}: $$\chi(H,x)=\sum_{F\subseteq E(H)}(-1)^{|F|}x^{c(F)},$$ where $c(F)$ is the number of the components of the spanning subgraph of $H$ with edge set $F$. Given a linear order `<' on the edge set $E(H)$, Dohmen \cite{Dohmen01} generalized Whitney's broken circuit theorem to hypergraphs by extending the broken circuit defined on a cycle (see \cite{Berge} for the definition of a cycle), with the particular constraint that each edge of the cycle is included by the union of the other edges of that cycle. A set $F\subseteq E(H)$ is called a {\it $\delta$-cycle} if $F$ is minimal such that $c(F\setminus\{f\})=c(F)$ for each $f\in F$. We note that every cycle with the above particular constraint is or contains a $\delta$-cycle while a $\delta$-cycle is not necessarily a cycle with this constraint. A set $B$ is called a {\it broken cycle} if $B$ is obtained from a $\delta$-cycle by deleting its maximum edge. In \cite{Trinks}, Trinks generalized Dohmen's result by extending the broken circuit to broken cycle. For $B\subseteq E(H)$ and $b\in E(H)\setminus B$, by (9) it can be seen that $B$ is a broken set of $\chi(H,x)$ provided $B$ is minimal such that \begin{equation} c(B)=c(B\cup\{b\}). \end{equation} We can see that the notion `broken set' for hypergraph is an extension of `broken cycle'. Moreover, in condition (10) there is no need to require $b$ to be the maximum edge of $B\cup\{b\}$ for a broken set. Let's consider the hypergraph $H=(V,E)$ with vertex set $V=\{1,2,3,4,5,6\}$ and edge set $E=\{\{1,2,3\},\{3,4,5\},\{2,3,4\},\{1,2,6\}\}$. We note that $H$ contains neither broken circuit (with the particular constraint) nor broken cycle, no matter how its edges are ordered. This means that no terms in $\chi(H,x)$ can be reduced by broken circuit or broken cycle. For an edge $\{i,j,k\}$ we write it simply as $ijk$.
By (10) it can be seen that $H$ has two broken sets $B_1=\{123,345\}$ with $B_1^*=\{b_1\}=\{234\}$ and $B_2=\{234,126\}$ with $B_2^*=\{b_2\}=\{123\}$. Therefore,\\ $\mathscr{B}_1=\{\{123,345\},\{123,345,234\},\{123,345,126\},\{123,345,234,126\}\}$ and\\ $\mathscr{B}_2=\{\{234,126\},\{234,126,123\}\}$. Consider the edge ordering $123<345<234<126$. Again by (10), $H$ contains only one broken set with respect to `<', i.e., $B=\{123,345\}$ with $B'=\{234\}$. Thus, $\mathfrak{X}=\{B\}$ (see Theorem \ref{thm1}) and \\ $\mathfrak{I}=\{\{123,345\},\{123,345,234\},\{123,345,126\},\{123,345,234,126\}\}=\mathscr{B}_1$. So by Theorem \ref{thm1} and Corollary \ref{cor}, the chromatic polynomial of $H$ is $$\chi(H,x)=\sum_{F\in 2^{E}\setminus\mathfrak{I}}(-1)^{|F|}x^{c(F)}=\sum_{F\in 2^{E}\setminus(\mathscr{B}_1\cup\mathscr{B}_2)}(-1)^{|F|}x^{c(F)}=x^6-4x^4+3x^3+x^2-x.$$ Moreover, we see that $|2^{E}|=16>|2^{E}\setminus\mathfrak{I}|=12>|2^{E}\setminus(\mathscr{B}_1\cup\mathscr{B}_2)|=10$. Finally, it can be seen that $H$ has at most one broken set with respect to `<', no matter how the order `<' is defined. \noindent{\bf Independence polynomial of graph}. For a graph $G$, the independence polynomial \cite{Gutman02,Hoede01} of $G$ can be represented as the following inclusion-exclusion formula \cite{Dohmen05}: \begin{equation} I(G,x)=\sum_{F\subseteq E(G)}(-1)^{|F|}x^{|G[F]|}(1+x)^{n-|G[F]|}, \end{equation} where $|G[F]|$ is the number of vertices in the subgraph of $G$ induced by $F$. It was shown \cite{Dohmen05} that Whitney's broken circuit theorem is also valid for the independence polynomial. By (9) and (11), a set $B$ of edges is a broken set provided $B$ is minimal such that $G[B]=G[B\cup\{b\}]$ for some $b\notin B$. This means that $B=\{e_1,e_2\}$ and $e_1be_2$ is a path or a cycle of length 3. We call such $B$ a {\it broken path}. We note that every broken circuit includes a broken path as a subgraph.
Let's consider the path $G=e_1e_2e_3e_4$ of length 4 with edge ordering $e_1<e_3<e_2<e_4$. Similar to the previous example, we have $B_1=\{e_1,e_3\}$ with $B_1^*=\{e_2\}$ and $B_2=\{e_2,e_4\}$ with $B_2^*=\{e_3\}$, and $\mathfrak{X}=\{\{e_1,e_3\}\}$. Therefore: \\ $\mathscr{B}_1=\{\{e_1,e_3\},\{e_1,e_2,e_3\},\{e_1,e_3,e_4\},\{e_1,e_2,e_3,e_4\}\}$;\\ $\mathscr{B}_2=\{\{e_2,e_4\},\{e_2,e_3,e_4\}\}$; and\\ $\mathfrak{I}=\{\{e_1,e_3\},\{e_1,e_2,e_3\},\{e_1,e_3,e_4\},\{e_1,e_2,e_3,e_4\}\}=\mathscr{B}_1.$ \noindent{\bf Domination polynomial of graph}. For a graph $G$ and $W\subseteq V(G)$, denote by $N[W]$ the closed neighbourhood of $W$, i.e., $$N[W]=W\cup\{v:v\ {\rm is\ adjacent\ to\ some\ vertex\ in}\ W\}.$$ Let $d_i$ be the number of the sets $W$ of $i$ vertices such that $N_G[W]=V(G)$. The domination polynomial $D(G,x)$ is defined by $D(G,x)=\sum_{i=1}^nd_ix^i.$ The following form was given in \cite{Dohmen08}, \begin{equation} D(G,x)=\sum_{W\subseteq V(G)}(-1)^{|W|}(1+x)^{n-|N[W]|}. \end{equation} A set $B$ is called {\it broken neighbourhood} if $B=N(v)$ and $v=\max N[v]$. In \cite{Dohmen08}, Dohmen and Tittmann proved that the sum in (12) can be restricted to those subsets of vertices which do not contain any broken neighbourhood. Due to (12), we replace the role of edges in (9) by vertices. For $B\subseteq V(G)$ and $b\in V(G)\setminus B$, by (9) it can be seen that $B$ is a broken set of $D(G,x)$ provided $B$ is minimal such that $|N[B]|=|N[B\cup\{b\}]|,$ i.e., \begin{equation} N[b]\subseteq N[B]. \end{equation} We can see that the`broken set' of $D(G,x)$ is an extension of `broken neighbourhood'. Consider the path $P=v_1v_2v_3v_4$ with vertex ordering $v_1<v_4<v_3<v_2$. Similarly, by (13) we have $B_1=\{v_1,v_3\}$ with $B_1^*=\{v_2\}$, $B_2=\{v_1,v_4\}$ with $B_2^*=\{v_2\}$ and $B_3=\{v_2,v_4\}$ with $B_3^*=\{v_3\}$, and $\mathfrak{X}=\{\{v_1,v_3\},\{v_1,v_4\}\}$. 
Therefore: \\ $\mathscr{B}_1=\{\{v_1,v_3\},\{v_1,v_2,v_3\},\{v_1,v_3,v_4\},\{v_1,v_2,v_3,v_4\}\}$;\\ $\mathscr{B}_2=\{\{v_1,v_4\},\{v_1,v_2,v_4\}\}$;\\ $\mathscr{B}_3=\{\{v_2,v_4\},\{v_2,v_3,v_4\}\}$; and\\ $\mathfrak{I}=\{\{v_1,v_3\},\{v_1,v_2,v_3\},\{v_1,v_3,v_4\},\{v_1,v_2,v_3,v_4\},\{v_1,v_4\},\{v_1,v_2,v_4\}\}=\mathscr{B}_1\cup\mathscr{B}_2.$ \section*{Acknowledgments} This work was supported by the National Natural Science Foundation of China [Grant numbers, 11471273, 11561058].
1,108,101,565,892
arxiv
\section{Introduction} Because of their extreme luminosity in $\gamma$-rays, gamma ray bursts (GRBs) are a unique probe to high energy regimes where exotic physics is likely to manifest. A fraction of GRBs have been associated with the collapse of massive stars via the association of supernova signatures observed with the fading GRB optical afterglow e.g. \citep{hjorth03,stan03}. The afterglow most likely originates from an external shock produced as the blast wave from the progenitor collides with the interstellar medium causing it to slow down and lose energy. Fast moving telescopes linked to GCN notices \citep{Barthelmy1998} are able to record the optical counterpart at the time when the prompt $\gamma-$ray emission is still active. The first positive detection of such event was GRB~990123 \citep{akerlof99}. Some other successful detections have been achieved so far \citep[e.g.][]{Rascusin08}. Two general results have been seen: Either a bright optical emission, uncorrelated to the gamma-ray light curve, occurred \citep[for 5 to 20\% of GRBs according to][]{klotz09}, or that a faint optical emission is correlated with the gamma-ray flares \citep[\objectname{GRB~050820A},][]{Vestrand2006}. In the former case, these bright optical flashes are often interpreted as the reverse shock signature \citep{Jin2007}. \\ Time lags between X--ray and gamma-ray data are often observed \citep[e.g.][]{Norris00}. However, this is rare between optical and $\gamma-$rays. As an example, \cite{Tang06} estimated the most probable time lags for the light curves of \objectname{GRB~990123} (5--7\,sec) and GRB~041219A (1--5\,sec). However, the optical data have poor time sampling, putting doubts on these results. Moreover, no lag was noticed for GRB~041219 by \citet{Zheng2006}. No lag was reported for GRB~050820A at a level of few seconds \citep{Vestrand2006}. 
\\ In this letter, we present the measurements of the optical emission observed by TAROT \citep{klotz08} during the prompt $\gamma-$ray activity of \objectname{GRB~081126}. We show evidence for a positive time lag between optical and $\gamma-$ray light curves. \section{GRB~081126} GRB~081126 (Swift BAT trigger 335647, with T$_0$=26th Nov. 2008, 21:34:10 UT) light curve shows a small precursor starting at $\sim$T$_0-$30 s, peaking at $\sim$T$_0-$18 s, and returning almost to zero at T$_0-$7 s \citep{Sato08}. The burst features two peaks, the first one at $\sim$T$_0+$1.5 s, reaching its maximum at $\sim$T$_0+$7 sec. The second one peaks at $\sim$T$_0+$31.5 sec. The duration of that burst is T$_{90} = 54\pm4$\,s (15-350 keV). This event was also detected by Konus-Wind \citep{Golenetskii08} and the Fermi GBM \citep{Bhat08}. The time-averaged spectrum of the first pulse from T$_0$ to T$_0+$11 s is well fit by a Band function with E$_{peak}$ = 192 $\pm$ 74 keV, alpha = -0.3 $\pm$ 0.4, and beta = -1.6 $\pm$ 0.1. The second pulse from about T$_0+$20 s to T$_0+$40 s is also well fit by a Band function with E$_{peak}$ = 162 $\pm$ 77 keV, alpha = -0.3 $\pm$ 0.5, and beta = -1.6 $\pm$ 0.1. The fluences (8-1000 keV) in the two pulses are (2.7 $\pm$ 0.8)$\times$10$^{-7}$ erg\,cm$^{-2}$ and (1.9 $\pm$ 0.8)$\times$10$^{-7}$ erg\,cm$^{-2}$ respectively. XRT observation reported by \cite{Margutti08} started 65.7 seconds after the BAT trigger, too late to gather X--ray information of the second peak. The XRT detected a characteristic afterglow emission of the burst. This afterglow was not detected by a quick visual inspection of images taken by TAROT, which started 20.6 s after the burst \citep{gendre08}. However, \cite{Skvarc08} reported the optical light curve of the afterglow in R band using the 60\,cm telescope of the Crni Vrh Observatory. Their observations start at T$_0$+82s. They observe a slow rise in optical emission that peaks 200s after the trigger and then fades.
This optical afterglow was also reported by \cite{Andreev08}, using the Z-600 telescope of Mt. Terskol observatory, 33 minutes after the burst, and by UVOT \citep{Holland08} at 21$^h$34$^m$03.59$^s$ +48$^\circ$42'38.3" (J2000.0). They report that the detection in the U filter, combined with the lack of detections in the UV filters, is consistent with the afterglow having a redshift of approximately $2.4<z<3.8$ \citep{Holland08}. Unfortunately, no other photometric observations were performed to improve this estimation. From Konus-Wind data, we deduced a pseudo-redshift of $5.3 \pm 1.8$ using the method described in \cite{att03}. \\ The Galactic latitude of the afterglow position is -2.29$^\circ$ and the corresponding extinction is E(B-V)=0.782 mag. according to \cite{Schlegel1998}. Assuming $R$=3.1, this gives A$_{V}$=2.6 and A$_{R}$=2.1 mag. \\ \section{TAROT data} The first TAROT images were obtained at T$_0+$20.1s (duration 60\,sec) with the tracking speed adapted to obtain a small trail of a few pixel length. This technique is used in order to obtain temporal informations during the exposure \citep[e.g.][]{Klotz2006}. The spatial sampling is 3.29\,arcsec/pix and the FWHM of stars (in the perpendicular direction of the trail) is 2.05 pixels. On the trailed image (see Figure~\ref{trail}), the flux of the afterglow is affected by the proximity of NOMAD1 1387-0420537 (R=18.1) but also by the end of the trail of NOMAD1 1387-0420579 (R=15.48 hereafter A). This last star lies at 21 arcsec East and 7\,arcsec South from the GRB position. As a matter of consequence, the trail of star A (which spreads over 30\,arcsec) covers partially the beginning of the trail of the GRB (Fig.~\ref{trail} top).\\ Knowing the position of the afterglow, we first subtracted the trail of the star A within the image. The star NOMAD1 1387-0420302 (R=13.17, hereafter B) is far enough to other neighbor stars to be used as a trail template to model the star A. 
We then subtracted this model from the image (using a correct scaling factor to take into account the difference of flux between the stars A and B). The result of the subtraction shows clearly the presence of a dim optical emission (Fig.~\ref{trail} bottom).\\ \\ Successive images are 30\,s long exposures tracked on the diurnal motion. \cite{gendre08} published only upper limits using TAROT data because it was impossible to detect the optical counterpart so close to the star NOMAD1 1387-0420537 without careful subtraction. The images taken later by TAROT were employed to perform this subtraction. The technique successfully revealed the optical afterglow. In Fig.~\ref{tarot_crni} we display the initial part of the TAROT light curve. We add data from \cite{Skvarc08} showing that we can distinguish the early emission that occurred during the gamma activity and the afterglow that followed. A discussion of the afterglow emission process is beyond the scope of this paper and will be presented in Corsi et al. (2009, in preparation). \begin{figure}[htb] \centering \includegraphics[width=0.9\columnwidth]{traine4xcom.eps} \vspace*{5mm} \includegraphics[width=0.9\columnwidth]{trainecleaned4x.eps} \caption{Field of GRB~081126. Top: TAROT image taken between 21s and 81s after the GRB trigger. The hour angle velocity was adapted to obtain stars as trails of $\sim$9.2 pixel length during the 60s exposure. The theoretical position of the GRB trail is indicated by the white box. The star A covers partly the GRB trail. (see text). Bottom: After subtraction of star A using the model of star B, the trace of the optical emission of the GRB appears in the box The image size is 5 arcmin, North is up, East left. \label{trail}} \end{figure} \begin{figure}[htb] \centering{\includegraphics[width=1\columnwidth]{tarot-crni.eps}} \caption{ Optical light curve of GRB~081126. TAROT optical data are thick bars and observations from \cite{Skvarc08} are thin bars (2 sigma level). 
There are data in the ranges 20--29 and 49--89\,sec, but with no detection at the limit of R=18.0. } \label{tarot_crni} \end{figure} \section{Data analysis} From the trailed image, a horizontal profile corresponding to the predicted position of the afterglow gives directly the light curve. We measured the temporal sampling of 6.5 sec/pixel using trails of bright stars. The light curve of the afterglow in the trail presents a flare within 3 pixels (Fig. \ref{tarot_bat}), with a probability of $\sim10^{-8}$ to be spurious. The probability to observe a cosmic ray at that position is $3.6\times 10^{-6}$ (estimated from dark fields of the same night). We thus conclude that this flare is real and produced by the burst itself. Such light curve profiles are affected by the Point spread function (PSF) of TAROT. In order to compare the optical and $\gamma-$ray light curves, we need to convolve the BAT signal by the TAROT PSF. The PSF can be extracted as a vertical profile of a bright non saturated star (seen as the doted curve in the Figure\,\ref{tarot_bat}). We performed a symetrisation of the PSF shape to be compatible with the hypothesis that the PSF shows no direction effect. Once convolved with the TAROT PSF, the BAT signal corresponding to one peak is very similar to that of TAROT. We note at the start of the trail a bright single pixel that could be associated with the end of an optical flare. However, this event is not significant enough to be used in our analysis. Nonetheless, it could be an optical flare related to the first $\gamma-$ray pulse. In the following analysis, we will consider this as a possibility, and thus that the optical flare is correlated to the second $\gamma$-ray pulse, without discarding the possibility that the optical flare is linked to the first $\gamma$-ray pulse. A $\chi^{2}_{\nu}$ fit between the optical flare and $\gamma-$ray pulse implies a temporal lag of $+8.4 \pm 3.9$ s (see Fig. 
\ref{tarot_lag}) at the 97\% confidence ($+38.4 \pm 3.9$ s if the optical flare is related to the first $\gamma$-ray pulse). This is strong evidence for a positive time-lag between the optical and high energy feature. We point out that the exposure time of TAROT images has a better accuracy than 0.1s because we use a GPS card triggered by the opening of the shutter, and is not dependent on the computer internal clock variations \citep{laas2008}. As the TAROT PSF is larger than the BAT second pulse, we also studied the influence of the duration of the BAT pulse modelized by a Gaussian shape, letting free the width of the Gaussian within the fit. The best match of the modelized BAT pulse is a Gaussian spread by sigma=4.0\,sec. The $\chi^{2}_{\nu}$ fit gives the same lag as for the actual BAT pulse meaning that the profile shape of the pulse does not constrain the lag value. The fit remains compatible for Gaussians with sigma lower than 9\,sec. This means the optical pulse is compatible with a high energy pulse which could have a duration between 0 to 9\,sec. The flux of the optical peak observed by TAROT is 0.45 mJy. To be compared with the Fermi observations, this value must be corrected for two effects: i) the spread of the flux due to the PSF profile, and ii) the large optical extinction in the R band. Correcting for all these effects, the optical flux is $\sim$6\,mJy at the peak. We used the Band model parameters obtained by the Fermi-GBM \citep{Bhat08} to compute the optical flux expected from the high energy band. We derived an expected optical flux of 2.6$\times 10^{-10}$\,Jy, which is $\sim10^{-7}$ times the one observed. Taking account for the uncertainties in the Bhat's alpha parameter the extrapolated flux is always $\sim10^{-5}$ times the one observed. 
\section{Discussion and conclusions} The analysis of optical and gamma--ray light curves of GRB~081126 reveals: i) the width of the optical peak is the same as the gamma--ray peaks, ii) the profile of the optical peak is consistent to the gamma--ray peaks after correcting for the different PSF, iii) the optical peak occurred 8.4$\pm$3.9\,s (or $+38.4 \pm 3.9$ s) later than the gamma peak. This is the first time-lag measured between optical and gamma light curves of a GRB. iv) the gamma--ray flux measured by GBM Fermi, extrapolated to optical energies is $\sim10^{-7}$ times smaller than the optical flux. These three results provide potentially new constraints on the theory of prompt GRB emissions. For example, time lags between different energy photons are predicted by quantum gravity in the framework of string theory \citep[e.g.][]{Amelino98}. However, in such a case optical photons should arrive before gamma ones. As we observe the opposite, one can rule out this hypothesis for the GRB~081126's optical lag. Gamma-ray photons comptonization on cold electrons could explain the profile of the optical flare. However, this cannot explain the positive lag observed. Within the internal shock framework, this temporal lag implies that optical photons were emitted after the $\gamma-$ray ones. However, it is surprising that the flux {\it increases} so dramatically during this process. This is not well understood in the standard model for the inelastic internal shock and our results provide new tools for refining the standard model. \begin{figure}[htb] \centering{\includegraphics[width=1\columnwidth]{tarot-bat.eps}} \caption{ Light curves of GRB~081126 measured by BAT and TAROT. The dotted line labeled 'PSF-TAROT' stands for the spread of a star equivalent to an instantaneous flash of 0s duration. This figure appears in colors in the electronic version. 
} \label{tarot_bat} \end{figure} \begin{figure}[htb] \centering{\includegraphics[width=1\columnwidth]{lag.eps}} \caption{ The convolution of the peak of BAT light curve (in blue) by the PSF-TAROT shifted by 8.4s (in black) compared to the TAROT data (in red). This figure appears in colors in the electronic version. } \label{tarot_lag} \end{figure} \begin{acknowledgements} B. Gendre acknowledges support from {\it Centre National d'Etudes Spatiales} (CNES). The TAROT telescope has been funded by the {\it Centre National de la Recherche Scientifique} (CNRS), {\it Institut National des Sciences de l'Univers} (INSU) and the Carlsberg Fundation. It has been built with the support of the {\it Division Technique} of INSU. We thank the technical staff contributing to the TAROT project, G. Buchholtz, J. Eysseric, M. Merzougui, C. Pollas, P. Richaud and Y. Richaud. \end{acknowledgements}
% 1,108,101,565,893
% arxiv
\section{Introduction} Understanding the relation between structure and dynamics in glassy systems has sparked extensive discussion over the last few decades\cite{cavagna2009supercooled,berthier2011theoretical,royall2015role,tanaka2019revealing}. While cooling or compressing a glassy system, we typically observe little to no change in structure, while at the same time observing an extreme decrease in dynamics\cite{ediger1996supercooled}. This decrease in dynamics is highly heterogeneous, with increasingly large regions of slow and fast particles as a function of supercooling\cite{ediger1998can, Berthier2020GlassesAA, Candelier2010}. One of the approaches to probe the apparent discrepancy between structure and dynamics has been the use of machine learning\cite{Yang-2021, ciarella2022dynamics,cubuk2015identifying, schoenholz2016structural,bapst2020unveiling, boattini2020autonomously,paret2020assessing,boattini2021averaging,jung2022predicting, shiba2022unraveling,pezzicoli2022se,coslovich2022dimensionality}. By capturing the local structure of particles in terms of parameters and training algorithms to predict the mobility of particles based on these parameters, the idea is that we can learn what aspects of the structure influence the heterogeneous dynamics. Over the last two years, the quest for accurate dynamical predictions in glassy systems has led to an explosion of papers introducing new methodologies that compete in predicting the so-called dynamic propensity of simple glassy models. This propensity is defined as the average expected displacement a particle will undergo in a certain time interval when starting from a specific initial configuration \cite{widmer2004reproducible, widmer2007study}. 
This explosion started in 2020 with the work of Bapst \textit{et al.}\cite{bapst2020unveiling}, where several machine learning methods were trained to predict the dynamic propensity of a Kob-Andersen system\cite{kob1995testing}, with a graph neural network (GNN) performing the best. In 2021, a linear-regression-based algorithm with input parameters that captured structure over several length scales was shown to be able to rival GNNs in predicting the propensity for the same system\cite{boattini2021averaging}. Since then, several works have improved on this feat, by e.g. using physics-informed parameters as input for a deep neural network\cite{jung2022predicting}, by modifying the loss function of a GNN to also consider relative displacements between pairs of particles \cite{shiba2022unraveling}, or by designing GNNs that preserve roto-translation equivariance\cite{pezzicoli2022se}. These works clearly demonstrate that careful consideration of the physics involved can aid in improving the predictive accuracy of these advanced methods. However, these neural-network based approaches still carry the downside of high complexity. In contrast, due to the simplicity of linear methods, accurately capturing dynamics using linear regression gives a clearer perspective on what structural aspects most strongly drive glassy dynamics. This raises the question: can a clever choice of input parameters boost the performance of linear regression approaches? Here, we show that this is indeed the case, and apply linear regression to predict the dynamic propensity of three glass-forming models: hard spheres, harmonic spheres, and the Kob-Andersen model. The main idea of our method is to consider the structure of the system during the caging regime, where each particle is confined by its neighbors in a reasonably well-defined location. 
By directly incorporating information about this ``cage state'', we show that it is possible to drastically improve the ability of linear regression to predict dynamics, to the point where it even exceeds advanced non-linear machine learning algorithms over a wide range of time scales. \section{Model, descriptors and prediction method} \subsection{Model} Each of our three models consists of two species of particles, labeled $A$ and $B$, with different particle sizes but equal mass $m$. We denote the number of particles of the two species as $N_A$ and $N_B$, such that the total number of particles $N=N_A + N_B$. Below we discuss each of the models and the statepoints at which we investigate them individually. \subsubsection{Binary hard-spheres mixture} The first model we consider consists of hard-sphere particles of two diameters, denoted $\sigma_A$ and $\sigma_B$. The hard-sphere potential for two particles $i$ and $j$ is given by \begin{equation} V^\mathrm{HS}(r)= \begin{cases} \infty &\text{for } r\leq \sigma_{ij}\\ 0 &\text{for } r > \sigma_{ij}, \end{cases}\end{equation} where $\sigma_{ij} = (\sigma_i + \sigma_j)/2$. Here, we use a size ratio $\sigma_{B}/\sigma_{A} = 0.85$, and a number ratio $N_A/N = 0.3$. The considered packing fraction of $\eta = 0.58$ leads to a structural relaxation time of approximately $\tau_\alpha= 10^4\tau$, with $\tau$ the unit of time given by $\tau = \sqrt{m\sigma_A^2/k_BT}$, $k_B$ Boltzmann's constant and $T$ the temperature. \subsubsection{Binary harmonic mixture} The binary harmonic potential is given by\cite{durian1995foam,berthier2009compressing} \begin{equation} V^\mathrm{Har}(r)= \begin{cases} \epsilon\left(1-\frac{r}{\sigma_{ij}}\right)^2 &\text{for } r\leq \sigma_{ij}\\ 0 &\text{for } r > \sigma_{ij}, \end{cases}\end{equation} where again $\sigma_{ij} = (\sigma_i + \sigma_j)/2$. Here, we consider the case where $\sigma_B / \sigma_A = 1.4$, $N_A / N = 0.5$. 
Our state point of interest is at number density $\rho \sigma_A^3 = 0.82$ and temperature $k_BT/\epsilon = 0.0045$, where the structural relaxation time is approximately $\tau_\alpha = 671 \tau$\cite{tah2022fragility}. \\ \subsubsection{Binary Kob-Andersen mixture} The Kob-Andersen (KA) mixture consists of two particles types \textit{A} and \textit{B} interacting via the Lennard-Jones potential: \cite{kob1995testing} \begin{equation}V^\mathrm{KA}(r)= 4\epsilon_{ij}\left[\left(\frac{\sigma_{ij}}{r}\right)^{12}-\left(\frac{\sigma_{ij}}{r}\right)^{6}\right],\tag{1}\end{equation} where $\epsilon_{AA}: \epsilon_{AB}:\epsilon_{BB} = 1: 1.5:0.5$ and $\sigma_{AA}: \sigma_{AB}:\sigma_{BB} = 1: 0.80:0.88$. Note that $(\sigma_{AA}+\sigma_{BB})/2\neq \sigma_{AB}$, i.e. the system is non-additive. The composition of the system is $N_A/N=0.8$. We investigate this system at number density $\rho\sigma_A^3= 1.203$ and temperature $k_BT/\epsilon_A= 0.44$. The relaxation time of the system is approximately $\tau_\alpha \simeq 3075\tau$\cite{bapst2020unveiling}. \subsection{Generating initial configurations} The HS system is simulated using event-driven molecular dynamics\cite{Rapaport2009, smallenburg2022efficient} (EDMD) in the microcanonical ensemble, i.e. at fixed number of particles $N$, volume $V$ and energy $E$. In order to generate snapshots that can serve as initial configurations, we place $N$ particles in the box at a reduced size, and then grow them over time until the desired packing fraction is reached \cite{donev2005neighbor}. Afterwards, the system is equilibrated for $10\tau_\alpha$. To simulate both the harmonic and KA systems, we use LAMMPS\cite{LAMMPS}. First we initialize the system by performing a simulation in the canonical ensemble, i.e. at fixed $N$, $V$ and $T$, using a Nose-Hoover thermostat\cite{evans1985nose} at the desired temperature $T$. Afterwards, we equilibrate the system in the microcanonical ensemble, for $10\tau_\alpha$. 
For each system we equilibrate 100 snapshots, where each snapshot contains 2000 particles. \begin{figure} \centering \includegraphics[width=0.49\textwidth]{propensity.pdf} \caption{Propensity averaged over all $A$ particles in the system and correlation between the predicted and measured propensity for the same particles, plotted over time for the binary hard sphere mixture at the state point described in this paper. The propensity clearly exhibits three different dynamical regimes: In the ballistic regime, particles have not yet collided with their neighbours, and thus follow a straight ballistic trajectory. In the caging regime, particles move around for an extend period of time in the cage formed by their nearest neighbours. Finally, after particles have escaped the system enters the diffusive regime. } \label{fig:prop} \end{figure} \subsection{Dynamic propensity} As a measure of the dynamical heterogeneity we use the dynamic propensity, which is a quantity that captures the average mobility of particles \cite{widmer2004reproducible, widmer2007study}. To measure it, we simulate the dynamical evolution of each initial configuration 50 times, using distinct initial velocities drawn from a Maxwell-Boltzmann distribution at the desired temperature, i.e. we sample the isoconfigurational ensemble\cite{widmer2004reproducible}. Afterwards the dynamic propensity $\Delta r_i(t)$ of each particle $i$ is obtained by averaging its absolute displacement over the different runs, i.e. \begin{equation} \Delta r_i(t) = \left\langle |\mathbf{r}_i(t) - \mathbf{r}_i(0)| \right\rangle_\mathrm{conf},\label{eq:propensity} \end{equation} where the subscript 'conf' indicates the isoconfigurational average. We measure the dynamic propensity at logarithmic time intervals between $t/\tau = 0.01$ and $t/\tau = 10\tau_\alpha$. Simulations of the all the three systems are performed in a microcanocical ensemble. 
\subsection{Structural Descriptors} To fit the dynamic propensity, we use standard ridge regression combined with structural order parameters, as previously done in Refs. \onlinecite{boattini2021averaging,alkemade2022comparing}. The structural parameters include rotationally invariant parameters that capture both the local density as well as the local \textit{n}-fold symmetry. The local density is measured by using radial density functions that capture the density in a shell at distance $r$ and with thickness $2\delta$ from the reference particle. They are defined as \begin{equation} G^{(0)}_i(r, \delta, s) = \sum_{j\neq i, s_j = s}e^{-\frac{(r_{ij} -r)^2}{2\delta^2}}, \label{eq:radial} \end{equation} where $i$ is the reference particle and $r_{ij}$ is the absolute distance between particles \textit{i} and \textit{j}. The sum goes over all other particles $j$ in the system that are part of particle species \textit{s}. In this paper we include the radial functions up to the fifth minimum in the radial distribution. The other structural descriptors we use are based on bond order parameters which express the local structure in terms of spherical harmonics \cite{steinhardt1983bond,lechner2008accurate}. To compute the parameters we first compute the complex coefficient \begin{equation} q_i^{(0)}(l, m, r, \delta) = \frac{1}{Z} \sum_{i\neq j} e^{-\frac{(r_{ij} -r)^2}{2\delta^2}}Y^m_l(\mathbf{r}_{ij}). \label{eq:qlm} \end{equation} Here $Y^m_l(\mathbf{r}_{ij})$ is the $l^\text{th}$ spherical harmonic function and $m$ is a function that runs from $-l$ to $l$. To normalize the coefficient, we use $Z$, which is given by \begin{equation} Z = \sum_{i\neq j} e^{-\frac{(r_{ij} -r)^2}{2\delta^2}}. \end{equation} Note that due to the exponent, just as with the radial density functions, mainly particles that are within the shell $r-\delta$ to $r+\delta$ contribute to $q_i^{(0)}(l, m, r, \delta)$. 
The parameters are made rotationally invariant by summing over all possible values of $m$: \begin{equation} q_i^{(0)}(l, r, \delta) = \sqrt{\frac{4\pi}{2l+1}\sum_{m=-l}^{l}|q_i^{(0)}(l, m, r, \delta)|^2}. \label{eq:ql} \end{equation} Note that $q_i^{(0)}(l, r, \delta)$ will mainly pick up the $l$-fold symmetry of the particles' local structure in each shell.
\begin{tabular}{lclclc} & Hard spheres && Harmonic && Kob-Andersen\\ a) & & b) & & c) & \\[0cm] & \includegraphics[width=0.3\linewidth]{distance_HS_no_LR.PDF} & & \includegraphics[width=0.3\linewidth]{distance_harmonic_no_LR.PDF} & & \includegraphics[width=0.3\linewidth]{distance_KA_no_LR.PDF} \\ d) & & e) & & f) & \\[0cm] & \includegraphics[width=0.3\linewidth]{paircorrelation_HS_A.PDF} & & \includegraphics[width=0.3\linewidth]{paircorrelation_har_A.PDF} & & \includegraphics[width=0.3\linewidth]{paircorrelation_KA_A.PDF} \end{tabular} \caption[width=1\linewidth]{\textbf{a,b,c)} The Pearson coefficient plotted over time between the propensity and absolute difference between the positions of particles in the initial configuration and the inherent state/cage state ($\Delta r^\text{IS/CS}$) for the $A$-particles in hard spheres (a), harmonic spheres (b), and Kob-Andersen (c). \textbf{d,e,f)} The total radial distribution function around A-type particles in the initial configuration, the inherent state, and the cage state, for the same three models. } \label{fig:cagestates} \end{figure*} A number of recent studies have made significant progress in predicting the dynamic propensity of particles in glassy fluids based on local structural information using a variety of machine learning algorithms \cite{bapst2020unveiling, boattini2021averaging, jung2022predicting, shiba2022unraveling, pezzicoli2022se}. The accuracy of such predictions is typically evaluated using the Pearson correlation coefficient\footnote{The Pearson correlation coefficient is a measure of the degree of linear correlation between two variables. If the Pearson correlation $\mathrm{cor}(\mathbf{X}, \mathbf{Y})$ between two datasets $\mathbf{X}$ and $\mathbf{Y}$ is equal to 1, the two variables are perfectly related via a linear function with positive slope. 
A value of 0 indicates no linear correlation, while a value of -1 would indicate perfect negative linear correlation.} between the predicted and measured dynamic propensities. A typical example of this correlation as a function of time is shown in Fig. \ref{fig:prop}, where we used linear regression in combination with structural order parameters to predict the propensity in the binary hard-sphere mixture we consider here \cite{alkemade2022comparing}. Interestingly, the correlation is weak in three distinct regimes. Two of these regimes are trivial. The first corresponds to the very short-time regime, where dynamics are dominated by the random choice of initial velocities. Especially for hard spheres, the motion of the particles in this regime is entirely unaffected by local structure, and hence fully unpredictable. The second trivial regime that is hard to predict occurs at long time scales $t \gg \tau_\alpha$, where the system loses memory of its initial configuration. The intriguing regime, where the correlations are weak, lies at intermediate time scales, and corresponds to the times where particles are trapped inside their local cages, as also indicated by the behavior of the average propensity in Fig. \ref{fig:prop}. This lower performance of machine-learned algorithms for predicting propensity in the caging regime is not unique to the results of Fig. \ref{fig:prop}, but has been observed in a variety of studies involving different machine learning methods and different ways to describe the system \cite{jung2022predicting, bapst2020unveiling,boattini2021averaging,alkemade2022comparing}, although recent, more advanced machine learning methods have improved significantly the correlation in this regime \cite{shiba2022unraveling,pezzicoli2022se}. 
Observing the weak correlations in the caging regime, a natural question to ask is then: what structural information do we need to include in order to make accurate predictions about the dynamics of our system in this regime? Since during the caging regime, particles on average move around the so called 'cage center', a rather obvious choice would be to consider the expected average distance between a particle's initial position $\mathbf{r}_i^\mathrm{init}$ and its typical position while in its cage $\mathbf{r}_i^\mathrm{cage}$. Since the dynamic propensity measures the averaged absolute distance travelled by particles, we would expect this distance between the initial and mean cage position $\Delta r_i^\mathrm{cage} = |\mathbf{r}_i^\mathrm{cage} - \mathbf{r}_i^\mathrm{init}|$ to be a good predictor of the dynamic propensity. While this statement seems logically trivial, an important question is: what is a good estimate of the cage center of a given particle? A common approach in glass literature for predicting dynamical behavior in a glassy system is to quench the system to its inherent state (IS) (see e.g. \cite{widmer2006predicting, schoenholz2016structural, tong2018revealing, jung2022predicting}). The inherent state of a configuration is defined as the local potential energy minimum that one obtains via a rapid energy minimization\cite{stillinger1982hidden}. Note, however, that this is not exactly the same as the average position of a particle in its cage, as the average should include the effects of thermal fluctuations as well. As such, a natural alternative choice to estimate the position of the cage center is to simply take the average position of each particle, under the constraint that no significant particle rearrangements have happened. We will refer to this second option as the cage state (CS). 
To explore these two definitions of $\Delta r^\mathrm{cage}$, we calculate for each of our initial configurations both the inherent state and cage state positions. For the inherent state, we use the FIRE algorithm proposed by Bitzek \textit{et al.} \cite{bitzek2006structural} to minimize the potential energy. In the case of hard spheres, we apply an effective logarithmic interaction potential proposed by Arceri \textit{et al.}\cite{arceri2020vibrational} in order to obtain an effective inherent state. Specifically, we use the effective interaction potential $V^\mathrm{eff}(r) = -k_BT \log(r-\sigma_{ij})$, with both the forces and interaction energy truncated and shifted to zero at a cutoff radius $r_c = 1.35 \sigma_{ij}$. To obtain the cage state, we use a Monte Carlo (MC) simulation in the canonical ensemble that measures the average positions of each particle $\mathbf{r}_i^{CS} = \langle\mathbf{r}^c_i\rangle$, while restricting the movement of each particle to ensure that it stays inside its initial cage. In order to do this, we reject all MC moves that would move the center of a particle outside of its original Voronoi cell. Since we consider binary systems, we use an approximate definition of a Voronoi cell which takes into account the particle sizes. In particular, the approximate Voronoi cell for particle $i$ is defined as the collection of points $\mathbf{R}$ for which \begin{equation} \frac{\left|\mathbf{R} - \mathbf{r}_i^\mathrm{init} \right|}{\sigma_i} < \frac{\left|\mathbf{R} - \mathbf{r}_j^\mathrm{init} \right|}{\sigma_j} \forall j \in \mathcal{N}(i), \end{equation} where $\mathbf{r}_i^\mathrm{init}$ is the position of particle $i$ in the initial configuration and $\mathcal{N}(i)$ are the nearest neighbours of particle \textit{i} determined by the SANN algorithm \cite{van2012parameter}. For the KA mixture, we set $\sigma_i = \sigma_{ii}$, since the individual particle sizes are ill-defined. 
Note that since the restrictions on the particle positions eliminate the possibility of long-time diffusion, short simulations are sufficient to sample the restricted phase space. Here, we use MC simulations of $5 \cdot 10^5$ initial steps and $10^6$ measuring steps. Note that as an alternative to confining each particle to its initial Voronoi cell, we have also explored the possibility of instead confining each particle to a spherical region with a fixed radius $r_c$. When the size of this sphere is chosen to be close to the size of the particle (and hence similar to the size of the Voronoi cell), we find essentially the same results (as shown in the SI). \begin{figure*}[t!] \begin{tabular}{lclclc} & Hard spheres && Harmonic && Kob-Andersen\\ a) & & b) & & c) & \\[0cm] & \includegraphics[width=0.3\linewidth]{param_HS.PDF} & & \includegraphics[width=0.3\linewidth]{param_harmonic.PDF} & & \includegraphics[width=0.3\linewidth]{param_KA.PDF} \\ d) & & e) & & f) & \\[0cm] & \includegraphics[width=0.3\linewidth]{param_distance_HS.PDF} & & \includegraphics[width=0.3\linewidth]{param_distance_harmonic.PDF} & & \includegraphics[width=0.3\linewidth]{param_distance_KA.PDF} \end{tabular} \caption[width=1\linewidth]{\textbf{a,b,c)} Pearson correlation coefficient between the dynamic propensity and the prediction of a linear regression model trained on the structural parameters evaluated for the initial-, the inherent- and the cage-state coordinates (i.e. $\mathbf{X}^\text{init}$, $\mathbf{X}^\text{IS}$ ,$\mathbf{X}^\text{CS}$), for the $A$-particles in hard spheres (a), harmonic spheres (b), and Kob-Andersen (c). 
\textbf{d,e,f)} Correlation between the measured propensity and the propensity predicted by a linear regression model trained on either only the structural parameters based on the initial positions, or the set of input parameters given by $\{\mathbf{X}^\text{init},\mathbf{X}^\text{IS/CS}, \Delta r^\text{IS/CS}\}$.
In the harmonic and KA models, both the inherent state and cage state increase the degree of local structure in the system, resulting in higher peaks in $g(r)$ -- and this degree of additional ordering is stronger for the inherent state than for the cage state. This implies that the inherent state quench pushes the system significantly further away from its local structure than the system would normally sample. In contrast, the cage state procedure only takes into account configurations that the system samples during (constrained) thermal fluctuations. Hence, it is perhaps not surprising that the cage state better reflects the expected dynamics of our systems. We now examine whether knowledge of the cage structure can help us make more accurate predictions of the dynamic propensity even outside the caging regime. To this end, we start with the structural order parameters and linear regression approach described in Refs. \onlinecite{alkemade2022comparing, boattini2021averaging}. In particular, for each particle, we define a set of approximately 1000 structural parameters describing their local structural environment, and use standard ridge regression in order to fit the dynamic propensity as a function of these structural parameters. A full description of these structural order parameters for each model is provided in the SI. Note that this is the same approach as we used for the data in Fig. \ref{fig:prop}. As a basis for calculating the local structural descriptors, we now use either the initial, inherent, or cage state, resulting in three sets of structural descriptors for each particle: $\mathbf{X}^\mathrm{init}$, $\mathbf{X}^\mathrm{IS}$, and $\mathbf{X}^\mathrm{CS}$. We then examine how these different sets of input data influence our ability to predict dynamic propensity. The results are shown in Fig. \ref{fig:correlations}abc for the three model systems. 
In all cases, at short times the initial state of the system provides the best input for predicting propensities. This is understandable, since only the initial state contains information about the exact particle environments in the limit $t\to 0$. At longer time scales, both the inherent and cage state structures outperform the initial state, with the cage state always outperforming the inherent state. This further supports the observation that the cage state, as determined by our MC approach, represents an excellent approximation for the underlying structure of our system when it comes to understanding its dynamics on time scales around the structural relaxation time. The obvious next step is to combine this information with knowledge of $\Delta r^\mathrm{cage}$, which we already know provides a strong prediction of dynamics in the caging regime. In Fig. \ref{fig:correlations}def, we plot the correlation between the dynamic propensity and predictions based on linear regression, combining as input $\mathbf{X}^\mathrm{init}$, $\mathbf{X}^\mathrm{cage}$, and $\Delta r^\mathrm{cage}$, for both cage definitions. Overall, we observe a massive improvement in our ability to predict propensity at all time scales beyond the ballistic regime. 
\begin{figure} \vspace{0pt} \raggedright a)\\ \includegraphics[width=\linewidth]{KA_resulst_otherpapers.pdf} b)\\ \includegraphics[width=\linewidth]{compare.pdf} \caption{a) Pearson correlation coefficient between the dynamic propensity and the prediction for \textit{A} particles in a KA system at the statepoint used in this paper, made by different models: linear regression (LR) by Boattini \textit{et al.}\cite{boattini2021averaging}, multi-layer perceptron (MLP) by Jung \textit{et al.} \cite{jung2022predicting}, graph neural network (GNN) by Bapst \textit{et al.}\cite{bapst2020unveiling}, BOnd TArgetting Network (BOTAN) GNN by Shiba \textit{et al.} \cite{shiba2022unraveling} and SE(3)-equivariant GNN by Pezzicoli \textit{et al.}\cite{pezzicoli2022se}. b) Average dynamic propensity per snapshot in the dataset of Ref. \onlinecite{bapst2020unveiling}, which was also used in Refs. \onlinecite{boattini2021averaging,pezzicoli2022se,jung2022predicting}. For each initial snapshot in the training set of Ref. \onlinecite{bapst2020unveiling}, we plot the mean propensity in that snapshot at the times reported for that snapshot, and color the point based on the system density. For comparison, we also plot in black the mean propensity taken from our own dataset, where the times and density are the same for each initial configuration. The error bars indicate $\pm 2$ times the standard deviation of the mean propensity at each time. \label{fig:allKAresults} } \end{figure} The predictive power of our models using information about the cage state is particularly impressive compared to past results. Specifically, the state point of the KA mixture we study here has been used in a variety of previous studies where new methodologies were introduced to attempt accurate prediction of the dynamic propensity. In Fig. \ref{fig:allKAresults}, we plot our results from Fig. \ref{fig:correlations}f and compare them to the results of Refs.
\onlinecite{bapst2020unveiling, boattini2021averaging, shiba2022unraveling, jung2022predicting, pezzicoli2022se}. Note that the comparison between different predictions is complicated by significant differences in the datasets we used here and the dataset from the work of Bapst \textit{et al.} \cite{bapst2020unveiling}, which was also used by Boattini, Jung, and Pezzicoli and their respective co-workers\cite{boattini2021averaging,jung2022predicting,pezzicoli2022se}. In particular, for our dataset, we determine dynamic propensities for a set of initial snapshots that are all taken at the same density, and we always measure the dynamic propensity at a fixed set of times. In contrast, the authors of Ref. \onlinecite{bapst2020unveiling} took the initial snapshots from constant-pressure simulations, and hence the configurations vary slightly in density. Additionally, for each initial configuration, they measured the dynamic propensity at different time intervals, based on the decay of the intermediate scattering function for that specific snapshot. In Fig. \ref{fig:allKAresults}b, we illustrate this difference by plotting for each of the initial configurations in the training set of Ref. \onlinecite{bapst2020unveiling} the average propensity as a function of time, and color each point based on the corresponding system density. Hence, in our comparison in Fig. \ref{fig:allKAresults}a, the time for each point based on this dataset is an average time. Clearly, there is a significant correlation between the density of the snapshot and the mean dynamic propensity, and a significant spread in acquisition times. Additionally, the spread in mean propensities is significantly lower than in our own data (shown in black in Fig. \ref{fig:allKAresults}b), due to the grouping of initial configurations based on their structural relaxation instead of time. It should be noted that this grouping is not fully consistent with Eq.
\ref{eq:propensity}, and implies that some dynamical information is already included in the input data. Evidently, including information about the cage state -- captured in our parameters $\mathbf{X}^{CS}$ and $\Delta r^{CS}$ -- allows our linear regression approach to outperform the current state-of-the-art machine learning methods for predicting dynamic propensity over a wide range of time scales. The GNN-based methods do outperform our predictions in the very short-time regime, where the dynamics are likely dominated by the forces that act on the particles in the initial configuration. This may be an indication that these instantaneous forces cannot be directly recovered from our set of input parameters. In contrast, the three GNN-based approaches plotted (in blue) in Fig. \ref{fig:allKAresults} all have information about the relative positions of particles with respect to their neighbors included in the input graph, and hence might be able to learn the net forces on the particles with high accuracy. At very long time scales, where the motion of particles becomes more diffusive, the results of the different machine learning approaches appear to converge to similar performance -- with the physics-informed approach by Jung \textit{et al.} \cite{jung2022predicting} performing the strongest in this regime. To test whether the improved performance of the GNNs at short time scales is indeed due to them directly learning the initial forces, we adapted our method to also include the net force on each particle in the initial configuration. In Fig. \ref{fig:difKAinflunnces} we show the correlation between the dynamic propensity and the initial force in blue, and indeed we see a strong peak at short times. Adding this information into our input data for the full linear regression model, we indeed see a significant improvement at short times (black line). 
However, the GNN-based methods still achieve a significantly higher accuracy in this regime, and hence appear to be capable of learning more about the short time dynamics than simply the instantaneous forces. In Fig. \ref{fig:difKAinflunnces} we also break down our prediction of the dynamic propensity into the different relevant aspects of the input data. Overall, we see that the dynamic propensity at short times is indeed dominated by the forces, at intermediate times by the initial distance to the cage center, and at long times by the structural features of the cage state. While this is in principle not surprising, it is impressive to see that the vast majority of the variation in propensity at short and intermediate time intervals can be explained by just two simple measurements. Moreover, the predictive ability of the cage state structure at long times is impressive as well, and demonstrates that using the cage state, rather than the initial state, to make long-time predictions is an excellent strategy. Note that outside of short times, adding in information on the initial state on top of the cage state is fully irrelevant. \begin{figure} \vspace{0pt} \includegraphics[width=\linewidth]{diffinfluences_KA.pdf} \caption{Pearson correlation coefficient between measured and predicted propensity over time, shown for linear regression models trained on different subsets of structural parameters for \textit{A} particles in a KA system. $F^{\text{net},0}$ is the absolute net force that a particle feels at $t=0$, $\Delta r^\text{CS}$ is the absolute difference between the positions of particles in the initial configuration and the cage state and $\mathbf{X}^\text{CS}$ are the structural parameters evaluated for the cage state.
Note that the green line (``All'') is the linear regression model trained on the combination of $\{F^{\text{net},0},\Delta r^\text{CS}, \mathbf{X}^\text{CS}\}$, while the black line (``All $+\mathbf{X}^\text{init}$'') also includes the structural parameters of the initial configuration in the structural dataset. \label{fig:difKAinflunnces} } \end{figure} \section{Conclusions} In short, we have demonstrated that the behavior of the dynamic propensity of glassy fluids can be predicted to high accuracy by using information about the cage state of the initial configuration. This cage state is defined as the set of coordinates describing the average position of the particles when the system is constrained to ensure particles cannot escape their local cages. Combining this information with a simple linear-regression-based algorithm, we can predict dynamic propensities with accuracies that rival or exceed current state-of-the-art machine learning methods at nearly all times. This suggests that the cage state could be a helpful tool for further studying the underlying structure of glassy fluids. \section*{References} \section*{Acknowledgements} The authors would like to thank Marjolein de Jager for many discussions. L.F. acknowledges funding from NWO for a Vidi grant (Grant No. VI.VIDI.192.102). \section*{Data Availability Statement} Raw data associated with the measurements of the dynamical propensity, as well as all figure data, will be published at the following url [???] prior to publication of the paper.
1,108,101,565,894
arxiv
\section{Introduction} A blockchain is an append-only and immutable distributed database or ledger that records a time-sequenced history of transactions. One key aspect of the blockchain protocol is the consensus algorithm which enables agreement among a network of \emph{nodes} or \emph{processors} on the current state of the ledger, assuming that some of them could be malicious or faulty. Blockchain protocols are classified as permissioned or permissionless depending on whether a trusted infrastructure to establish verifiable identities for nodes is present or not. One of the first, and still popular, permissionless blockchain protocols is Bitcoin~\cite{nakamoto2008bitcoin}. Consensus in Bitcoin is achieved by selecting a leader node in an unbiased fashion once every 10 minutes (an epoch) on an average, who then gets the right to append a new block onto the blockchain. The network then implicitly accepts this block by building on top of it in the following epoch or rejects it by building on top of some other block. Bitcoin uses a Proof-of-Work (PoW) mechanism to select the leader in each epoch. In Bitcoin's PoW, each node independently attempts to solve a hash puzzle and the one that succeeds is selected as a leader who gets the right to propose the next block. As PoW involves significant computation, Bitcoin's protocol includes a reward mechanism for the winning node in order to incentivize everyone to behave honestly. Ever since the introduction of Bitcoin in 2008, the power of permissionless blockchain technology has been harnessed to create systems that can host and execute distributed contracts (also referred to as ``smart contracts'') which have found many interesting applications, including, cryptocurrencies, secure data sharing and digital copyright management to name a few~\cite{kiayias2016blockchain}.
A significant shortcoming of Bitcoin's (and of other similar permissionless system's) leader and PoW competition based consensus protocol is its low transaction throughput and poor network scalability. For instance, Bitcoin's and Ethereum's transaction rates are only 7 and 20 transactions per second, respectively, which is clearly not sufficient for practical applications \cite{bitcoinscalability}. Although there have been several efforts towards improving the Bitcoin protocol itself, for instance, BIP \cite{bip102} and Bitcoin-NG \cite{eyal2016bitcoin}, other works have focused towards developing alternate high throughput and scalable permissionless blockchain protocols. One key outcome in this line of research is committee or shard-based protocols ~\cite{luu2016secure,kokoris2018omniledger,secure2018zilliqa} that operate by periodically partitioning the network of nodes into smaller non-overlapping committees, each of which processes a disjoint set of transactions (also called, a shard) in parallel with other committees. As each committee is reasonably small, the transaction throughput and scalability can be significantly improved by running a classical Byzantine consensus protocol such as PBFT \cite{castro1999practical} within each committee (and in parallel across all committees) rather than the traditional PoW based competition as used in Bitcoin. The idea of parallelizing the tasks of transaction processing and consensus by partitioning the processor network into committees is very promising and is already seeing deployment \cite{ethereumupgrade}. \begin{figure*}[t] \centering \includegraphics[scale= 0.29]{SySModelv3.pdf} \caption{{\small A shard-based blockchain system model. \emph{Step 1:} All nodes generate their IDs using a PoW defined mechanism. \emph{Step 2:} IDs and transactions will be distributed among shards, given the generated IDs. There exist $2^{s}$ shards. Each shard accepts up to $c$ IDs. 
\emph{Step 3:} Each shard runs a PBFT mechanism to validate the assigned transaction sets. At least $\tau$ IDs have to agree on the output of each shard. \emph{Step 4:} Final committee merges the agreed values of shards and create a final block, that will be added to blockchain. In this example, the attacker breaks the consensus protocol in Shard $1$ (i.e., BCP attack) and inserts fake transactions in the output of Shard $k$ (i.e., GFT attack). }} \label{fig:SysModel} \end{figure*} Committee participation in popular shard-based protocols such as Elastico~\cite{luu2016secure} happen by nodes generating verifiable IDs using some pre-defined PoW puzzle at the beginning of each epoch - only nodes that are able to generate valid IDs are able to participate in the consensus process in that epoch. However, it should be easy to see that the ID generation process easily lands itself to a Sybil attack. An adversary (valid node in the network) can leverage its computational or hash-power to generate a large number of Sybil IDs and increase its participation in shards. The adversary, by means of the generated Sybil IDs, can compromise the intra-committee consensus protocol to either prevent new transaction blocks from being added or to add fake transactions to the blockchain. Existing shard-based protocols assume that all nodes have the same hash-power, and thus the PoW based ID generation mechanism with an appropriate difficulty is not prone to such Sybil attacks. This assumption about the uniformity of hash-power across nodes generally does not hold, making such Sybil attacks feasible. Despite this, there has been very little effort within the research community to analytically study and combat the threat of Sybil attacks in shard-based blockchains. In this paper, we attempt to address this research gap. 
Specifically, we investigate two different types of Sybil attacks in a representative shard-based blockchain system such as Elastico: (i) Break Consensus Protocol (or BCP) attack where the goal of the adversary is to thwart the consensus process and (ii) Generate Fake Transaction (or GFT) attack where the goal of the adversary is to introduce fake or invalid transactions into the blockchain. By assuming a reasonable network and adversary model, we first derive bounds on the success probability of these attacks and then theoretically analyze the necessary conditions to achieve success in these attacks. We further validate our analytical results by means of numerical simulations for a variety of system and network parameters. These analytical results and simulations shed further light on the computational or hash-power requirement for an adversary to compromise the consensus protocol of shard-based protocols and the choice of system and network parameters that can significantly reduce the probability of such attacks. The remainder of this paper is structured as follows. In Section~\ref{sec:SYSModel}, we introduce our system model for a Sybil attack on a shard-based blockchain. In Section~\ref{sec:analytical}, we present our analytical results for the probability of a successful Sybil attack. Section~\ref{sec:numerical} presents the simulation results, followed by related work and conclusions in Section~\ref{sec:ralated work} and Section~\ref{sec:conclusion}. \section{System Model} \label{sec:SYSModel} In this section, we first briefly outline the operational details of a shard-based permissionless blockchain system such as Elastico and then discuss the details of Sybil attacks that may be possible in such a network.
\subsection{Shard-Based Blockchain Models} The operation of Elastico, and most other shard-based permissionless blockchain networks, can be fully described by means of four sequential steps that needs to be executed in each time period or epoch, as outlined in Figure~\ref{fig:SysModel}. We assume that the blockchain network comprises of $N$ nodes (or processors) with different computational capabilities or hash-power. This is in contrast with \cite{kokoris2018omniledger} and \cite{luu2016secure}, where it is assumed that all nodes have the same power. The hash-power, which specifies the number of hash computations that can be performed by a node per second, is denoted by $h$. To elucidate the presentation, Table~\ref{vartable} summarizes the symbols (and their meanings) used throughout the paper. \begin{table}[t] \caption{List of Notations} \vspace{-6pt} \begin{center} \rowcolors{2}{gray!20}{white} \begin{tabular}{l |p{6.5cm}} \hline \textbf{Symbol} &\textbf{Definition} \\ \hline \hline $N$ & Number of nodes in the network \\ $N^{*}$ & Total required IDs at each epoch ($N^{*} <N$) \\ $M$ & Number of IDs generated by the adversary \\ $t_i$ & The time of epoch $i$\\ $L$ & The length of output of secure hash function (bit)\\ $L^{t_i}$ & The length of target value (bit) at epoch $t_i$\\ $c$ & Capacity of each shard \\ $h$ & Hash-power of each processor\\ $s$ & Represent the number of shards ($2^{s}$) \\ $T_I$ & Initialization time needed for ID generations \\ $\tau$ & Consensus threshold \\ $x$ & Number of chosen adversary's IDs (Random Variable) \\ $Dif(t_i)$ & Difficulty of solving PoW puzzle at epoch $t_i$ \\ $p(t_i)$ & The probability of finding a correct ID at epoch $t_i$\\ $P\{x=n\}$ & The probability of selecting $n$ adversary's ID\\ $P_{c-\tau +1}$ & The probability of having at least $c-\tau +1$ adversary's IDs in one shard, if $n \in (c-\tau ,c]$ \\ $P'_{c-\tau +1}$ & The probability of having at least $c-\tau +1$ adversary's IDs in one shard, if $n \in 
(c,2^{s}(c-\tau)]$ \\ $P''_{c-\tau +1}$ & The probability of having at least $c-\tau +1$ adversary's IDs in one shard, if $n > 2^{s}(c-\tau)$ \\ $P_{\tau}$ &The probability of having at least $\tau$ adversary's IDs in one shard, if $n \in [\tau ,c]$ \\ $P'_{\tau}$ & The probability of having at least $\tau$ adversary's IDs in one shard, if $n \in (c,2^{s}(\tau -1)]$ \\ $P''_\tau$ & The probability of having at least $\tau$ adversary's IDs in one shard, if $n > 2^{s}(\tau -1)$ \\ $P_B$ & The probability of successful BCP attack\\ $P_G$ & The probability of successful GFT attack\\ \hline \end{tabular} \end{center} \label{vartable} \vspace{-6pt} \end{table} Similar to any permissionless system, nodes in Elastico do not have any predefined identity assigned by a trusted third-party. In the first step of Elastico, as shown in Figure~\ref{fig:SysModel}, each node attempts to generate a verifiable and pseudorandom ID which will enable it to participate in the rest of the steps in that time period or epoch. The nodes use the solution of a Proof-of-Work (PoW) puzzle with a network-determined difficulty to decide if they have arrived at a valid ID, as described next. Let $Hash()$ be the hash function (Elastico employs $SHA-256$) employed by a node in the blockchain network and let $IP$ and $PK$ denote a node's network address and public key, respectively. A publicly-known pseudo-random string \emph{epochRandomness}, generated at the end of the previous epoch of the protocol (to avoid puzzle pre-computation), is used as a seed for the PoW puzzle. Each node in the first step of the protocol attempts to solve the PoW puzzle by finding a nonce such that $Hash(epochRandomness||IP||PK||nonce)$ is smaller than some network-determined \emph{target} value. The target value which determines the difficulty of the PoW puzzle is adapted by the network during each epoch based on the network-wide hash-power.
Let's denote the target by $L^{t_i}$ bits, where $L$ is the size of the message digest (in terms of bits) and $t_i$ is the corresponding time epoch. In other words, a valid ID value during epoch $t_i$ must be smaller than $2^{L^{t_i}}$ and a node successful in generating such a valid ID assumes it as its own ID during the later steps of the protocol. It should be noted that all nodes must generate their ID during a given \emph{initialization time} $T_I$, defined by the protocol. If they cannot solve the puzzle within this time, they will not possess a valid ID to join the rest of the network and participate in the protocol. It should be clear that nodes with a higher hash-power have a higher probability of solving the ID generation PoW puzzle and thus participating in the protocol, compared to nodes with lower hash-power. After ID generation, in step 2 the generated node IDs and distributed transactions (which may contain both valid and fake/invalid transactions) are randomly distributed (or partitioned) into different \emph{shards} or committees for validation. In Elastico, there exist a total of $2^{s}$ shards, where $s$ is a network-defined parameter. Each node will be placed in a shard according to the last $s$ bits of its ID. If the \emph{capacity} of each shard is denoted by $c$, then it is clear that the minimum number of valid IDs required to execute a single epoch (all steps) of a shard-based blockchain protocol such as Elastico is $N^* = 2^{s}\times c$. The processors discover IDs of other processors in their shard by communicating with each other. In the third step, processors of each shard simultaneously validate the transaction set assigned to that shard and agree on a consensus transaction set (within that shard) using the PBFT \cite{castro1999practical} and \cite{buchman2016tendermint} algorithm.
Let $\tau$ denote the consensus threshold for each shard, i.e., if at least $\tau$ processors in a shard agree on a transaction set, then consensus within the shard is successful and the consensus transaction set is added to the shard's output. In other words, the consensus protocol within a shard successfully outputs a valid consensus transaction set even if $c - \tau$ nodes within the shard do not cooperate and/or behave maliciously. In Figure~\ref{fig:SysModel}, we see this case in Shards $2$ and $n-1$. Post the intra-shard consensus, the leader node within each shard sends the signed value of the consensus transaction set, along with the signatures of the contributing shard nodes, to a final consensus committee (step 4). The final committee of processors, chosen based on their ID, merges the consensus transaction sets from each shard to create a final block which is eventually appended to the blockchain. Each final committee member first validates that the values (consensus transaction sets) received from each shard is signed by at least $c/2 + 1$ members of the shard, and then computes the ordered set union of all inputs. Finally, nodes of final committee execute PBFT to determine the consensus final block, which is signed and gets appended as the next block to the blockchain. \subsection{Threat Model } \label{subsec:threatmodel} We assume that the adversary or attacker ($\mathcal{A}$) in this setup has enough hash-power to generate more than one IDs (during step 1, as outlined above) and attempts to launch a \emph{Sybil attack} to disrupt the operation of the shard-based blockchain protocol. Although we formally model adversarial capabilities assuming a shard-based blockchain protocol such as Elastico, the model (and the results) can easily be generalized to other shard-based protocols. Before outlining the specific Sybil attacks that could be carried out by the adversary, let us first characterize the difficulty of an adversary in generating Sybil nodes. 
As outlined earlier, each node or processor uses the solution of a PoW puzzle as an ID such that in epoch $t_i$ the selected ID must be smaller than some network-agreed target value $2^{L^{t_i}}$. Let $MaxTarget$ denote the maximum possible value of the target. As $MaxTarget$ is observed in the first epoch, $MaxTarget = 2^{L^{t_1}}$ (e.g., in Bitcoin $MaxTarget = 2^{224}$ \cite{o2014bitcoin}). Given $MaxTarget$, we define the \emph{difficulty} of solving a PoW puzzle in epoch $t_i$ as: \begin{equation}\label{eq:Diff} Dif(t_i)=\frac{MaxTarget}{2^{L^{t_i}}}. \end{equation} It is easy to see that, as the target value $2^{L^{t_i}}$ during a particular time epoch $t_i$ reduces, the PoW puzzle for ID computation becomes harder to solve for the nodes or processors, which is indicated by a higher difficulty value $Dif(t_i)$. Now, the probability of finding a valid ID during epoch $t_i$ is given by: \begin{equation}\label{eq:pti} p(t_i)=\frac{2^{L^{t_i}}}{2^{L}}. \end{equation} where $2^{L}$ denotes the message digest space of the hash function $Hash()$ using the PoW puzzle. Given Equation~(\ref{eq:Diff}), $p(t_i)$ can be rewritten as: \begin{equation}\label{pID1} p(t_i)=\frac{\frac{2^{L^{t_1}}}{Dif(t_i)}}{2^{L}}=\frac{2^{L^{t_1}}}{Dif(t_i)\times 2^L}. \end{equation} Equation~(\ref{pID1}) above represents the formal relationship between the difficulty of finding a valid PoW puzzle solution (i.e., ID in this case) and the success probability of solving the puzzle during epoch $t_i$. Now, let us assume that the adversary $\mathcal{A}$'s hash-power (or hash computation capability) is $h^{\mathcal{A}}$. In other words, during the initialization period $T_I$ (step 1) of an epoch $t_i$, $\mathcal{A}$ can generate a maximum of $h^{\mathcal{A}} \times {T_I}$ potential solutions (or message digests), of which only those that are smaller than $2^{L^{t_i}}$ (target value during $t_i$) can be used as valid IDs.
Let's further assume that $\mathcal{A}$ can find $M$ valid solutions or IDs (i.e., those that satisfy the puzzle or are within the target value) during $T_I$. Thus, $M$ can be calculated by: \begin{equation}\label{MID} M = p(t_i) \times {h^{\mathcal{A}} \times T_I}. \end{equation} Substituting for $p(t_i)$ from Equation~(\ref{pID1}) we get: \begin{equation}\label{MID2} M = \frac{2^{L^{t_1}}}{Dif(t_i)\times 2^L} \times {h^{\mathcal{A}} \times T_I}. \end{equation} Now, a PoW puzzle based identity or ID generation mechanism is said to be \emph{strictly Sybil-resistant} if and only if the value of $M$ in the mechanism can be restricted to less than 2. In other words, for the above PoW puzzle based identity or ID generation mechanism in Elastico to be strictly Sybil-resistant, the solution difficulty $Dif(.)$ and initialization time $T_I$ should be such that for a given adversary hash-power $h^{\mathcal{A}}$, $L$, and $L^{t_1}$, $M$ is always smaller than 2, i.e., $\mathcal{A}$ should be able to generate at most one ID during $T_I$. In this paper, we assume that the shard-based blockchain protocol's PoW-based identity generation mechanism is not strictly Sybil-resistant and that the adversary's hash-power is large enough to generate two or more IDs (i.e., $M \geq 2$) during the initialization phase (step 1) of any epoch. The adversary then employs these numerous valid IDs for placing itself in multiple shards in order to carry out different types of Sybil attacks (as described below), the goal of which is to subvert the correct operation of the blockchain protocol. In contrast to the adversary, we assume that each honest node in the network generates only a single valid ID during $T_I$ in each epoch. Now, let us describe in further detail the two different types of Sybil attacks that can be carried out by the adversary. \noindent {\bf 1.
Break Consensus Protocol (BCP) Attack:} In order to accomplish the BCP attack, the goal of which is to disrupt the shard-based consensus process, the adversary will need to generate more than $c-\tau$ valid IDs in a (target) shard. This threshold of valid IDs will enable the adversary to break the intra-shard consensus protocol in that shard, thereby preventing insertion of some transactions (specifically, from the target shard) into the blockchain. An instance of the BCP attack is depicted in Figure~\ref{fig:SysModel}, where the attacker successfully inserts more than $c-\tau$ Sybil IDs in Shard 1. \noindent {\bf 2. Generate Fake Transaction (GFT) Attack:} In order to accomplish the GFT attack, the goal of which is to include fake (including, double spending or invalid) transactions in the blockchain blocks by taking over the consensus process, the adversary will need to add at least $\tau$ valid IDs in a (target) shard. By doing so, the adversary aims to control and manipulate the consensus process (i.e., the PBFT algorithm) using his Sybil IDs so that intra-shard consensus could be arrived on a desired set of fake transactions, which eventually get inserted into the blockchain block after final consensus. Figure~\ref{fig:SysModel} illustrates the GFT attack on shard $k$. Our overarching goal is to determine bounds on the success probabilities, given different network and protocol parameters, of carrying out the BCP and GFT Sybil attacks described above. In the following section, we present theoretical analysis outlining the computation of these probability bounds. \section{Analytical results} \label{sec:analytical} The remaining operations (step 2 onward) of the shard-based blockchain protocol are initiated in each epoch only after receiving $N^*$ IDs from the ID pool generated during the initialization or identifier generation phase (step 1), as discussed earlier in Section \ref{sec:SYSModel}. 
Now, given the $M$ Sybil IDs (generated by the adversary) in the ID pool comprising a total of $M+N-1$ IDs, our first task is to analyze this ID selection. Let $x$ denote a random variable representing the number of Sybil IDs (generated by the adversary) chosen from the $M+N-1$ IDs generated during the initialization step (step 1). In other words, $x$ IDs belonging to the adversary while $N^*-x$ IDs belonging to the honest nodes are distributed among the various shards after the initialization period $T_I$ (step 1). The success probability of the various attacks described above is thus a function of the number of Sybil IDs that the adversary is able to generate and get distributed among the different shards in each epoch. From the discussion in Section~\ref{subsec:threatmodel}, it should be clear that if the number of Sybil IDs generated by an adversary is smaller than or equal to $c-\tau$, then the adversary cannot successfully execute the BCP or GFT attacks. Let $n$ denote the actual number of Sybil IDs chosen to be distributed among the various shards, i.e., $n \in \{ 1,2, \cdots, M \}$. Thus, we first need to calculate the probability $P\{x=n\}$ of selecting/choosing $n$ Sybil IDs or nodes from the entire pool of $M+N-1$ IDs in an epoch. This is given by the following Lemma. \begin{lemma} If $\mathcal{A}$ can generate $M$ Sybil IDs during $T_I$, then the probability of selecting $n$ Sybil IDs from the ID pool is \begin{equation}\nonumber P\{x=n\}=\frac{{\binom Mn} {N-1 \choose N^*- n}}{{M+N-1 \choose N^*}}. \end{equation} \label{pr} \end{lemma} \begin{proof} The proof of this lemma follows trivially from the fact that $x$ follows a hypergeometric distribution \cite{ross2014first}. \end{proof} The following subsections will be devoted to the computation of the adversary's success probability in executing BCP and GFT Sybil attacks. Recall that Table~\ref{vartable} summarizes the definitions of the main probability bounds. 
\subsection{Probability of a Successful BCP Attack} Recall that for successfully executing the BCP attack, $\mathcal{A}$ must have more than $c-\tau$ (Sybil) IDs in at least one shard. Hence, if $M \leq c - \tau$, $\mathcal{A}$ cannot launch BCP attacks, i.e., the probability of a successful BCP attack would be zero. Thus, to calculate the success probability of a BCP attack, we first need to calculate the probability of having at least $c-\tau +1$ Sybil IDs in one shard, when $x=n$ Sybil IDs (generated by the adversary $\mathcal{A}$) have been chosen from the overall ID pool (at the end of the initialization step). The following lemma captures this probability. \begin{lemma} In a shard-based blockchain protocol, when $n$ Sybil IDs (generated by the adversary $\mathcal{A}$) have been chosen from the ID pool after the initialization step, the probability of having at least $c-\tau +1$ Sybil IDs in one shard is: \begin{equation}\nonumber P_{c-\tau +1}=\frac{2^{s}\sum\limits_{m=c-\tau +1}^{n}{n \choose m}{{N^*}-n \choose c-m}}{\binom {N^*}c}. \end{equation} where $c-\tau +1 \leq n$. \label{P(c-tau+1)} \end{lemma} \begin{proof} Given the number of shards (i.e., $2^s$), the capacity of each shard (i.e., $c$), and the total number of selected IDs (i.e., $N^*$), the sample space and the space of the desirable event (i.e., having at least $c-\tau +1$ Sybil IDs in one shard) will be \begin{equation}\label{total} \fontsize{8}{5} n(S)={{N^*} \choose c}{{N^*}-c \choose c}...{c \choose c}=\frac{{N^*}!}{c! c!...c!}=\frac{{N^*}!}{{c!}^{2^{s}}}. \end{equation} \begin{equation} \fontsize{8}{5} \begin{aligned} n(E)=& 2^{s}{n \choose c-\tau +1}{{N^*}-n \choose \tau -1}{{N^*}-c \choose c}..{c \choose c}\\&+2^{s}{n \choose c-\tau +2}{{N^*}-n \choose \tau -2}{{N^*}-c \choose c}..{c \choose c}\\&+...+2^{s}{n \choose n}{{N^*}-n \choose c-n}{{N^*}-c \choose c}..{c \choose c}. 
\end{aligned} \label{n(E1)} \end{equation} The probability $P_{c-\tau +1}$ can then be computed by \begin{equation}\nonumber \fontsize{8}{5} \begin{aligned} P_{c-\tau +1}=\frac{n(E)}{n(S)}=\frac{2^{s}\sum\limits_{m=c-\tau +1}^{n}{n \choose m}{{N^*}-n \choose c-m}}{{{N^*}\choose c}}. \end{aligned} \end{equation} \end{proof} \vspace{-10pt} Please note that the above closed-form solution is correct if $n \leq c$. But for the values of $n$ bigger than $c$ we cannot employ the same calculation and deriving a closed-form solution was not possible. In the following analysis we employ numerical analysis to obtain these values, and we leave this derivation for our future work. The following theorem calculates the successful probability of BCP attack, when the number of Sybil IDs is smaller than or equal to $c$. \begin{theorem}\label{thm:BCP1} In a shard-based blockchain protocol, when the number of Sybil IDs (generated by the adversary $\mathcal{A}$) is smaller than or equal to $c$ and greater than $c-\tau$, the probability of a successful BCP attack in at least one shard is: \begin{equation} P_{B}=\frac{2^{s}\sum\limits_{n=c- \tau +1}^{M}\sum\limits_{m=c- \tau +1}^{n}{M \choose n}{N-1 \choose {N^*}-n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*} \choose c}}. \end{equation} \end{theorem} \begin{proof} The probability of a successful BCP attack in at least one shard can be computed as: \begin{equation} \fontsize{8}{5} \begin{aligned} P_{B}&=P\{x=c-\tau +1\}P_{c-\tau +1} + P\{x=c-\tau +2\}P_{c-\tau +1}\\&+...+P\{x=M\}P_{c-\tau +1} =(\sum\limits_{n=c-\tau +1}^{M}P\{x=n\})P_{c-\tau +1}. 
\end{aligned} \label{thm:BCP1-1} \end{equation} Given Lemma \ref{pr} and Lemma \ref{P(c-tau+1)}, we can rewrite $P_B$ by: \begin{equation}\nonumber \fontsize{8}{5} \begin{aligned} &P_{B}=(\frac{\sum\limits_{n=c-\tau +1}^{M}{M \choose n}{N-1 \choose {N^*}- n}}{{M+N-1 \choose {N^*}}})(\dfrac{2^{s}\sum\limits_{m=c-\tau +1}^{n}{n \choose m}{{N^*}-n \choose c-m}}{{{N^*}\choose c}})\\ &=\frac{2^{s}\sum\limits_{n=c-\tau +1}^{M}\sum\limits_{m=c-\tau +1}^{n}{M \choose n}{N-1 \choose {N^*}- n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*}\choose c}}. \end{aligned} \end{equation} \end{proof} Having computed this probability, we now attempt to determine the impact of different shard-based blockchain system design parameters on the robustness against such Sybil attacks. In the following two theorems, we present similar success probability estimations for higher values of $M$. \begin{theorem}\label{thm:BCP2} In a shard-based blockchain protocol, when the number of Sybil IDs (generated by the adversary $\mathcal{A}$) is smaller than or equal to $2^s(c-\tau)$ and greater than $c$, the probability of a successful BCP attack in at least one shard is: \begin{equation} \fontsize{8}{5} \begin{aligned} P_{B} &=\frac{2^{s}\sum\limits_{n=c-\tau +1}^{c}\sum\limits_{m=c-\tau +1}^{n}{M \choose n}{N-1 \choose {N^*}- n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*}\choose c}} \\ &+\frac{\sum\limits_{n=c+1}^{M}{M\choose n}{N-1\choose {N^*}-n}}{{M+N-1\choose {N^*}}}P'_{c-\tau +1}. \end{aligned} \end{equation} \end{theorem} \begin{proof} Similar to the proof of Theorem~\ref{thm:BCP1}, this probability can be calculated by: \begin{equation} \fontsize{8}{5} \nonumber \begin{aligned} P_{B}&=(\sum\limits_{n=c-\tau +1}^{c}P\{x=n\})P_{c-\tau +1}+(\sum\limits_{n=c+1}^{M}P\{x=n\})P'_{c-\tau +1}. 
\end{aligned} \end{equation} We can then replace $P_{c-\tau +1}$ to find the probability of a successful attack by \begin{equation} \fontsize{8}{5} \nonumber \begin{aligned} P_{B}=&\frac{2^{s}\sum\limits_{n=c-\tau +1}^{c}\sum\limits_{m=c-\tau +1}^{n}{M \choose n}{N-1 \choose {N^*}- n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*}\choose c}}\\&+\frac{\sum\limits_{n=c+1}^{M}{M\choose n}{N-1\choose {N^*}-n}}{{M+N-1\choose {N^*}}}P'_{c-\tau +1}. \end{aligned} \end{equation} \end{proof} \begin{theorem}\label{thm:BCP3} In a shard-based blockchain protocol, when the number of Sybil IDs (generated by the adversary $\mathcal{A}$) is greater than $2^s(c-\tau)$, the probability of a successful BCP attack in at least one shard is: \begin{equation} \begin{aligned} &P_{B} =\frac{2^{s}\sum\limits_{n=c-\tau +1}^{c}\sum\limits_{m=c-\tau +1}^{n}{M \choose n}{N-1 \choose {N^*}- n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*}\choose c}}\\ &+\frac{\sum\limits_{n=c+1}^{2^{s}(c-\tau)}{M\choose n}{N-1\choose {N^*}-n}}{{M+N-1\choose {N^*}}}P'_{c-\tau +1}+\frac{\sum\limits_{n=2^{s}(c-\tau)+1}^{min(M,{N^*})}{M\choose n}{N-1\choose {N^*}-n}}{{M+N-1\choose {N^*}}}. \end{aligned} \end{equation} \end{theorem} \begin{proof} Similar to earlier proofs, we can compute this probability by: \begin{equation} \fontsize{8}{5} \begin{aligned} P_B &=(\sum\limits_{n=c-\tau +1}^{c}P\{x=n\})P_{c-\tau +1}+(\sum\limits_{n=c+1}^{2^{s}(c-\tau)}P\{x=n\})P'_{c-\tau +1}\\&+(\sum\limits_{n=2^{s}(c-\tau)+1}^{min(M,{N^*})}P\{x=n\})P''_{c-\tau +1}. \end{aligned} \label{thm:BCP3-1} \end{equation} Since $n > 2^{s}(c-\tau )$, $c-\tau$ Sybil IDs (generated by the adversary) will definitely reside in each shard. In other words, given the total number of Sybil IDs generated by the adversary, at least $c-\tau +1$ of these IDs will be placed in at least one shard. When this happens, $\mathcal{A}$ can compromise the consensus protocol of this shard with probability 1. So $P''_{c-\tau +1}=1$. 
Moreover based on Lemma \ref{pr} and Lemma \ref{P(c-tau+1)}, Equation \eqref{thm:BCP3-1} can be rewritten as: \begin{equation}\nonumber \fontsize{8}{5} \begin{aligned} &P_{B}=\frac{2^{s}\sum\limits_{n=c-\tau +1}^{c}\sum\limits_{m=c-\tau +1}^{n}{M \choose n}{N-1 \choose {N^*}- n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*}\choose c}}\\&+\frac{\sum\limits_{n=c+1}^{2^{s}(c-\tau)}{M\choose n}{N-1\choose {N^*}-n}}{{M+N-1\choose {N^*}}}P'_{c-\tau +1}+\frac{\sum\limits_{n=2^{s}(c-\tau)+1}^{min(M,{N^*})}{M\choose n}{N-1\choose {N^*}-n}}{{M+N-1\choose {N^*}}}. \end{aligned} \end{equation} \end{proof} \vspace{-12pt} In the following, we will similarly analyze the success probability of an adversary in executing the GFT attack. \subsection{Probability of a Successful GFT Attack} In GFT attack, $\mathcal{A}$ would like to change the output of at least one shard, and insert his favorite transactions in the blockchain. The calculation of successful probability of this attack is similar to the previous section (i.e., BCP attack), but the number of required Sybil IDs is different. In this attack, $\mathcal{A}$ must have more than $\tau$ Sybil IDs in at least one shard. Hence, if $M < \tau$, the adversary cannot successfully execute the GFT attacks (i.e., probability of success is zero). In order to estimate the success probability of an adversary in carrying out GFT attacks, we first need to calculate the probability of having at least $\tau$ Sybil IDs in one shard, when a total of $x=n$ Sybil IDs (generated by the adversary) have been chosen from the pool of all generated IDs (during the initialization step). The following lemma calculates this probability. 
\begin{lemma} In a shard-based blockchain protocol, when $n$ Sybil IDs (generated by the adversary $\mathcal{A}$) have been chosen or selected from the ID pool after the initialization step, the probability of having at least $\tau $ Sybil IDs in one shard is: \begin{equation}\nonumber \fontsize{8}{6} P_\tau =\frac{2^{s}\sum\limits_{m=\tau}^{n}{n \choose m}{{N^*}-n \choose c-m}}{{{N^*}\choose c}}. \end{equation} where $\tau \leq n$. \label{P tau} \end{lemma} \begin{proof} According to the Lemma \ref{P(c-tau+1)}, the sample space is equal to Equation~\eqref{total}. Similarly, by considering all possible combinations of Sybil ID distributions within the shards, we can compute the space of the desirable event (i.e., having at least $\tau $ Sybil IDs in one shard) by: \begin{equation}\nonumber \fontsize{8}{6} \begin{aligned} n(E)=& 2^{s}{n \choose \tau}{{N^*}-n \choose c-\tau}{{N^*}-c \choose c}..{c \choose c}\\&+2^{s}{n \choose \tau +1}{{N^*}-n \choose c-\tau -1}{{N^*}-c \choose c}..{c \choose c}\\&+...+2^{s}{n \choose n}{{N^*}-n \choose c-n}{{N^*}-c \choose c}..{c \choose c} \end{aligned} \label{n(E2)} \end{equation} The probability $P_\tau $ can then be computed by taking the ratio of the event space to the sample space as: \begin{equation} P_\tau =\dfrac{n(E)}{n(S)}=\dfrac{2^{s}\sum\limits_{m=\tau}^{n}{n \choose m}{{N^*}-n \choose c-m}}{{{N^*}\choose c}}. \end{equation} \end{proof} \vspace{-6pt} Similar to BCP attack, the above closed-form solution is correct if $n \leq c$. Finding a closed-form solution for the case where $n > c$ is a part of our future challenges. The following theorem calculates the successful probability of GFT attack, when the number of Sybil IDs is smaller than or equal to $c$. 
\begin{theorem}\label{thm:GFT1} In a shard-based blockchain protocol, when the number of Sybil IDs (generated by the adversary $\mathcal{A}$) is smaller than or equal to $c$ and greater than $\tau -1$, the probability of a successful GFT attack in at least one shard is: \begin{equation} P_{G}=\frac{2^{s}\sum\limits_{n=\tau}^{M}\sum\limits_{m=\tau }^{n}{M \choose n}{N-1 \choose {N^*}-n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*} \choose c}}. \end{equation} \end{theorem} \begin{proof} This probability can be computed similar to the proof of Theorem~\ref{thm:BCP1} as: \begin{equation} \fontsize{8}{5} \begin{aligned} P_{G}&=(\sum\limits_{n=\tau}^{M}P\{x=n\})P_\tau. \end{aligned} \label{thm:GFT1-1} \end{equation} Based on Lemma \ref{pr} and Lemma \ref{P tau}, Equation~\eqref{thm:GFT1-1} can be rewritten as: \begin{equation}\nonumber \fontsize{8}{5} \begin{aligned} P_{G}&=(\dfrac{\sum\limits_{n=\tau}^{M}{M \choose n}{N-1 \choose {N^*}- n}}{{M+N-1 \choose {N^*}}})(\dfrac{2^{s}\sum\limits_{m=\tau}^{n}{n \choose m}{{N^*}-n \choose c-m}}{{{N^*}\choose c}}) \\& =\dfrac{2^{s}\sum\limits_{n=\tau}^{M}\sum\limits_{m=\tau}^{n}{M \choose n}{N-1 \choose {N^*}- n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*}\choose c}}. \end{aligned} \end{equation} \end{proof} Having computed this probability, we now attempt to determine the impact of different shard-based blockchain system design parameters on the robustness against such Sybil-based GFT attacks. In the following two theorems, we present similar success probability estimations for higher values of $M$. 
\begin{theorem}\label{thm:GFT2} In a shard-based blockchain protocol, when the number of Sybil IDs (generated by the adversary $\mathcal{A}$) is smaller than or equal to $2^s(\tau -1)$ and greater than $c$, the probability of a successful GFT attack in at least one shard is: \begin{equation}\label{Eqn_PG} \fontsize{8}{5} \begin{aligned} P_{G} &=\frac{2^{s}\sum\limits_{n=\tau}^{c}\sum\limits_{m=\tau}^{n}{M \choose n}{N-1 \choose {N^*}- n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*}\choose c}}+\frac{\sum\limits_{n=c+1}^{M}{M\choose n}{N-1\choose {N^*}-n}}{{M+N-1\choose {N^*}}}P'_\tau . \end{aligned} \end{equation} \end{theorem} \begin{proof} Similar to the proof of Theorem~\ref{thm:GFT1}, this probability can be calculated by: \begin{equation}\nonumber \fontsize{8}{5} \begin{aligned} P_{G}&= (\sum\limits_{n=\tau}^{c}P\{x=n\})P_\tau+(\sum\limits_{n=c+1}^{M}P\{x=n\})P'_\tau . \end{aligned} \label{thm:GFT2-1} \end{equation} It is then easy to show that $P_G$ is derived by Equation~(\ref{Eqn_PG}). \end{proof} \begin{theorem}\label{thm:GFT3} In a shard-based blockchain protocol, when the number of Sybil IDs (generated by the adversary $\mathcal{A}$) is greater than $2^s(\tau -1)$, the probability of a successful GFT attack in at least one shard is: \begin{equation}\label{eqn_GFT3} \fontsize{8}{5} \begin{aligned} P_{G} &=\frac{2^{s}\sum\limits_{n=\tau}^{c}\sum\limits_{m=\tau}^{n}{M \choose n}{N-1 \choose {N^*}- n}{n \choose m}{{N^*}-n \choose c-m}}{{M+N-1 \choose {N^*}}{{N^*}\choose c}}+\frac{\sum\limits_{n=c+1}^{2^{s}(\tau -1)}{M\choose n}{N-1\choose {N^*}-n}}{{M+N-1\choose {N^*}}}P'_\tau \\&+\frac{\sum\limits_{n=2^{s}(\tau -1)+1}^{min(M,{N^*})}{M\choose n}{N-1\choose {N^*}-n}}{{M+N-1\choose {N^*}}}. 
\end{aligned} \end{equation} \end{theorem} \begin{proof} Similarly, the probability of a successful GFT attack can be computed as: \begin{equation} \fontsize{8}{5} \begin{aligned} P_{G}&=(\sum\limits_{n=\tau}^{c}P\{x=n\})P_\tau+(\sum\limits_{n=c+1}^{2^{s}(\tau -1)}P\{x=n\})P'_\tau\\&+(\sum\limits_{n=2^{s}(\tau-1)+1}^{min(M,{N^*})}P\{x=n\})P''_\tau . \end{aligned} \label{thm:GFT3-1} \end{equation} Since $n > 2^{s}(\tau -1 )$, $\tau -1$ Sybil IDs (generated by the adversary) will definitely reside in each shard. In other words, given the total number of Sybil IDs generated by the adversary, at least $\tau$ of these IDs will be placed in at least one shard. When this happens, $\mathcal{A}$ can compromise the PBFT consensus protocol to change the output produced by this shard with probability 1. So $P''_{\tau}=1$ and we can rewrite Equation~\eqref{thm:GFT3-1} as shown by Equation~(\ref{eqn_GFT3}). \end{proof} In the following section, we will verify our closed-form solutions with the help of numerical simulations. \begin{figure*}[t] \centering \begin{subfigure}[t]{0.3\textwidth} \includegraphics[width=\linewidth]{BCPverification14nodes.png} \caption{} \label{BCP verification 14 nodes} \end{subfigure} \hspace{12pt} \begin{subfigure}[t]{0.3\textwidth} \includegraphics[width=\linewidth]{BCPverification200nodes.png} \caption{} \label{BCP verification 200 nodes} \end{subfigure} \hspace{12pt} \begin{subfigure}[t]{0.3\textwidth} \includegraphics[width=\linewidth]{GFTverification20nodes.png} \caption{} \label{GFT verification 20 nodes} \end{subfigure} \caption{Our model verification. (a) BCP attack simulations for $N=14$ with 4 shards. (b) BCP attack simulation with $N=200$ and 4 shards. 
(c) GFT attack simulation with $N=20$ and 4 shards.} \label{verification} \end{figure*} \section{Numerical Results and Model Verifications} \label{sec:numerical} We validate the correctness of our analytical results discussed earlier by conducting extensive numerical simulations as outlined next. We implement a Python-based simulator for Elastico~\cite{luu2016secure} and simulate the BCP and GFT Sybil attacks by considering an adversary with different hash-powers. Success probabilities of these BCP and GFT attacks during simulations are computed and compared with our previously determined analytical results. We investigate the effect of various system parameters on the success probability of these attacks, including the number of shards (i.e., $2^s$), capacity of each shard (i.e., $c$), total number of participating nodes in the network (i.e., $N$), and the threshold for consensus (i.e., $\tau$). \begin{figure*}[t!] \centering \begin{subfigure}[t]{0.24\textwidth} \includegraphics[width=\linewidth]{BCPfordiffsc100.png} \caption{} \label{fig_sim_num_shard_1} \end{subfigure} \begin{subfigure}[t]{0.24\textwidth} \includegraphics[width=\linewidth]{BCPfors2diffc.png} \caption{} \label{fig_sim_cap_shard_1} \end{subfigure} \begin{subfigure}[t]{0.24\textwidth} \includegraphics[width=\linewidth]{BCPfors3c100diffN.png} \caption{} \label{fig_sim_num_node_1} \end{subfigure} \begin{subfigure}[t]{0.24\textwidth} \includegraphics[width=\linewidth]{BCPfors2c100difftau.png} \caption{} \label{fig_sim_tau_1} \end{subfigure} \caption{The effect of (a) number of shards, (b) capacity of shard, (c) number of nodes, and (d) $\tau$ on successful BCP attack.} \label{BCP Attack} \end{figure*} \subsection{Validation of Analytical Results} We start by considering a small network of 14 participating nodes (i.e., $N=14$) and 4 shards (i.e., $s=2$), each with a capacity of 3 nodes (i.e., $c=3$) undergoing a BCP attack by assuming different adversarial hash-rates. 
Figure~\ref{BCP verification 14 nodes}, which represents the probability of a successful BCP attack under this simulated scenario, shows that results from our simulations align very closely to those obtained from our analytical results (i.e., Theorems~\ref{thm:BCP1}, \ref{thm:BCP2} and \ref{thm:BCP3}). Here, the hash-rate (shown on the x-axis) is computed as the ratio of the adversary's hash-power ($\mathcal{A}$) to the average hash power of the entire network. Figure~\ref{BCP verification 200 nodes} shows similar results for a larger network with parameters $N=200$, $c=50$, and $s=2$. Even in this case, it can be observed that our analytical results are in line with the simulated behavior of the Elastico network. We similarly verify the validity of our analytical results (Theorems~\ref{thm:GFT1}, \ref{thm:GFT2} and \ref{thm:GFT3}) for the GFT attacks. Figure \ref{GFT verification 20 nodes} shows the success probability of the GFT attacks in an Elastico blockchain network with parameters $N=20$, $c=4$, and $s=2$, and these results confirm that our analytical computation of these probabilities was correct. Next, for each attack scenario, we will employ simulations to demonstrate the impact of different system parameters on the success probability of the attacks. \subsection{BCP Numerical Analysis} Figure~\ref{BCP Attack} shows the success probability of BCP attacks for different hash-powers of the adversary. Figure~\ref{fig_sim_num_shard_1} shows that the BCP attack probability increases when we increase the number of shards. In this experiment, the capacity ($c$) of each shard is set to 100 (based on Elastico) and the value of $\tau $ is set to $\frac{2}{3}$. Our results show that an adversary who has 25\% of the hash-power of the network can compromise (and manipulate) the consensus algorithm employed by the shard-based protocol (e.g., PBFT). 
Figure~\ref{fig_sim_cap_shard_1} shows that the probability of successful BCP attacks decreases when the capacity ($c$) of each shard increases. We also evaluated the effect of the number of active/participating nodes on the probability of successful BCP attacks. We execute the simulations by setting the shard capacity ($c$) to 100, number of shards ($s$) to 4 and the threshold value $\tau$ to $\frac{2}{3}$. Results from these simulations show that the adversary ($\mathcal{A}$) needs to have 33\% of the hash-power of the whole network to launch a successful BCP attack. But accumulating 33\% of the network's hash-power will become more difficult as the total number of nodes increases. Finally, we investigate the effect of $\tau$ on the success probability of BCP attacks. The results are shown in Figure~\ref{fig_sim_tau_1}. We vary the value of $\tau$ from $0.52$ to $0.75$. Our results show that if the value of $\tau$ changes from $\tau _1$ to $\tau _2$, the adversary needs about $1-(\tau _2 - \tau _1)$ times the previous hash-power, in order to achieve the same attack success probability for $\tau_1$. For example, if the BCP attack success probability of an adversary with hash-power $HP$ was $P_B$ with $\tau =\frac{2}{3}$, then the adversary would need to accumulate a hash-power of $0.91 \times HP$ in order to achieve the same success probability ($P_B$) with $\tau = 0.75$. 
\begin{figure*}[!h] \centering \begin{subfigure}[t]{0.24\textwidth} \includegraphics[width=\linewidth]{GFTdiffsc100.png} \caption{} \label{fig_sim_num_shard_2} \end{subfigure} \begin{subfigure}[t]{0.24\textwidth} \includegraphics[width=\linewidth]{GFTs2diffc.png} \caption{} \label{fig_sim_cap_shard_2} \end{subfigure} \begin{subfigure}[t]{0.24\textwidth} \includegraphics[width=\linewidth]{GFTs3c100diffN.png} \caption{} \label{fig_sim_num_node_2} \end{subfigure} \begin{subfigure}[t]{0.24\textwidth} \includegraphics[width=\linewidth]{GFTs2c100difftau.png} \caption{} \label{fig_sim_tau_2} \end{subfigure} \caption{The effect of (a) number of shards, (b) shard capacity, (c) number of nodes, and (d) $\tau$ on successful GFT attack.} \label{GFT Attack} \end{figure*} \subsection{GFT Numerical Analysis} Figure~\ref{GFT Attack} shows the success probability of GFT attacks for different values of the adversary's hash-power. As shown in Figure~\ref{fig_sim_num_shard_2}, if the adversary's hash-power is less than 50\% of the network hash-power, it cannot execute a successful GFT attack. We also observe that the GFT attack probability increases when the number of shards increases. Moreover, Figure~\ref{fig_sim_cap_shard_2} shows that as the shard capacity ($c$) increases, the GFT attack success probability decreases. It should be noted that this trend is observable only when the adversary's hash-power is less than 65\% of the total network hash-power. If the adversary has more than 65\% of the total network hash-power, increasing the capacity ($c$) has no significant impact on $P_G$. Figure~\ref{fig_sim_num_node_2} shows the effect of the total number of participating nodes in the network on the success of the GFT attack. Our simulation results show that if the adversary holds 65\% of the network's hash-power, it can successfully perform the GFT attack when the total number of participating nodes is $2000$ or less. 
However, as the total number of participating nodes increases (e.g., $3000$), the success of the GFT attack is not guaranteed. Figure~\ref{fig_sim_tau_2} shows the impact of $\tau$ on the success of GFT attacks. Here we see a similar trend as in the case of BCP attacks, where with higher values of $\tau$ the adversary needs more hash-power to conduct a successful GFT attack. Table~\ref{tab:Final} summarizes a series of simulations of Sybil attacks with different parameters. The results show that we can avoid successful GFT and BCP attacks with an optimal selection of the number of shards and their capacity even if the adversary's hash-power is about 25\% of the average hash-power of the network. But if the adversary's hash-power is more than 33\%, it can successfully deploy a BCP attack. However, with a maximum of 16 shards with a capacity of 600, we can decrease the probability of a successful GFT attack to less than $0.001$. We believe these results can help designers to avoid such attacks. \begin{table}[t] \caption{Numerical Evaluation of Sybil attacks.}\label{tab:Final} \centering \begin{tabular}{|c|c|c|c|c|c|} \hline \textbf{$h^{\mathcal{A}}$} & \textbf{$\tau$} & {\#shards} & \textbf{$c$} & $P_{B}$ & $P_{G}$ \\ \hline $25\%$ & $\frac{2}{3}$ & At most $16$ & At least $600$ & $\leq 10^{-4}$ & 0 \\ \hline $[33\%, 53\%]$ & $\frac{2}{3}$ & - & - & $\geq0.8$ & $\leq 0.005$ \\ \hline $56\%$ & $\frac{2}{3}$ & At most $16$ & At least $600$ & $1$ & $\leq 0.001$\\ \hline Above $66\%$ & $\frac{2}{3}$ & - & - & $1$ & $\geq 0.75$ \\ \hline \end{tabular} \end{table} \section{Related Work} \label{sec:ralated work} To put our current research effort in perspective, we now briefly outline some other efforts in the literature towards improving the scalability and security of permissionless blockchains. Bitcoin-NG \cite{eyal2016bitcoin} was the first attempt to improve the transaction throughput of Bitcoin's consensus protocol \cite{nakamoto2008bitcoin} by employing the concept of \textit{microblocks}. 
Due to the significant drawbacks of leader based consensus protocols such as Bitcoin and Bitcoin-NG, the research community's focus shifted to employing committee-based consensus algorithms \cite{bracha1987log} for permissionless blockchain systems. For instance, Decker et al. \cite{decker2016bitcoin} proposed one of the first committee-based consensus protocols, named \emph{PeerCensus}, followed by several other proposals \cite{AbrahamMNRS16,pass2017hybrid,kogias2016enhancing,gilad2017algorand} in a similar direction. The poor scalability of single committee consensus solutions motivated the design of \emph{multiple committee} based blockchain consensus protocols, where the main idea is to split the pending transaction set among multiple shards, which then processes these shards in parallel. \emph{RSCoin} \cite{DanezisM:RSCoin} was proposed as a shard-based blockchain for centrally-banked cryptocurrencies, while \emph{Elastico} \cite{luu2016secure} was the first fully distributed shard-based consensus protocol for public blockchains. Recently proposed shard-based protocols, such as, \emph{Omniledger} \cite{kokoris2017omniledger} and \emph{Rapidchain} \cite{zamanirapidchain} attempt to improve upon the scalability and security guarantees of Elastico, while \emph{PolyShard} \cite{li2018polyshard} proposes to employ techniques from the \emph{coded computing} paradigm \cite{yu2018lagrange} to simultaneously improve transaction throughput, storage efficiency and security. Recently, several novel approaches for shard-based consensus protocols for blockchains have also been proposed \cite{dang2018towards,secure2018zilliqa,poon2017plasma}, and a good review of that various shard-based blockchain protocols can be found in \cite{wang2019sok}. 
In the direction of security-related analysis and securing shard-based blockchain protocols, Jusik Yun et al.~\cite{yun2019trust} observed that, as the number of validators per-shard in shard-based blockchain protocols is generally smaller than the number of validators in traditional single leader-based protocols, it makes shard-based protocols more vulnerable to 51\% attacks. They then present a novel Trust-Based Shard Distribution scheme, which distributes the assignment of potential malicious nodes in the network to shards, in order to solve this problem. The vulnerability of blockchain consensus protocols, including shard-based protocols, to attacks that employ Sybil nodes \cite{douceur2002sybil} is clear and well-documented \cite{sankar2017survey,conti2018survey}. In this direction, TrustChain \cite{otte2017trustchain} proposed a novel Sybil-resistant algorithm, called NetFlow, which overcomes Sybil attacks in traditional single leader blockchain architectures by determining and employing node trustworthiness during the consensus process. In one of the first mathematical efforts, researchers in \cite{hafid2019new} analyzed the security of three shard-based blockchains (\cite{luu2016secure}, \cite{kokoris2018omniledger} and \cite{zamani2018rapidchain}). They computed the upper bound on the probability of increasing the number of malicious nodes for one committee, and thus for each epoch, using tail inequalities for sums of bounded hypergeometric and binomial distributions. However, they do not provide any simulations to validate their probability results. \section{Conclusion} \label{sec:conclusion} In this paper, we presented an analytical model to calculate the probability of successful Sybil attacks on shard-based permissionless blockchains. As an example, we modeled Elastico and defined two types of Sybil attacks, named BCP (Break Consensus Protocol) and GFT (Generate Fake Transaction). 
We showed that we can calculate the probability of successful attacks given different system parameters, such as number of shards, the capacity of shards, and the number of nodes in the blockchain network. The results have been verified with numerical simulations. We showed that by carefully designing our blockchain network we can avoid Sybil attacks. Our results showed that Elastico is not robust against a Sybil attack, with an adversary who has 25\% of the hash-power of the network. In this case the adversary can break the consensus protocol in at least one shard with probability equal to 0.2. \balance \bibliographystyle{IEEEtran}
1,108,101,565,895
arxiv
\section{Introduction} The Koszul property for graded associative algebras \cite{priddy} has been generalized in several directions, including algebras with defining relations in just one degree ($d$-Koszul algebras \cite{berger}) and algebras with defining relations in multiple degrees ($\mathcal{K}_2$ algebras \cite{csk2alg}). $d$-Koszul algebras share many of the nice homological properties of Koszul algebras, but are not closed under several standard operations. The family of $\mathcal{K}_2$ algebras, which includes the $d$-Koszul algebras, has the advantage of being closed under graded Ore extensions, regular normal extensions, and tensor products. Interpolating between $d$-Koszul and $\mathcal{K}_2$ algebras leads one to look for Koszul-like homological properties in algebras with defining relations in just two degrees. This idea was considered independently by Vatne \cite{Vatne} and Green \& Marcos \cite{greenmarcos}, who each investigate graded algebras with defining relations in degree 2 and exactly one other degree. We compare these two approaches to find sufficient conditions for such an algebra to be $\mathcal{K}_2$, and answer two questions posed by Green \& Marcos. Let $\kk$ be a field and $d$ an integer greater than 2. We consider graded $\kk$-algebras of the form $R=\mathbb{T}(V)/I$ where $V$ is a finite dimensional vector space, $\mathbb{T}(V)$ is the free algebra generated by $V$ and $I$ is a homogenous ideal which can be generated by elements in $\mathbb{T}(V)_2$ and $\mathbb{T}(V)_d$. Properties of $I$ determine the structure the bi-graded Yoneda algebra $E(R):= \oplus_{i,j} \Ext_R^{i,j}(\kk,\kk)$, where $i$ refers to the cohomological degree and $j$ is the internal degree that $E(R)$ inherits from the grading on $R$. We let $E^i(R)$ denote $\oplus_j E^{i,j}(R)$. \begin{definition}\label{d:koszul} $R$ is Koszul if $E(R)$ is generated as a $\kk$-algebra by $E^{1}(R)$. 
$R$ is $\mathcal{K}_2$ if $E(R)$ is generated as a $\kk$-algebra by $E^1(R)$ and $E^2(R)$. $R$ is $d$-Koszul if $E(R)$ is generated as a $\kk$-algebra by $E^1(R)$ and $E^2(R)$, and also $I$ is generated in degree $d$. \end{definition} Each of these definitions requires $E(R)$ to be generated in the lowest possible degrees. One can determine whether or not an algebra is Koszul or $d$-Koszul just by knowing the bi-degrees in the corresponding Yoneda algebra. (Specifically, for $A$ to be $d$-Koszul, we need $E^{i,j}(A) = 0$ unless $j = \delta(i)$, where $\delta(2m) = dm$ and $\delta(2m +1) = dm+1$. Note that Koszul and 2-Koszul are synonymous.) In contrast, $E(R)$ can have the same bi-degrees as an algebra generated in degrees 1 and 2 even when $R$ is not actually $\mathcal{K}_2$. To explore the connections between these definitions, it is helpful to consider the quadratic generators of $I$ separately from the degree $d$ generators. Let $I_2$ denote a linearly independent set of quadratic relations, and $J$ a set of degree $d$ relations, so that $I$ is the ideal generated by $I_2$ and $J$. Note that different choices for $J$ can produce the same algebra $R$. Let $A$ be the algebra $\mathbb{T}(V)/\langle I_2\rangle$ and let $B = \mathbb{T}(V)/\langle J\rangle$, so that the algebra $R$ can be realized as either $A/\langle J\rangle$ or $B/\langle I_2\rangle$. \section{Almost linear resolutions} It would be nice if the Koszul property of $A$ and the $d$-Koszul property of $B$ would combine to imply that $R$ is $\mathcal{K}_2$, but this is not necessarily the case, as is shown in Example \ref{counter} below. Indeed, any two of the algebras $A$, $B$ and $R$ can have good homological behavior while the third is recalcitrant. Remark 7.5 in \cite{ConShel} illustrates the case where $R$ is $\mathcal{K}_2$, $B$ is $3$-Koszul, and yet $A$ is not Koszul. 
In the following example, $R$ is $\mathcal{K}_2$, $A$ is Koszul, but the Yoneda algebra of $B$ fails to be generated in low degrees. \begin{example} Let $V=\{x,y\},$ $I_2=\{xy-yx\}$ and $J=\{xyx\}$. Then $A$ is commutative and $xyx$ is a regular element in $A$, hence by \cite{csk2alg}*{Corollary 9.2}, $R$ is $\mathcal{K}_2$. But $B$, as a monomial algebra, fails to be $3$-Koszul by Proposition 3.8 in \cite{berger}. \end{example} Clearly different hypotheses are required to get good behavior from $R$. In \cite{Vatne}, Vatne considers the case where $A$ is Koszul, and $R$ has an \emph{almost linear} resolution as an $A$-module. \begin{definition} $R$ has an almost linear resolution as an $A$-module if $\Ext_A^{i}(R,\kk)=\Ext_A^{i,d-1+i}(R,\kk)$ for all $i>0$. \end{definition} Vatne shows that if $A$ is Koszul, $d>3$ and $R$ has an almost linear resolution as an $A$-module, then $E(R)$ has the correct bi-degrees for $R$ to be a $\mathcal{K}_2$ algebra. In fact $R$ is $\mathcal{K}_2$ in this case, as the following direct corollary of Theorem 5.15 in \cite{ConShel} shows. \begin{proposition}\label{vatne-answer} If $A$ is Koszul, $d \geq 3$, and $R$ has an almost linear resolution as an $A$-module, then $R$ is $\mathcal{K}_2$. \end{proposition} In the case of monomial algebras, the almost linear condition is relatively easy to check. This fact motivates our concluding theorem. \begin{proposition}\label{monom} Suppose that $I_2$ and $J$ consist of monomials and that no element of $J$ contains any element of $I_2$ as a connected subword. Then $_AR$ has an almost linear resolution if and only if $\Ext_A^1(R,\kk)=\Ext^{1,d}_A(R,\kk)$. In this case, $R$ is $\mathcal{K}_2$. \end{proposition} \begin{proof} If $_AR$ has an almost linear resolution, then by definition\\ $\Ext^1_A(R, \Bbbk) = \Ext^{1,d}_A(R, \Bbbk)$. Now, suppose $\Ext^1_A(R, \Bbbk) = \Ext^{1,d}_A(R, \Bbbk)$. 
In the remainder of this proof, let $\pi_A : \mathbb{T}(V) \rightarrow A$ be defined by $\pi_A(s) := s + \langle I_2 \rangle$. Let $\mathcal{M}$ be the set of monomials $u \in \mathbb{T}(V)$ with $\pi_A(u) \not = 0$. Note that $A \otimes A_+^{\otimes \bullet}$ is a projective resolution of $_AR$. We will construct a subresolution $A \otimes Q_\bullet$ of $A \otimes A_+^{\bullet}$, which is a minimal projective resolution. We construct $Q_\bullet$ by choosing a monomial basis $\mathcal{B}_i$ for each vector space $Q_i$. Because $\Ext^1_A(R, \Bbbk) = \Ext^{1,d}_A(R, \Bbbk)$, the left-ideal in $A$ generated by $\pi_A(J)$ is equal to the two-sided ideal generated by the same elements. Thus we may begin by setting $\mathcal{B}_1 = \{\pi_A(s): s \in J\}$. Now, suppose that $\mathcal{B}_i$ consists of elements of the form \[\pi_A(u_i) \otimes \pi_A(u_{i-1}) \otimes \cdots \otimes \pi_A(u_1)\] where each $u_t \in \mathcal{M}$. Then, we set \[\begin{split}\mathcal{B}_{i+1} := \{ &\pi_A(u_{i+1}) \otimes \pi_A(u_i) \otimes \pi_A(u_{i-1}) \otimes \cdots \otimes \pi_A(u_1) \\ &: u_{t} \in \mathcal{M}, \pi_A(u_i) \otimes \pi_A(u_{i-1}) \otimes \cdots \otimes \pi_A(u_1) \in \mathcal{B}_i,\\ &\text{ and } \pi_A(u_{i+1}) \text{ is a minimal left-annihilator of $\pi_A(u_i)$.}\}\end{split}\] Therefore, every $\mathcal{B}_i$ will consist of pure tensors of monomials. Furthermore, since $A$ is a quadratic monomial algebra, each minimal left-annihilator is linear, and so \[\mathcal{B}_i \subset A_1^{\otimes i -1} \otimes \spn_\Bbbk J.\] Therefore, $A \otimes Q_\bullet$ is an almost linear resolution of $_AR$. Since $A$ is monomial, it is also Koszul, and thus $R$ is $\mathcal{K}_2$ by \ref{vatne-answer}. \end{proof} \section{2-$d$-determined algebras} In \cite{greenmarcos}, Green and Marcos study the case where the bi-degrees of $E(R)$ are no greater than the bi-degrees of a $\mathcal{K}_2$ algebra with defining relations in degree 2 and $d$. They call such an algebra 2-$d$-determined. 
Like Vatne, Green and Marcos assume that $R$ is a quotient of a Koszul algebra $A$, but they do so via Gr\"obner bases. They assume that $I$ has a reduced Gr\"obner basis $\mathpzc{g}=\mathpzc{g}_2\cup \mathpzc{g}_d,$ so that $A=\mathbb{T}(V)/\langle \mathpzc{g}_2\rangle$ and $B=\mathbb{T}(V)/\langle \mathpzc{g}_d\rangle$. At the end of \cite{greenmarcos}, Green and Marcos ask three questions. In Theorems \ref{answer1} and \ref{answer2} we provide negative answers to the first two questions (which we have rephrased slightly)\footnote{In their formulation, Green and Marcos consider quotients of graph algebras. We only consider connected-graded algebras, which suffice to answer the questions in the negative.}: \begin{enumerate} \item If $C$ is a $2$-$d$-determined algebra, then is the $\Ext$-algebra $\E(C)$ finitely generated? \label{q:fingen} \item If $C$ is a $2$-$d$-determined algebra and the $\Ext$-algebra $\E(C)$ is finitely generated, is $C$ a $\mathcal{K}_2$ algebra (assuming that the global dimension of $C$ is infinite)?\label{q:k2} \end{enumerate} We use the following construction (see \cite{ppquadalg}*{\S III.1}): \begin{definition} Let $A$ and $B$ be graded algebras. The \textbf{free product} of $A$ and $B$ is the algebra \[A \sqcup B := \bigoplus_{\substack{i \geq 0\\ \epsilon_1, \epsilon_2 \in \{0, 1\}}} A_+^{\otimes \epsilon_1} \otimes (B_+ \otimes A_+)^{\otimes i} \otimes B_+^{\otimes \epsilon_2}.\] \end{definition} This related result will be of importance. \begin{proposition}[c.f. {\cite{ppquadalg}*{Proposition III.1.1}}]\label{p:freeprodext} For graded algebras $A$ and $B$, \[\E(A\sqcup B) \simeq \E(A) \sqcap \E(B).\] \end{proposition} Suppose $V$ has an ordered basis $x_1 < x_2 < \cdots < x_n$. Then we can order the monomials of $\mathbb{T}(V)$ by degree-lexicographical order. This induces a filtration $F$ on $A$, $\Tor_A(\Bbbk, \Bbbk)$ and $\E(A)$. (See, for example, \cite{ppquadalg}*{Chapter IV} and \cite{phan}.) 
With this filtration $F$, we now have several versions of the $\Ext$-functor. First, there is the \emph{ungraded} $\Ext$-functor (over $A$-modules and $\gr^F A$-modules), which is the derived functor of the ungraded $\Hom$-functor. Next, since $A$ and its associated graded algebra $\gr^F A$ are both graded with respect to an internal degree, we have the $\mathbb{N}$-\emph{graded} $\Ext$-functor (over $A$-modules and $\gr^F A$-modules), the derived functor of the $\mathbb{N}$-graded $\Hom$ functor. Finally $\gr^F A$ is graded by the monoid of monomials in $\mathbb{T}(V)$, and hence we have a monomial-graded $\Ext$-functor (over $\gr^F A$-modules). We will use the following result: \begin{lemma}[{\cite{phan}*{Theorem 1.2}}]\label{l:bigradedmono} Let $\gr^F \E(A)$ be the associated graded algebra of \emph{ungraded} algebra $\E(A)$ under the filtration $F$, and $\E(\gr^F A)$ be the \emph{graded} $\Ext$-algebra with respect to the monomial grading. Then there is a bigraded algebra monomorphism \[\Lambda: \gr^F \E(A) \hookrightarrow \E(\gr^F A).\] \end{lemma} In the case where each $\E^{i,j}(A)$ is finite-dimensional, the graded and ungraded versions coincide (see \cite{phan}*{Lemma 1.4}). A slight modification of the proof of \cite{phan}*{Lemma 2.11} yields: \begin{lemma}\label{l:gradedtrans} If $\E^{i,j}(A)$ is finite-dimensional for every $i,j$, then \[\dim \E^{i,j}(A) = \dim \bigoplus_{|\alpha| = j} (\gr^F \E^i(A))_\alpha\] where $|\alpha|$ is the length of the monomial $\alpha$. \end{lemma} To answer to question (\ref{q:fingen}), let \begin{align*} A &:= \frac{\Bbbk\left<a, b, c, d, e, f, l, m\right>}{\left<bc - ef, ae, da-lm, cl\right>}, \\ B &:= \frac{\Bbbk\left<z\right>}{ \left<z^4\right>},\\ \intertext{and} C &:= A \sqcup B. \end{align*} We order the monomials in \[\Bbbk\left<a, b, c, d, e, f, l, m, z\right>\] by degree-lexicographical order. This creates a filtration $F$ on $A, B,$ and $C$, as well as their corresponding $\Ext$-algebras. 
Using Bergman's diamond lemma in \cite{bergman}, we see that \[\gr^F A \simeq \frac{\Bbbk\left<a, b, c, d, e, f, l, m\right>}{\left<ef, ae, lm, cl, abc, cda\right>}.\] Consider the structure of $\E(\gr A)$. We construct a basis for a vector space $V_\bullet \subseteq (\gr A)_+^\bullet$ so that $\gr A \otimes V_\bullet \subseteq \gr A \otimes (\gr A)_+^\bullet$ is a minimal projective resolution of $_{\gr A}\Bbbk$. This is done by applying the left annihilator algorithm described in \cite{csk2alg}. Figure \ref{f:csgraphfingen} depicts the minimal left annihilation of monomials in the basis for $V_\bullet$. Consider all paths ending with a first-degree monomial. By tensoring the vertices in all such paths we obtain a basis for $V_\bullet$. For example, $ab \otimes cd \otimes ab \otimes c$ is a basis element for $V_4$. Other examples and applications of these graphs can be found in \cite{wakeforest}, where the graphs are used to characterize finiteness properties of Yoneda algebras. In Figure \ref{f:csgraphfingen}, the dotted lines represent left-annihilation from an inessential relation (e.g. $ab$ left-annihilates $cd$ because of the essential relation $abc$). 
\begin{figure} \begin{tikzpicture} \matrix (m) [matrix of math nodes, row sep=1em, column sep=2em, text height=1.5ex, text depth=0.25ex] {cd & a\\ & b \\ ab & c \\ & d \\ & e \\ & f \\ & l \\ & m \\ }; \path[->] (m-1-1) edge (m-1-2); \path[->] (m-1-1) edge [bend right=70, dotted, very thick] (m-3-1); \path[->] (m-3-1) edge [dotted, very thick] (m-1-1); \path[->] (m-3-1) edge (m-3-2); \path[->] (m-1-2) edge [bend left=70] (m-5-2); \path[->] (m-3-2) edge [bend left=70] (m-7-2); \path[->] (m-5-2) edge [bend left=70] (m-6-2); \path[->] (m-7-2) edge [bend left=70] (m-8-2); \end{tikzpicture} \caption{A basis for $V_\bullet$ can be constructed from the paths ending in a first-degree monomial in this graph.\label{f:csgraphfingen}} \end{figure} Likewise, we set $W_{2i} := \Bbbk (z^3 \otimes z)^{\otimes i}$ and $W_{2i+1} := \Bbbk z \otimes W_{2i}$, so that $B \otimes W_\bullet \subseteq B \otimes B_+^{\otimes \bullet}$ is a minimal projective resolution of $_B\Bbbk$. (As with $V_\bullet$, we can visualize a basis for $W_\bullet$ using Figure \ref{f:csgraphfingenB}.) \begin{figure}[h] \begin{tikzpicture} \matrix (m) [matrix of math nodes, row sep=1em, column sep=2em, text height=1.5ex, text depth=0.25ex] {z^3 & z\\}; \path[->] (m-1-1) edge (m-1-2); \path[->] (m-1-2) edge [bend right=70] (m-1-1); \end{tikzpicture} \caption{A basis for $W_\bullet$ can be constructed from the paths ending in the first-degree monomial $z$ in this graph.\label{f:csgraphfingenB}} \end{figure} \begin{theorem}\label{answer1} $C$ is $2$-$4$-determined, but $E(C)$ is not finitely generated.\end{theorem} \begin{proof} First, it's clear that the ideal of relations for $C$ is generated in degrees $2$ and $4$. Now, by examining Figure \ref{f:csgraphfingen}, we see that $\E^{i,j}(\gr^F A) = 0$ unless $j \leq 2i - 1.$ Thus, by Lemmas \ref{l:bigradedmono} and \ref{l:gradedtrans}, we know that $\E^{i,j}(A) = 0$ unless $j \leq 2i-1$. 
Likewise, we see that $\E^{i,j}(B) = 0$ unless $j \leq \delta(i)$ (where $\delta$ is as defined in the paragraph after Definition \ref{d:koszul}). Now, since $2i-1 \leq \delta(i)$ for every $i$, applying Proposition \ref{p:freeprodext}, we see that $C$ is $2$-$4$-determined. By Proposition \ref{p:freeprodext}, to show that $\E(C)$ is not finitely generated, it suffices to show that $\E(A)$ is not finitely generated. We shall exhibit a projective resolution for $_A \Bbbk$. Consider the sequence \begin{equation}\label{e:resfora} \begin{split}\cdots \xrightarrow{M_{i+1}} A(-2i+2)^{2} \xrightarrow{M_i} \cdots \xrightarrow{M_5} A(-6)^2 \xrightarrow{M_4} A(-4)^2 \xrightarrow{M_3} A(-2)^{4}\\ \xrightarrow{M_2} A(-1)^{8} \xrightarrow{M_1} A \xrightarrow{M_0} \Bbbk \rightarrow 0\end{split} \end{equation} where the maps are right multiplication by the matrices \setcounter{MaxMatrixCols}{20} \begin{align*} M_1 &= (a, b, c, d, e, f, l, m)^T,\\ M_2 &= \left(\begin{matrix} 0 & 0 & 0 & 0 & a & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & c & 0 \\ 0 & 0 & b & 0 & 0 & -e & 0 & 0 \\ -d & 0 & 0 & 0 & 0 & 0 & 0 & l \end{matrix}\right),\\ M_3 &= \left(\begin{matrix} cd & 0 & 0 & 0 \\ 0 & ab & 0 & 0 \\ \end{matrix}\right),\\ M_{2i} & = \left(\begin{matrix} ab & 0 \\ 0 & cd \end{matrix}\right) \text{ for $i \geq 2$, and}\\ M_{2i+1} & = \left(\begin{matrix} cd & 0 \\ 0 & ab \end{matrix}\right) \text{ for $i \geq 2$.} \end{align*} Given any fixed integer $N$, noncommutative Gr\"obner bases can be used to calculate the dimensions of $\E^{i,j}(A)$ for $j < N$ . We have used the computer program {\tt Bergman} \cite{Bergmansoftware} for this purpose. Note that (\ref{e:resfora}) satisfies $\img M_i \subset \ker M_{i-1}$. Indeed, (\ref{e:resfora}) is clearly exact up to the matrix $M_2$. Gr\"obner bases calculations show that $\E^{3,3}(A) = 0$ and $\E^{3,5}(A) = 0$. From above, we know that $\E^{3,j}(A) = 0$ for $j > 5$, and so (\ref{e:resfora}) is exact up to the matrix $M_3$. 
Likewise, Gr\"obner bases calculations show that $\E^{4, 4}(A) = 0$, $\E^{4,5}(A) = 0$, and $\E^{4,7}(A) = 0$. From above, we know that $\E^{4,j}(A) = 0$ for $j > 7$, and so (\ref{e:resfora}) is exact up to the matrix $M_4$. However, $\img M_4 = \ker M_3$ implies that $\img M_{i+1} = \ker M_i$ for all $i \geq 3$. It follows that (\ref{e:resfora}) is exact. Therefore, we have \[\E^{i}(A) = \begin{cases} \E^{i, i}(A) & \text{if $i = 0, 1$,}\\ \E^{i, 2i - 2}(A) & \text{if $i \geq 2$.} \end{cases} \] Fix $i \geq 3$. If the elements of $\E^{i, 2i-2}(A)$ were generated by lower cohomological-degree elements of $\E(A)$, then the multiplication map \begin{equation} \label{e:multmap} \bigoplus_{\substack{0 < j < i\\ 0< k < 2i-2}} \E^{j, k}(A) \otimes \E^{i - j, 2i - k - 2}(A) \rightarrow \E^{i, 2i-2}(A)\end{equation} would be surjective. We will show that this is not the case. Suppose $2 \leq j < i$. Then for $\E^{j,k}(A)$ to be nonzero, we would need $k = 2j - 2$. This implies $2i - k - 2 = 2(i-j)$, which is neither $i-j$ nor $2(i - j) - 2$, and so $\E^{i-j,2i-k-2}(A)=0$. On the other hand, suppose $j = 1$. Then for $\E^{1,k}(A)$ to be nonzero, we would need $k = 1$. This implies $2i - k - 2 = 2i - 3$. However, $i - j = i - 1 \geq 2$, and so $\E^{i-j,2i-k-2}(A)=\E^{i-1,2i-3}(A)=0$. In either case, the map in (\ref{e:multmap}) is zero for $i \geq 3$. Hence, for $i \geq 3$, $\E^i(A)$ is not generated by lower cohomological-degree elements of $\E(A)$. Therefore, $\E(A)$ is not finitely generated. \end{proof} Now we answer question (\ref{q:k2}). \begin{theorem}\label{answer2} There exists a 2-$d$-determined algebra (with infinite global dimension) which is not $\mathcal{K}_2$ even though its Yoneda algebra is finitely generated. 
\end{theorem} \begin{proof} Let \begin{align*} A &:= \frac{\Bbbk\left<a, b,n, p, q, r, s, t, u, v, w, x, y\right>}{\left<\begin{matrix}np-nq, np-nr, ps-pt, qt-qu, rs-ru, sv- sw, tw-tx,\\ uv-ux, sv-sy, tw-ty, ux-uy, va-vb, wa-wb, xa-xb\end{matrix}\right>}, \\ B &:= \frac{\Bbbk\left<z\right>}{ \left<z^4\right>},\\ \intertext{and} C &:= A \sqcup B. \end{align*} The algebra $A$ appears in \cite{cassidy}, and the following minimal projective resolution for $_A\Bbbk$ is given: \[0 \rightarrow A(-5) \rightarrow A(-3)^7 \rightarrow A(-2)^{14} \rightarrow A(-1)^{13} \rightarrow A.\] Note that $A$ is a quadratic algebra which is not Koszul, and so $A$ is not $\mathcal{K}_2$. However, as $A$ has finite global dimension, $E(A)$ must be finitely generated. As shown above, the algebra $B$ satisfies $\E^{2i}(B) = \E^{2i, 4i}(B)$, $\E^{2i+1}(B) = \E^{2i+1, 4i+1}(B)$, and $\dim \E^i(B) = 1$. $B$ has infinite global dimension, but since it is $\mathcal{K}_2$, $E(B)$ is finitely-generated. It follows from Proposition \ref{p:freeprodext}, that $C$ is $2$-$4$-determined, has infinite global dimension, and is not $\mathcal{K}_2$. \end{proof} There is a small omission in the statement of Theorem 20 in \cite{greenmarcos}. From their earlier proofs it is clear the authors intended to include the hypotheses ``\dots and $\mathpzc{g}_d$ is a Gr\"obner basis for the ideal it generates.'' The following example shows that without this hypothesis, the conclusions of Theorem 20 are not valid. \begin{example}\label{counter} Let $V=\{a,x,y,z\}$, ordered by $z>y>x>a$. Then $\mathpzc{g}=\mathpzc{g}_2\cup \mathpzc{g}_4=\{xa,az,ay\} \cup\{y^2z^2, x^2y^2+a^4\}$ forms a Gr\"obner basis. $A=\displaystyle\frac{\kk\langle a,x,y,z\rangle}{\langle \mathpzc g_2\rangle}$ is a quadratic monomial algebra and hence Koszul, and $B=\displaystyle\frac{\kk\langle a,x,y,z\rangle}{\langle \mathpzc g_d\rangle}$ has global dimension 2 and thus is 4-Koszul. 
But over the algebra \[R= \frac{\kk\langle a,x,y,z\rangle}{\langle x^2y^2+a^4,y^2z^2,xa,az,ay\rangle},\] the module $\kk$ has a minimal projective resolution of the form \[0 \rightarrow R(-6) \rightarrow R(-7)\oplus R(-6)\oplus R(-5)\oplus R(-3)^2 \rightarrow R(-4)^2\oplus R(-2)^{3}\\ \rightarrow \] \[ R(-1)^{4} \rightarrow R \rightarrow \kk \rightarrow 0.\] This means that $E(R)^{3,6}\ne 0$ and so $R$ is neither $\mathcal{K}_2$ nor 2-4-determined. \end{example} We can establish sufficient conditions for $R$ to be $\mathcal{K}_2$ by merging hypotheses from Vatne with those of Green and Marcos. Our last theorem shows that we need only slightly stronger hypotheses on the Gr\"obner basis to guarantee that $R$ is $\mathcal{K}_2$. \begin{proposition} Let $\mathpzc{g}_2$, $\mathpzc{g}_d$ and $\mathpzc{g}_2\cup \mathpzc{g}_d$ be Gr\"obner bases such that $\mathpzc{g}_2\cup \mathpzc{g}_d$ has no redundant elements (i.e. any set of defining relations for $R$ has at least $|\mathpzc g_2|+|\mathpzc g_d|$ elements). If either \begin{enumerate} \item[(i)] $B=\mathbb{T}(V)/\langle \mathpzc g_d \rangle$ is $d$-Koszul, or\\ \item[(ii)] $\Ext_{\gr A}^1(\gr R,\kk)=\Ext^{1,d}_{\gr A}(\gr R,\kk)$,\\ \end{enumerate} then $R$ is $\mathcal{K}_2$. \end{proposition} \begin{proof} Using the filtration defined by the Gr\"obner bases we create the monomial algebras $\gr A$, $\gr B$ and $\gr R$. We will show that $\gr R$ is a $\mathcal{K}_2$ algebra. For case $(i)$, assume that $B$ is $d$-Koszul. Then $\gr B$ is $d$-Koszul by \cite{greenmarcos}*{Theorem 10}. It follows from \cite{greenmarcos}*{Theorem 14} that $\gr R$ is 2-$d$-determined, and hence $\mathcal{K}_2$ by \cite{greenmarcos}*{Theorem 16}. For case $(ii)$, if $\Ext_{\gr A}^1(\gr R,\kk)=\Ext^{1,d}_{\gr A}(\gr R,\kk)$ then by Proposition \ref{monom}, $\gr R$ has an almost linear resolution over $\gr A$ and is therefore $\mathcal{K}_2$. In either case, $\gr R$ is a monomial $\mathcal{K}_2$ algebra. 
Since $\mathpzc g_2 \cup\mathpzc g_d$ has no redundant elements, it forms an essential Gr\"obner basis in the sense of \cite{phan}. Thus by \cite{phan}*{Theorem 1.7}, $R$ itself is $\mathcal{K}_2$. \end{proof} Condition $(ii)$ is certainly not necessary. Is condition $(i)$ necessary? \bibliographystyle{alpha}
1,108,101,565,896
arxiv
\section{Introduction} Discriminative models in machine learning, like neural networks, have achieved impressive performance in a variety of applications. Models in this class, however, suffer from the problem of overgeneralization, whereby the whole input space is partitioned between the set of target classes specified during training, and generally lack the possibility to reject a novel sample as not belonging to any of those. A main issue with overgeneralization is in the context of \emph{open set recognition} \cite{scheirer2013_opensetrecog_1vssetmachine} and \emph{open world recognition} \cite{bendale2015towards}, where only a limited number of classes is encountered during training while testing is performed on a larger set that includes a potentially very large number of unknown classes that have never been observed before. An example is shown in Figure \ref{fig:overgeneralization_and_fooling} where a linear classifier is trained to discriminate between handwritten digits `0' and `6'. As digit `9' is not present in the training set, it is here wrongly classified as `6'. In general, instances of classes that are not present in the training set will fall into one of the partitions of the input space learnt by the classifier. The problem becomes worse in real world applications where it may be extremely hard to know in advance all the possible categories that can be observed. Further, the region of meaningful samples in the input space is usually small compared to the whole space. This can be easily grasped by randomly sampling a large number of points from the input space, for example images at a certain resolution, and observing that the chance of producing a recognizable sample is negligible. Yet, discriminative models may assign a high confidence score to such random images, depending on the learnt partition of the input space. 
This is indeed observed with \emph{fooling} \cite{fooling}, for which it was shown to be possible to generate input samples that are unrecognizable to humans but get classified as a specific target class with high confidence (see example in Figure \ref{fig:overgeneralization_and_fooling}). Fooling in particular may lead to security problems in critical applications. \begin{figure}[!t] \centering \includegraphics[width=0.3\linewidth]{overgeneralization_and_fooling.pdf} \caption{ A linear classifier is trained to recognize exclusively pictures of digits `0' and `6'. Digit `9' was never observed during training, but in this example it is wrongly classified as digit `6'. This is an example of overgeneralization. A similar problem is `fooling', whereby it is possible to generate images that are unrecognizable to humans but are nonetheless classified as one of the known classes with high confidence, for example here the noise-looking picture in the bottom-left corner that is classified as digit `0'. } \label{fig:overgeneralization_and_fooling} \end{figure} As suggested in \cite{fooling}, these problems may be mitigated or solved by using generative models, that rather than learning the posterior of the class label $P(y|X)$ directly, learn the joint distribution $P(y,X)$ from which $P(X)$ can be computed. Modeling the distribution of the data would then give a model the capability to identify input samples as belonging to known classes, and to reject those that are believed to belong to unknown ones. Apart from mitigating the problem of overgeneralization, modeling the distribution of the data would also be useful for applications in novelty and outlier detection \cite{markou2003novelty} and incremental learning \cite{bendale2015towards}, broadening the range of applications the same model could be used in. Estimating the marginal probability $P(X)$ is, however, not trivial. Luckily, computing the full distribution may not be necessary. 
The results in this work suggest that identification of high-density regions close to the local maxima of the data distribution is sufficient to correctly identify which samples belong to the distribution and which ones are to be rejected. Specifically, it is possible to identify and classify the critical points of the data distribution by exploiting recent work that has shown that in denoising \cite{denoising_autoencoder} and contractive \cite{contractive_autoencoder} autoencoders, the reconstruction error tends to approximate the gradient of the log-density. A measure of a confidence score can then be computed as a function of this gradient. Here, a set of experiments is presented to compare the empirical performance of the proposed model with baselines and with the COOL (Competitive Overcomplete Output Layer) \cite{cool} model that has been recently applied to the problem of fooling. \section{Overview of Previous Work} The simplest way to limit overgeneralization in a given classifier is to set a threshold on the predicted outputs and reject any sample below its value (for example \cite{phillips2011evaluation, hendrycks2016baseline}). The output of the model is thus treated as an estimate of the confidence score of the classifier. This approach, however, was shown to be sensitive to the problem of fooling \cite{fooling}. Alternatively, a confidence score may be computed based on the k-Nearest-Neighbor algorithm (e.g., \cite{hautamaki2004outlier, zhao2009anomaly}). Another way to mitigate the problem, as done in classical object recognition, is to use a training set of positive samples complemented with a set of negative samples that includes instances belonging to a variety of `other' classes. This approach however does not completely solve the problem, and it is usually affected by an unbalanced training set due to the generally larger amount of negatives required. 
As the potential amount of negatives can be arbitrarily large, a further problem consists in gathering an amount of data sufficient to approximate their actual distribution, which is made even worse by the fact that the full set of negative categories may not be known when training the system. For example, in the context of object recognition in vision, high-resolution images may represent any possible image class, the majority of which is likely not known during training. The use of negative training instances may nonetheless mitigate the effect of categories that are known to be potentially observed by the system. The problem of overgeneralization is further present in the context of `open set recognition', that was formally defined by Scheirer and colleagues \cite{scheirer2013_opensetrecog_1vssetmachine}. In this framework, it is assumed that a classifier is trained on a set of `known' classes and potentially on a set of `known unknown' ones (e.g., negative samples). Testing, however, is performed on a larger set of samples that include `unknown unknown' classes that are never seen during training. Models developed to address the problem of open set recognition focus on the problem of `unknown unknown' classes \cite{scheirer2014_wsvm}. The seminal paper that gave the first formal definition of the problem proposed the 1-vs-Set Machine algorithm as an extension to SVM that is designed to learn an envelope around the training data using two parallel hyperplanes, with the inner one separating the data from the origin, in feature space \cite{scheirer2013_opensetrecog_1vssetmachine}. Scheirer and colleagues then proposed the Weibull-calibrated SVM (W-SVM) algorithm to address multi-class open set recognition \cite{scheirer2014_wsvm}. 
Another interesting approach was recently applied to deep neural networks with the OpenMax model \cite{opensetdeepnetworks}, that works by modeling the class-specific distribution of the activation vectors in the top hidden layer of a neural network, and using the information to recognize outliers. Related to the problem of open set recognition is that of `open world recognition', in which novel classes first have to be detected and then learnt incrementally \cite{bendale2015towards}. This can be seen as an extension to open set recognition in which the `unknown unknown' classes are discovered over time, becoming `novel unknowns'. The new classes are then labelled, potentially in an unsupervised way, and become 'known'. The authors proposed the Nearest Non-Outlier (NNO) algorithm to address the problem. A special case of open set recognition is 1-class recognition, in which training is performed on samples from a single class, with or without negative samples. The 1-Class SVM algorithm was proposed to address this problem \cite{oneclasssvm}, by fitting a hyperplane that separates all the data points from the origin, in feature space, maximizing its distance from the origin. The algorithm has been applied in novelty and outlier detection \cite{oneclasssvm_noveltydetection}. Variants of the algorithm like Support Vector Data Description (SVDD) have also been used to learn an envelope around points in the dataset \cite{oneclasssvm_support_vector_data_description_svdd}. Other systems have tried to estimate the boundaries of the data by computing the region of minimum volume in input space containing a certain probability mass \cite{park2010computable}. Finally, a specific sub-problem of overgeneralization is `fooling' \cite{fooling}. The ``Competitive Overcomplete Output Layer'' (COOL) model \cite{cool} was recently proposed to mitigate the problem of fooling. 
COOL works by replacing the final output layer of a neural network with a special COOL layer, constructed by replacing each output unit with $\omega$ ones (the degree of overcompleteness). The $\omega$ output units for each target class are then made to compete for activation by means of a softmax activation that forces them to learn to recognize different parts of the input space, overlapping only within the region of support of the data generating distribution. The network can then compute a confidence score as the product of the activation of all the units belonging to the same target class, that is high for inputs on which a large number of units agree, and low in regions far from the data distribution, where only a few output units are active. \section{Proposed Solution} \label{sec:methods} The solution presented here is based on a novel measure of confidence in the correct identification of data points as belonging to the training distribution, or their rejection. Ideally, such a confidence score would be a function of the data probability $p(\mathbf{x})$. Computing the full distribution may however not be necessary. In particular, the problem can be simplified with the identification of points belonging to the data manifold as points that are close to the local maxima of the data generating distribution. It has been recently shown that denoising \cite{denoising_autoencoder} and contractive \cite{contractive_autoencoder} autoencoders implicitly learn features of the underlying data distribution \cite{alain_bengio_2013, bengio_2013}, specifically that their reconstruction error approximates the gradient of its log-density \begin{equation} \label{eqn:autoencoder_score} \frac{\partial \log p(\mathbf{x})}{\partial \mathbf{x}} \propto r(\mathbf{x})-\mathbf{x} \end{equation} for small corruption noise ($\sigma \rightarrow 0$). $r(\mathbf{x})=\mathrm{Dec}(\mathrm{Enc}(\mathbf{x}))$ is the reconstructed input. Larger noise is however found to work best in practice. 
The result has been proven to hold for any type of input (continuous or discrete), any noise process and any reconstruction loss, as long as it is compatible with a log-likelihood interpretation \cite{bengio_2013}. A similar interpretation suggested that the reconstruction error of regularized autoencoders can be used to define an energy surface that is trained to take small values on points belonging to the training distribution and higher values everywhere else \cite{ebgan}. Thus, critical points of the data distribution correspond to points with small gradient of the log-density, that is small reconstruction error (Equation \ref{eqn:autoencoder_score}). Those are indeed points that the network can reconstruct well, and that it has thus hopefully experienced during training or has managed to generalize to well. A confidence score can thus be designed that takes high values for points on the data manifold, that is points near the local maxima of the log-density of the data distribution, and small values everywhere else. We note however that this approach cannot distinguish between local minima, maxima or saddle points (Figure \ref{fig:2dvisualization} shows such an example), and may thus assign a high confidence score to a small set of points not belonging to the target distribution. Here the problem is addressed by scaling the computed confidence by a function $\Gamma(\mathbf{x})$ that favours small or negative curvature of the log-density of the data distribution, which can in turn be computed from the diagonal of the Hessian, estimated from the Jacobian of the reconstruction function as shown in \cite{alain_bengio_2013} \begin{equation} \label{eqn:hessian} \frac{\partial^2 \log p(\mathbf{x})}{\partial \mathbf{x}^2} \propto \frac{\partial r(\mathbf{x})}{\partial \mathbf{x}} - I \end{equation} A variety of functions may be defined with the desired characteristics, exploiting Equations \ref{eqn:autoencoder_score} and \ref{eqn:hessian}. 
One possible way, which we will use throughout this paper, is to compute the confidence score $\tilde{c}(\mathbf{x})$ as \begin{equation} \label{eqn:pseudo_confidence} \tilde{c}(\mathbf{x}) = \exp\left(-\frac{\alpha}{D} \|r(\mathbf{x})-\mathbf{x}\|_2\right) \Gamma(\mathbf{x}) \end{equation} \begin{align} \label{eqn:curvature_function} \Gamma(\mathbf{x}) = \begin{cases} 1 & \text{if } \gamma(\mathbf{x}) \leq 0 \\ \exp(-\beta \gamma(\mathbf{x})) & \text{if } \gamma(\mathbf{x})>0 \end{cases} \end{align} \begin{equation} \label{eqn:curvature} \gamma(\mathbf{x}) = \frac{1}{D} \sum_i \left( \frac{\partial r_i(\mathbf{x})}{\partial x_i} - 1 \right) \end{equation} where $D$ is the dimensionality of the inputs $\mathbf{x} = \left(x_1, x_2, \ldots, x_D \right)$, $\alpha$ a parameter that controls the sensitivity of the function to outliers and $\beta$ a parameter that controls the sensitivity to $\gamma(\mathbf{x})$, which is proportional to the average of the diagonal elements of the Hessian of the log-density at $\mathbf{x}$ (from Equation \ref{eqn:hessian}). The first component of $\tilde{c}(\mathbf{x})$ identifies the extrema points of the log-density of the data (from Equation \ref{eqn:autoencoder_score}), while $\Gamma(\mathbf{x})$ is used to limit high values of the confidence scores to the maxima only (i.e., to points predicted to lie near the data manifold). A classifier can finally be modified by scaling its predicted output probabilities $y$ by $\tilde{c}(\mathbf{x})$ computed using a denoising autoencoder trained together with the classifier \begin{equation} \label{eqn:classifier_corrected_output} \tilde{\mathbf{y}} = \tilde{c}(\mathbf{x}) \mathbf{y} \end{equation} If the outputs of the classifier are normalized, for example using a softmax output, this can be interpreted as introducing an implicit `reject' option with probability $1-\tilde{c}(\mathbf{x})$. The confidence score proposed here, however, was not designed as a probability estimate. 
In the experiments presented here, the classifier is constructed as a fully connected softmax layer attached on top of the top hidden layer of an autoencoder with symmetric weights (i.e., attached to the output of the encoder), in order to keep the number of weights similar (minus the bias terms of the decoder) to an equivalent feed-forward benchmark model, identical except for its lack of the decoder. In general, keeping the autoencoder separate from the classifier or connecting the two in more complex ways will work, too, as well as using a classifier that is not a neural network. In case the autoencoder and the classifier are kept separate, the autoencoder is only used to infer information about the data distribution. Pairing the systems together, however, might provide advantages outside the scope of the present work, like enabling a degree of semi-supervised learning. The autoencoder may also be further improved by replacing it with the discriminator of an EBGAN \cite{ebgan} to potentially learn a better model of the data. \section{Experiments} \subsection{2D example} The model was first tested on a 2D classification task to visualize its capacity to learn the support region of the input space of each training class. Three target distributions were defined as uniform rings with thickness of $0.1$, inner radius of $0.6$ and centers $(-1, 1)$, $(1, 1)$ and $(1, -1)$. The training distributions are shown in Figure \ref{fig:2dvisualization}A. Training was performed with minibatches of size $64$ using the Adam optimizer \cite{adam_optimizer} for a total of $50000$ update steps. As shown in Figure \ref{fig:2dvisualization}B, the model learned to correctly identify the support region of the target distributions. On the contrary, the uncorrected classifier partitioned the whole space into three regions, incorrectly labeling most points (panel C). 
The confidence score computed by the model presented here (panel D) helps the system to prevent overgeneralization by limiting the decisions of the classifier to points likely to belong to one of the target distributions. Panel D further evidences the different contributions of the two factors used to compute the confidence score. A measure of proximity to any local extrema of the data generating distribution (top part of panel D) is modulated to remove the local minima using the $\Gamma(\mathbf{x})$ function of the local curvature of the log probability of the distribution (bottom part of panel D). It is important to observe, however, that the $\Gamma(\mathbf{x})$ function may reduce the computed confidence score of valid samples. Different types of applications may benefit from its inclusion or exclusion, depending on whether more importance is given to the correct rejection of samples that do not belong to the training distribution or to their correct classification, at the expense of a partial increase in overgeneralization. \begin{figure}[!t] \centering \includegraphics[width=0.4\linewidth]{vis_a40_784_b5_sigma02_net784_200_200_3_sym_dae_50kiters.pdf} \caption{ The system presented here is trained to classify points sampled from three uniform ring distributions. \textbf{A.} $1000$ data points are sampled from each of the target distributions. \textbf{B.} Labeling $\tilde{\mathbf{y}}$ of each point in the input space scaled by the computed confidence score. Regions in white are assigned low confidence scores. \textbf{C.} Labeling $\mathbf{y}$ of each point in the input space without scaling of the classifier's output by the confidence score. \textbf{D.} Top: confidence score without $\Gamma(\mathbf{x})$. Bottom: estimate of the curvature of the log-distribution of the data ($\Gamma(\mathbf{x})$). The confidence score $\tilde{c}(\mathbf{x})$ is the product of the two functions. 
The panel in \textbf{B} is the product of the classifier's output (\textbf{C}) and the confidence score. } \label{fig:2dvisualization} \end{figure} \subsection{Fooling} \label{sec:fooling} The model presented in this paper was next tested on a benchmark of fooling on the MNIST \cite{mnist} and Fashion-MNIST \cite{fmnist} datasets similar to the one proposed in \cite{cool}. However, contrary to the previous work, the classification accuracy of the models is reported as a `thresholded classification accuracy', that is by additionally requiring test samples to be assigned a confidence score higher than the threshold used to consider a fooling instance valid. This metric should indeed be reported alongside the fooling rate for each model, as otherwise a model that trivially limits the confidence scores of a network to a fixed value lower than the threshold used to consider fooling attempts to be valid would by definition never be fooled. The same model would however never classify any valid sample above that same threshold. This metric thus proves useful to compare different models with varying degrees of sensitivity to overgeneralization. The fooling test was performed by trying to fool a target network to classify an input that is unrecognizable to humans into each target class (digits $0$ to $9$). The fooling instances were generated using a Fooling Generator Network (FGN) consisting of a single layer perceptron with sigmoid activation and an equal number of input and output units (size of $(28,28)$ here). Most importantly, the FGN produces samples with values bounded in $(0,1)$ without requiring an explicit constraint. Fooling of each target digit was attempted by performing stochastic gradient descent on the parameters of the FGN to minimize the cross-entropy between the output of the network to be fooled and the specific desired target output class. 
Fooling of each digit was attempted for $20$ trials with different random inputs to the FGN, each trial consisting of up to $10000$ parameter updates, as described in \cite{cool}. In the first test we compared three models, a plain Convolutional Neural Network (CNN), the same CNN with a Competitive Overcomplete Output Layer (COOL) \cite{cool}, and a network based on the system described in Section \ref{sec:methods}, built on the same CNN as the other two models with the addition of a decoder taking the activation of the top hidden layer of the CNN as input, to compute the dAE-based confidence score $\tilde{c}(\mathbf{x})$. The denoising autoencoder (dAE) was trained with corruption of the inputs by additive Gaussian noise. All the models were trained for a fixed $100$ epochs. Fooling was attempted at two different thresholds, $90\%$ and $99\%$, in contrast to the previous work that used only the $99\%$ one \cite{cool}. Comparing the models at different thresholds indeed gives more information about their robustness and may amplify their differences, thus improving the comparison. Tables \ref{table:results1_fooling_mnist} and \ref{table:results1_fooling_fmnist} report the results for the three models, with the further splitting of the denoising autoencoder model into two separate tests, using either a separate decoder (\emph{dAE asym}) or building the decoder as a symmetric transpose of the encoder (with the addition of new bias parameters; \emph{dAE sym}). The tables report the thresholded classification accuracy for all the models together with the original, unthresholded one. Fooling was measured as the proportion of trials ($200$ total, $20$ repetitions of $10$ digits) that produced valid fooling samples within the maximum number of updates. The average number of updates required to fool each network is reported in parentheses. The full set of parameters used in the simulations is reported in Appendix \ref{appendix:foolingmnist}. 
The model presented here outperformed the other two at both thresholds, while also retaining a high thresholded classification accuracy, even at high thresholds. As in the previous protocol \cite{cool}, the cross-entropy loss used to optimize the FGN was computed using the unscaled output $y$ of the network. \begin{table}[!t] \renewcommand{\arraystretch}{1.5} \scriptsize \caption{MNIST fooling results} \label{table:results1_fooling_mnist} \centering \hspace{-0.1in} \begin{tabular}{|c| c c c | c c |} \hline \multicolumn{1}{|c|}{Model} & \multicolumn{3}{c|}{Accuracy} & \multicolumn{2}{c|}{Fooling Rate (Avg Steps)} \\ \multicolumn{1}{|c|}{} & \multicolumn{1}{c}{0\%} & \multicolumn{1}{c}{$90\%$} & \multicolumn{1}{c|}{$99\%$} & \multicolumn{1}{c}{$90\%$} & \multicolumn{1}{c|}{$99\%$} \\ \hline CNN & 99.35\% & 99.23\% & 99\% & 100\% (63.5) & 99\% (187.1) \\ \hline COOL & 99.33\% & 98.1\% & 93.54\% & 34.5\% (238.8) & 4.5\% (313.4) \\ \hline dAE sym & 98.98\% & 98.11\% & 96.8\% & \textbf{0\% (-)} & \textbf{0\% (-)} \\ \hline dAE asym & 99.14\% & 98.41\% & & \textbf{0\% (-)} & \\ \hline \end{tabular} \end{table} \begin{table}[t!] 
\renewcommand{\arraystretch}{1.5} \scriptsize \caption{Fashion-MNIST fooling results} \label{table:results1_fooling_fmnist} \centering \hspace{-0.1in} \begin{tabular}{|c| c c c | c c |} \hline \multicolumn{1}{|c|}{Model} & \multicolumn{3}{c|}{Accuracy} & \multicolumn{2}{c|}{Fooling Rate (Avg Steps)} \\ \multicolumn{1}{|c|}{} & \multicolumn{1}{c}{0\%} & \multicolumn{1}{c}{$90\%$} & \multicolumn{1}{c|}{$99\%$} & \multicolumn{1}{c}{$90\%$} & \multicolumn{1}{c|}{$99\%$} \\ \hline \hline CNN & 91.65\% & 90.91\% & 89.27\% & 100\% (113.0) & 30.5\% (902.0) \\ \hline COOL & 91.23\% & 87\% & 65.3\% & \textbf{0\% (-)} & \textbf{0\% (-)} \\ \hline dAE sym & 91.59\% & 77.8\% & 64.87\% & \textbf{0\% (-)} & \textbf{0\% (-)} \\ \hline \end{tabular} \end{table} As the difference between the autoencoders using a symmetric versus asymmetric decoder was found to be minimal on MNIST, the symmetric autoencoder was used for all the remaining experiments, so that the three models had a similar number of parameters ($1.31M$ for CNN and dAE, $1.35M$ for COOL). We further observed that the results in Table \ref{table:results1_fooling_mnist} were different from those reported in \cite{cool}. Specifically, the fooling rate of the COOL was found to be significantly lower than that reported ($47\%$), as well as the average number of updates required to fool it (more than $5000$). The major contributor to this difference was found to be the use of Rectified Linear Units (ReLUs) in the experiments reported here, compared to sigmoid units in the original study. This was shown in a separate set of simulations where all the three models used sigmoid activations instead of ReLUs and a fixed fooling threshold of $99\%$. 
In this case the thresholded classification accuracy of the models was slightly higher ($98.39\%$ for the plain CNN, $96.55\%$ for COOL, and $96.58\%$ for dAE), but it was matched with a significant increase in the fooling rate of the COOL model ($95.5\% (2203.9)$; plain CNN $91\% (519.2)$, dAE $0\%$). Other variations in the protocol that could further account for the differences found could be the different paradigm for training ($100$ fixed training epochs versus early stopping on a maximum of $200$ epochs) and a slightly different network architecture, that in the present work used a higher number of filters at each convolutional layer. Next, the effect of the learning rate used in the fooling update steps was investigated by increasing it from the one used in the previous study ($\eta=0.00001$) to the one used to train the models $\eta=0.001$, expecting a higher fooling rate. The threshold was set to $90\%$. Indeed, the plain CNN was found to be fooled on $100\%$ of the trials in just $2.66$ updates, while the dAE based model was still never fooled. COOL, on the other hand, significantly decreased in performance, with a fooling rate of $56.5\%$ ($878.3$ average updates). Finally, the COOL and dAE models were tested by attempting to fool their confidence scores directly, rather than their output classification scores, in contrast to \cite{cool} (i.e., using $\tilde{\mathbf{y}}$ instead of $\mathbf{y}$ for the cross-entropy loss used to update the FGN). A threshold of $99\%$ was used. Interestingly, the COOL model was never fooled, while the model described here was fooled on $1\%$ of the trials, although requiring a large number of updates ($5470.8$ on average). 
Also, it was found that while adding $L_2$ regularization to the weights of the dAE model led to a significantly higher fooling rate ($100\%$ rate in $6500.3$ average updates for $\lambda_{L_2}=10$), the generated samples actually resembled real digits closely, and could thus not be considered examples of fooling. This shows that the dAE model, when heavily regularized, is capable of learning a tight boundary around the high density regions of the data generating distribution, although at the cost of reducing its thresholded accuracy ($87.84\%$ for $\lambda_{L_2}=10$). The set of generated samples is shown as Supplementary Figure D for $\lambda_{L_2}=\{10,100\}$. An example of the generated fooling samples is reported in Figure \ref{fig:fooling_samples}, showing instances from the main results of table \ref{table:results1_fooling_mnist} for the plain CNN and COOL, and for the experiment with fooling the confidence scores directly for the dAE model. \begin{figure}[!t] \centering \includegraphics[width=0.6\linewidth]{main_fooling_examples.pdf} \caption{ Visualization of a set of generated fooling samples from the main results of Table \ref{table:results1_fooling_mnist}. The samples from the plain CNN and the COOL models were computed by trying to fool each system's output classification scores above a threshold of $90\%$. As fooling was unsuccessful on the dAE model in this case, the results reported here were taken from the simulations in which fooling was performed directly on the output scaled by the confidence score ($\tilde{\mathbf{y}}$). } \label{fig:fooling_samples} \end{figure} \subsection{Open Set Recognition} The three models that were tested on fooling, a plain CNN, COOL \cite{cool} and the dAE model described in this paper were next compared in the context of open set recognition. Open set recognition was tested by building a set of classification problems with varying degrees of openness based on the MNIST and Fashion-MNIST datasets. 
Each problem consisted of training a target model only on a limited number of `known' training classes (digits) and then testing it on the full test set of $10$ digits, requiring the model to be able to reject samples hypothesized to belong to `unknown' classes. The degree of openness of each problem was computed similarly to \cite{scheirer2013_opensetrecog_1vssetmachine}, as \[ openness = 1 - \sqrt{\frac{num\_training\_classes}{num\_total\_classes}} \] where $num\_training\_classes$ is the number of `known' classes seen during training and $num\_total\_classes$ is $10$ for both datasets. A high value of openness reflects a larger number of unknown classes seen during testing than that of classes experienced during training. The number of training classes was varied from $1$ to $10$, reflecting the full range of degrees of openness offered by the dataset. For each fixed number of training classes used in training, the models were trained for $10$ repetitions on different random subsets of the digits, to balance between easier and harder problems depending on the specific digits used. The same subsets of digits were used for all the three models. Correct classification was computed as a correct identification of the class label and a confidence score above a classification threshold of $99\%$, while correct rejection was measured as either assigning a low classification score (below $99\%$) or classifying the input sample as any of the classes not seen during training (for simplicity, the networks used a fixed number of output units for all the problems, with the target outputs corresponding to the `unknown' classes always set to zero). The models were trained for a fixed $100$ epochs for each task. Figure \ref{fig:opensetrecognition_99pct} reports the results of the experiment. 
Like in the previous published benchmarks on open set recognition \cite{scheirer2013_opensetrecog_1vssetmachine, scheirer2014_wsvm, opensetdeepnetworks}, the performance of the models for each degree of openness (indexed by $i$) was computed as the F-measure, the harmonic mean of the precision and recall scores, averaged across all the repetitions for the same degree of openness. $$ F_i = 2 \times \frac{precision_i \times recall_i}{precision_i + recall_i} $$ Results from a similar experiment with a lower threshold of $90\%$ are available as Supplementary Figure F. \begin{figure}[!t] \centering \includegraphics[width=0.8\linewidth]{combined_osr.pdf} \caption{ Comparison of the three models on a benchmark of open set recognition. The F-measure was computed for each model on problems created from the MNIST (left) and Fashion-MNIST (right) datasets by only using a limited number of `known' classes during training while testing on the full test set (e.g., training on classes $0$ and $3$ but testing on all classes $[0,9]$), requiring the models to be able to reject samples belonging to `unknown' classes. Higher values for the openness of a problem reflect a smaller number of classes used during training. The curves are averaged across $10$ runs using different sub-sets of digits. Error bars denote standard deviation. } \label{fig:opensetrecognition_99pct} \end{figure} \subsection{1-Class Recognition} \label{sec:oneclassrecog} The limit of open set recognition in which a single training class is observed during training, that is the problem of 1-class recognition, was next explored, comparing the model presented in this paper with COOL \cite{cool} and 1-Class SVM \cite{oneclasssvm}. A separate 1-class recognition problem was created from the MNIST and Fashion-MNIST datasets for each target class. For each problem the models were trained using only samples from the selected class, while they were tested on the full test set of $10$ digits. 
No negative samples were used during training. Each model was trained for $100$ epochs on each problem. Figure \ref{fig:oneclass_recog_results} shows the results as a ROC curve averaged over the curves computed for each of the $10$ 1-class recognition problems. The dAE-based model outperforms the other two, with an Area Under the Curve ($AUC$) of $0.964$, compared to $AUC=0.952$ of 1-Class SVM and $AUC=0.753$ of COOL. \begin{figure}[!t] \centering \includegraphics[width=0.8\linewidth]{combined_out_oneclassrecognition_dae_vs_ocsvm.pdf} \caption{ ROC curves averaged over $10$ 1-class recognition problems, one for each class in MNIST (left) and Fashion-MNIST (right), for three models, the dAE model described in this paper, 1-Class SVM \cite{oneclasssvm} and COOL \cite{cool}. } \label{fig:oneclass_recog_results} \end{figure} \section{Discussion} The confidence score that was introduced in this paper was found to perform better than a set of competing models in open set recognition and 1-class recognition. The system was also found to be significantly more robust to the problem of fooling than the state-of-the-art COOL model. Together, these results show that it is possible to use information about the data generating distribution implicitly learnt by denoising autoencoders in meaningful ways, even without explicitly modeling the full distribution. It is to be noted that when comparing the results to the COOL model we used the same degree of overcompleteness ($\omega=10$) as in the original paper. However, fine tuning of the parameter and in particular using higher values may achieve higher performance on the benchmarking tasks used here. Also, similarly to the original COOL paper, fooling was attempted on the output of the classifier, rather than directly on the confidence scores. This gives an advantage to systems in which the confidence score is computed in more complex ways, not directly dependent on the output of the classifier. 
However, further tests as presented in Section \ref{sec:fooling} showed that the system presented here significantly outperforms the other models even when fooling is attempted directly on the confidence scores. In this particular case, it was further found that training the denoising autoencoder with heavy regularization resulted in generated samples resembling real digits, thus showing that the model had learnt a tight boundary around the data manifold. It is interesting that the Energy-Based GAN (EBGAN) \cite{ebgan} makes use of the reconstruction error of a denoising autoencoder in a way compatible with the interpretation proposed here. In particular, it uses it as an approximated energy function that is learnt by the autoencoder to take low values for points belonging to the training distribution and high values everywhere else. As we have seen in Equation \ref{eqn:autoencoder_score}, it has been shown that the reconstruction error of denoising autoencoders is proportional to the gradient of the log-density of the data. Thus, small absolute values of the reconstruction error correspond to extrema points of the distribution, not limited to local maxima but also including minima and saddle points. If Figure \ref{fig:2dvisualization} were a good example of the dynamics of the system even on more complex data, then the problem of local minima and saddle points may be limited. However, if that was not the case, then EBGAN might learn to generate samples from regions of local minima of the data distribution, which may not be desirable. It would be interesting to modify the system using the $\Gamma(\mathbf{x})$ function described here (Equation \ref{eqn:curvature_function}) in order to correctly isolate only the local maxima of the distribution. 
It would also be interesting to apply the regularization function used in EBGAN to the present model, adding a Pulling-away Term (PT) that forces learnt representations to be maximally different for different data points, by attempting to orthogonalize each pair of samples in a minibatch \cite{ebgan}. The stronger regularization may help the denoising autoencoder to learn a better representation of the data manifold, thus improving the confidence score $\tilde{c}(\mathbf{x})$. Further improvements in the performance of the system may be achieved by separating the classifier and the denoising autoencoder, although combining the two may have other advantages, like adding a degree of semi-supervised learning or regularization of the autoencoder. It may also be possible to train an autoencoder to reconstruct hidden representations produced by pre-trained models, thus relying on more stable feature vectors rather than high-dimensional raw inputs. \section{Conclusion} This paper presented a novel approach to address the problem of overgeneralization in neural networks by pairing a classifier with a denoising or contractive autoencoder that is used to compute a confidence score that assigns high values only for input vectors likely to belong to the training data distribution. In particular, recognition of an input as belonging to the distribution is performed by using an approximation of the gradient of the log-density and its curvature at the specific input point, and using this information to determine whether it lies close to a local maximum of the distribution. We have further explored the application of the system in the context of open set recognition. 
In general, the model presented here could be used in more complex architectures to allow for incremental and continual learning, by learning to recognize the regions of input space that have already been explored and learnt and potentially provide for different training regimes in the unexplored parts, in case new samples from those regions were to be observed in the future. For example, it may be applied to a system to allow for adding novel target classes even after deployment, without requiring a full re-training that may be costly in terms of compute time required, especially for large models. Similar to open set recognition is also 1-class recognition, that has proven to be a challenging problem. Building systems capable of robust 1-class recognition has critical applications in the detection of novelty, outliers and anomalies. In conclusion, developing discriminative models capable of capturing aspects of the data distribution, even without explicitly modeling it, can prove very useful in a large number of practical applications, and future work on the topic will be highly beneficial. Here a system was presented to address the problem and was shown to perform better than other previously proposed systems on a set of benchmarks. \section*{APPENDIX} \section*{Details of the simulations} \renewcommand{\thesubsection}{\alph{subsection}} The models were trained on a cross-entropy loss by Stochastic Gradient Descent using the ADAM algorithm \cite{adam_optimizer} with $\eta=0.001$, $\beta_1=0.9$ and $\beta_2=0.999$. Tensorflow \cite{tensorflow} was used for the experiments. \subsection{2D example} The dAE model used parameters $\alpha=40$, $\beta=5$ and $\sigma=0.2$, and a symmetric denoising autoencoder with inputs of size $2$ and two hidden layers both of size $200$. The classifier was a fully-connected layer attached to the top hidden layer of the autoencoder and had $3$ output units. Training was performed for $50000$ steps with minibatches of size $64$. 
The three target distributions were defined as uniform rings with thickness of $0.1$ and inner radius of $0.6$, centered at the three points $(-1, 1)$, $(1, 1)$ and $(1, -1)$. \subsection{Fooling} \label{appendix:foolingmnist} The models compared are a regular CNN, the same CNN with the output layer replaced with a COOL layer (degree of overcompleteness $\omega=10$, as in \cite{cool}), and the same CNN with the addition of a decoder connected to the top hidden layer of the CNN, to complete the denoising autoencoder used to compute the confidence score $\tilde{c}(\mathbf{x})$. The architecture of the CNN is $\{Conv2D(1 \rightarrow 32, 5 \times 5),~ MaxPool(2 \times 2),~ Conv2D(32 \rightarrow 64, 5 \times 5),~ MaxPool(2 \times 2),~ FullyConnected(64 \rightarrow 400),~ FullyConnected(400 \rightarrow 10)\}$. Each layer is followed by a ReLU non-linearity, except for the output layer that is followed by a softmax. Fooling was attempted for $20$ times for each digit, each for up to $10000$ update steps with a learning rate for updating the FGN set to $\eta=0.00001$ as in \cite{cool}. Training was performed for $100$ epochs for each model. The dAE model was trained with additive Gaussian noise with zero mean and $\sigma=0.2$ for MNIST, $\sigma=0.1$ for Fashion-MNIST, and parameters $\beta=10$ and $\alpha$ variable depending on the threshold used ($\alpha=20$ for the $90\%$ classification threshold, $\alpha=3$ for the $99\%$ threshold on MNIST, and $\alpha=2$ for the $99\%$ threshold on Fashion-MNIST). All models trained on the Fashion-MNIST dataset used $L_2$ regularization with $\lambda_{L_2}=10$ (i.e., CNN, COOL and dAE). \subsection{Open Set Recognition} The Open Set Recognition tests used the same models as for the MNIST fooling ones, with a single threshold of $99\%$. 
\subsection{1-Class Recognition} The COOL and dAE models used the same parameters as the other experiments, except for the MNIST experiments in which $L_2$ regularization of the weights was used ($\lambda_{L_2}=10$) for the dAE model, as well as $\sigma=0.3$. 1-Class SVM was trained using the scikit-learn library \cite{scikit-learn}, and used $\nu=0.1$ and an RBF kernel ($\gamma=0.1$). \bibliographystyle{unsrt
1,108,101,565,897
arxiv
\section{Introduction} Complex completely integrable Hamiltonian systems can be typically constructed starting from a locus $\mathcal{M}$ in the moduli space $\mathrm{Bun}_G(\Sigma)$ of holomorphic $G$-bundles or sheaves of certain type on a complex holomorphic symplectic surface $\Sigma$ with a structure of Lagrangian elliptic fibration $\Sigma \to X$, where the fibers $\Sigma_x$ are possibly degenerate elliptic curves, and $X$ is an algebraic curve typically called the base curve, see for example Section~0.3.6 in~\cite{Donagi:2000dr} and Section~3.8.2 in Donagi's lectures in~\cite{mason2003geometry} and~\cite{MR1397059,Donagi:1997dp,MR2042693,Donagi:2000dr} for more complete details. Indeed, the symplectic structure on $\Sigma$ induces the symplectic structure on the space $\mathcal{M}$ which becomes the phase space of integrable system, the structure of Lagrangian fibration $\Sigma \to X$ induces the structure of Lagrangian fibration on $\mathcal{M}$, and the fact that the fibers $\Sigma_x$ are abelian varieties (possibly degenerate elliptic curves) induces the structure of abelian varieties on the Lagrangian fibers in~$\mathcal{M}$. There are three cases to consider depending on whether the elliptic fibers are generically cusped elliptic, nodal elliptic or smooth elliptic. \looseness=-1 (1) Fibers are cusped elliptic. If $X$ is an algebraic curve, and $\Sigma \to X$ is a cotangent bundle whose fibers are compactified to cusped elliptic curves, this construction produces algebraic integrable system called Hitchin system on the curve $X$ \cite{Donagi1995,hitchin1987,MR1300764}. Hitchin system is an example of an abstract Higgs bundle on~$X$ valued in an abelian group $K$ over $X$ for the case when the group~$K$ is the canonical line bundle on $X$ endowed with natural linear additive group structure in the fiber direction. The Higgs field $\phi(x)$ is a holomorphic 1-form valued in the Lie algebra adjoint bundle $\operatorname{ad} \mathfrak{g}$. 
The respective integrable system is of additive type in the fiber direction. (2) Fibers are nodal elliptic. If $\Sigma \to X$ is a fibration whose fibers are nodal elliptic curves, then $\mathrm{Bun}_{G}(\Sigma)$ is equivalently described as a moduli space of multiplicative Higgs bundles $\mathrm{mHiggs}_G(X)$, that is moduli space of pairs $(P, g)$ where $P$ is a principal $G$-bundle on $X$, and Higgs field $g (x)$ is a section of Lie group adjoint bundle $\operatorname{ad} G$. The respective integrable system is of multiplicative type in vertical direction. In Donagi's lectures in \cite[Section 3.9]{MR2042693} one finds a remark on three types of integrable system in the fiber direction corresponding to the three types of connected 1-dimensional complex groups: an elliptic curve, the multiplicative group $G_m = \mathbb{C}^{\times}$ and the additive group $G_a = \mathbb{C}$, and that the latter two can be considered as groups of non-singular points in the elliptic case in the nodal and cuspidal limit respectively. He goes on to clarify that Hitchin systems are associated to the cuspidal type and principal bundles on smooth elliptic fibrations to smooth elliptic type, and then asks ``Is there an interesting geometric interpretation of the remaining ``trigonometric'' case, where the values are taken in multiplicative group $G_{m}$?'' We believe so and we refer to several geometrical perspectives on the multiplicative case further in the introduction. For the basic definitions see \cite{Cherkis:2001gm,Elliott2018,Frenkel2011,MR1974589,Nekrasov:2012xe}. (3) Fibers are smooth elliptic. If $\Sigma \to X$ is an elliptically fibered complex surface with generically smooth fibers, the corresponding case was studied in \cite{Donagi:1997dp,Friedman:1997yq}. 
Using Looijenga's description of the moduli space of $G$-bundles on a smooth elliptic fiber as a space of conjugacy classes in the affine Kac--Moody Lie group $\hat G$ \cite{Loojienga:1976}, we can also interpret $\mathrm{Bun}_{G}(\Sigma)$ as a moduli space $\mathrm{mHiggs}_{\hat G}(X)$ of multiplicative Higgs bundles for the affine Kac--Moody group $\hat G$. The respective integrable system is of elliptic type in the vertical direction. The case (1) of additive Higgs bundles (Hitchin systems) received a large amount of attention in the mathematical literature in the context of the geometrical Langlands correspondence and in the physical literature in the context of the $6d$ $(2,0)$ superconformal self-dual tensor theory compactified on an algebraic complex curve $X$ for $G$ of ADE type \cite{Alday:2009aq,Beilinson,Kapustin:2006pk,Nekrasov:2010ka}. Quantization of additive Higgs bundles on the curve $X$ relates to the theory of Kac--Moody current algebras on $X$, conformal blocks of $W$-algebra on $X$ with punctures, D-modules on $\mathrm{Bun}_{G}(X)$, and monodromy problems for various related differential equations. The case (2) of multiplicative Higgs bundles on a complex curve $X$ appeared first in the context of current Poisson--Lie groups $G(x)$ with spectral parameter $x \in X$. A~Poisson--Lie group is a Lie group equipped with a Poisson structure compatible with the group multiplication law. There is a standard way to equip $G(x)$ with a Poisson structure called the quadratic Sklyanin bracket given a nowhere-vanishing holomorphic differential 1-form on $X$ (possibly with poles). Quantization of this Poisson structure leads to the theory of quantum groups \cite{Drinfeld:1986,MR1062425} which have been discovered in the context of the inverse scattering method, quantum integrable spin chains, the Yang--Baxter equation and the $R$-matrix with spectral parameter. 
The standard horizontal trichotomy of the rational, trigonometric or elliptic $R$-matrix corresponds to taking the base $X$ to be the $\mathbb{P}^{1}$ with 1-form with a single quadratic pole (rational type), the $\mathbb{P}^{1}$ with 1-form with two simple poles (trigonometric type), or a smooth elliptic curve (elliptic type). For the smooth elliptic base curve $X$ the multiplicative Higgs bundle was studied in \cite{MR1974589}, following \cite{MR1346215, MR1334607}. Independently, the definition of multiplicative Higgs bundles was given in~\cite{Frenkel2011} where they were called $G$-pairs. On the other hand, multiplicative Higgs bundles on $X$ have been studied as periodic monopoles on the real three-dimensional Riemannian manifold $X \times S^1$ via the monodromy map \cite{Charbonneau2008,Cherkis:2000cj,Cherkis:2001gm,Cherkis:2000ft,Gorsky:1997jq,Gorsky:1997mw,Nekrasov:2012xe}. The relation between quantization of the moduli space of monopoles on $\mathbb{R}^3$ and the Yangian has been proposed in \cite{Gerasimov:2005qz} and further work in this direction has appeared in \cite{MR3248988}. Recently a quantization of the holomorphic symplectic phase space of the moduli space of monopoles on $X \times S^1$ by a formal semi-holomorphic Chern--Simons functional on $X \times S^1 \times \mathbb{R}_t$, where $\mathbb{R}_{t}$ is the time direction, has been studied in \cite{Costello:2013zra, Costello:2017dso}. For simple Lie groups $G$ of the ADE type these moduli spaces appear as Coulomb branches of the moduli space of vacua of the $\mathcal{N}=2$ supersymmetric ADE quiver gauge theory on~\mbox{$\mathbb{R}^3 \times S^1$} \cite{Braverman:2016pwk,Braverman:2016wma,Nakajima:2015txa, Nekrasov:2012xe}. 
Some constructions from the world of additive Higgs bundles have their versions in the world of multiplicative Higgs bundles \cite{Elliott2018} leading to difference equations and their monodromy problems~\cite{birkhoff1913generalized, sauloy2004isomonodromy}, $q$-geometric Langlands correspondence \cite{Aganagic:2017smx}, $q{-}W$ algebras \cite{Kimura:2015rgi, Nekrasov:2015wsu,Nekrasov:2013xda}. \looseness=-1 The goal of this paper is to present very concretely a Darboux coordinate system on a moduli space ${\rm GL}_{r}$ multiplicative Higgs bundles of degree 1 on the rational base $X = \mathbb{P}^1_x$. The base curve~$X$ is equipped with a holomorphic one-form ${\rm d} x$ that has the quadratic pole at $x_\infty = \infty$. The holomorphic one-form ${\rm d}x$ together with the Killing form on the Lie algebra induces the quadratic Sklyanin Poisson structure with the classical $\mathfrak{r}$-matrix of rational type in the spectral parameter~$x$. Equivalently, we are studying degree $1$ symplectic leaves in the rational Poisson--Lie group ${\rm GL}_{r}(\mathcal{K}_{\mathbb{P}^1})$, where $\mathcal{K}_{\mathbb{P}^1}$ denotes the field of rational functions on $\mathbb{P}^1$, and degree $1$ means that all matrix elements $(L_{ij}(x))_{1 \leq i,j \leq r}$ of the multiplicative Higgs field $g(x)$ in the defining representation of ${\rm GL}_{r}$ by $r \times r$ matrices~$L_{ij}(x)$ are degree~1 polynomials of~$x$, i.e., linear functions of~$x$. \looseness=-1 By concrete presentation we mean introduction of explicit Darboux coordinates (canonically conjugated set of $(\underline p, \underline q) = \big(p_I, q^I\big)$ variables with $\big\{p_I, q^{J}\big\} = \delta_I^J$) and presentation of explicit formulae for the matrix elements $L_{ij}(x)$ in terms of $\big(p_I, q^J\big)$. The complete set of commuting Hamiltonian functions is obtained from the coefficients of the spectral determinant of $L(x)$. 
The matrix $L(x)$ valued in functions on the phase space is called Lax matrix and its matrix elements satisfy quadratic Sklyanin Poisson brackets, see in particular \cite{babelon:hal-00101459, FaddeevBook} but also the recent lecture notes~\cite{Torrielli:2016ufi}. \looseness=-1 The quadratic Sklyanin Poisson brackets can be also defined as semi-classical limit of the quantum Yang--Baxter equation \cite{Sklyanin:121210,Sklyanin:1982tf}. In this paper we find all rational solutions of degree~$1$ in the spectral parameter $x$ associated to the classical Yang--Baxter equation defined by the rational $\mathfrak{gl}(r)$-invariant $\mathfrak{r}$-matrix, cf.~\cite{Belavin1982}. In another note we plan to consider the trigonometric case associated to the base curve being a punctured nodal elliptic curve $X =\mathbb{C}^{\times}_x$ equipped with the holomorphic one-form $\frac{{\rm d}x}{x}$ that has simple pole at $x = 0$ and $x = \infty$, a related work appears in~\cite{2017arXiv170801795F}. The case of $G = {\rm GL}_2$ is well studied. Here the Sklyanin relation admits three different elementary types of non-trivial solutions with matrix elements linear in the spectral parameter~$x$ that yield integrable models. These solutions are called the $2 \times 2$ elementary Lax matrices for the Heisenberg magnet, the DST chain and the Toda chain. For an overview we refer the reader to lecture notes of Sklyanin~\cite{sklyanin00}. For higher rank $r$, to the best knowledge of the authors the explicit presentation of all linear solutions is missing in the literature. The case regular at the infinity $x_\infty \in X$ has been described in \cite{ShapiroThesis} and many other places. Some partial cases of Toda like solutions for irregular case have been described in \cite{Gorsky:1997jq,Gorsky:1997mw,Meneghelli:thesis}. The classifying labels appeared in \cite{Haouzi:2016ohr}. 
In the quantum case some solutions to the Yang--Baxter equation were studied in connection to non-compact spin chains and Baxter $Q$-operators, in particular for the case of $\mathfrak{gl}_r$ we refer the reader to \cite{Bazhanov:2010jq, Derkachov:2006fw}. The solutions relevant for non-compact spin chains can be obtained by realising the quantum $R$-matrix in terms of an infinite-dimensional oscillators algebra which is also known as free-field realisation, see, e.g.,~\cite{DiFrancesco:1997nk}. The Lax matrices relevant for $Q$-operators are certain degenerate solutions in the sense that the term proportional to the spectral parameter is not the identity matrix but a matrix of lower rank. These Lax matrices can be obtained from the non-degenerate case through a limiting procedure as discussed in \cite{Bazhanov:2008yc} for $\mathfrak{gl}(2|1)$, \cite{Gorsky:1997jq,Gorsky:1997mw} for $\mathfrak{gl}_3$ or directly from the universal $R$-matrix as shown in~\cite{Boos:2010ss} for~$\mathfrak{gl}(3)$. Vice versa to the limiting procedure and as discussed in \cite{Bazhanov:2010jq}, one can also obtain the Lax matrices of non-compact spin chains by fusing the degenerate solutions relevant for $Q$-operators. Here we follow the strategy of fusion in order to construct a family of ${\rm GL}_r$ Lax matrices $L(x)$ whose matrix elements are linear in spectral parameter~$x$. The discrete data of labels in our family is specified by two partitions $\lambda$ and $\mu$ such that the total size is $|\lambda| + |\mu| = r$ and whose columns $\lambda_i^{t}$, $\mu_i^{t}$ are restricted by $r$. In addition to the discrete partition labels $(\lambda, \mu)$ we have a sequence of complex labels. There is a complex parameter $x_i$ assigned to each column $\lambda_i^{t}$ of the partition $\lambda$. 
Geometrically speaking, each pair $\big(\lambda_i^{t}, x_i\big)$ describes a type of singularity of the multiplicative Higgs field $g(x)$ at finite point $x_i \in \mathbb{C}= \mathbb{P}^1 \setminus \{x_\infty\}$ given by the conjugacy class of $ (x - x_i)^{\check \omega_{\lambda_i^{t}}}$ where $\check \omega_k$ denotes $k$-th fundamental co-weight of ${\rm GL}_{r}$: that is the highest weight of the $k$-th antisymmetric power of the fundamental representation for the Langlands dual group ${\rm GL}_r$. Such highest weight is encoded by the column of height $\lambda_i^{t}$ in the partition~$\lambda$. Equivalently, in the neighborhood of the point $x_i$ in the spectrum of the $r \times r$ Lax matrix $L(x)$ there are exactly $\lambda_i^{t}$ eigenvalues which vanish linearly as~$x$ approaches~$x_i$, and the remaining $r - \lambda_i^{t}$ eigenvalues are regular non-zero at~$x_i$. The partition $\mu$ specifies a dominant co-weight of singularity of the multiplicative Higgs field at the infinity point $x_{\infty} \in \mathbb{P}^1$, or equivalently the asymptotics of the eigenvalues of the Lax matrix $L(x)$ as $x \to \infty$: given $r$ rows $(\mu_j)_{j \in [1, \dots, r]}$ of the partition $\mu$, the $j$-th eigenvalue of the Lax matrix $L(x)$ has asymptotics $(x^{-1})^{\mu_j-1}$ as $x \to \infty$. We remark that the restriction on the total size of two partitions $|\mu| + \sum_{i} \lambda_i^{t} =r $ is a~consequence of the restriction of the present paper to consider only Lax matrices whose matrix elements are linear functions of~$x$. 
In the complete classification, if we allow higher degree of $x$ in the matrix elements, which is not in the scope of the present paper, the label of a singularity at any finite point $x_i$ is an arbitrary dominant ${\rm GL}_{r}$ co-weight described by an arbitrary partition~$\lambda_{i}$, so that if rows of partition $\lambda_i$ are denoted by $(\lambda_{ij})_{j \in [1, \dots, r]}$ then $j$-th eigenvalue of the Lax matrix $L(x)$ behaves as $ (x - x_i)^{\lambda_{ij}}$ as $ x \to x_i$. We leave for another note the presentation of explicit formulae for complete classification of the symplectic leaves of the degree $d$ whose matrix elements are degree $d$ polynomials of~$x$ for $|\mu| + \sum_{i} \lambda_i^{t} = d r$. (By looking at the determinant of $g(x)$ we see that the moduli space is non-empty only if the total size $|\mu| + \sum_{i} \lambda_i^{t}$ is integral multiple of rank~$r$, cf., e.g.,~\cite{MR1974589,Nekrasov:2012xe}. This condition means that the total dominant co-weight summed over all singularities $\check \omega_{\rm tot}$ belongs to the lattice of co-roots.\footnote{In the monopole picture, the topological degree of gauge bundle induced on a surface enclosing all singularities is trivial. The topological degree is an element in $\pi_1(G) \simeq \check \Lambda / \check Q$, where $\check \Lambda$ and $\check Q$ denote the lattice of co-weights and co-roots.} To summarize, near every singularity on $\mathbb{CP}^{1}$ in a local coordinate $w$ such that $w =0$ is a position of singularity, we have asymptotics $[g(w)] \sim w^{\omega^{\vee}}$ where $\omega^{\vee}\colon \mathbb{C} \to T_{G}$ is a co-weight (either $\lambda_i^{t}$ or $\mu$) that characterizes the singularity. Normally, because the total degree ($U(1)$-charge) vanishes, the sum of degrees of all co-weights $\omega$ must vanish. 
We have chosen to shift the notational representation of the singularity co-weight at infinity by adding $1$ to each row of the co-weight $\omega_{\infty}^{\vee}$ so that it is described by a positive partition $\mu$. In consequence, the sum over all partitions $\lambda_i$'s and $\mu$ is no longer zero but $r$, since there are $r$ rows in $\omega_{\infty}^{\vee}$, and each has been increased by $1$ in our notations: $\mu_j = \omega_{\infty,j} + 1$ for each row $j =1,\dots, r$. In our solutions we can obtain higher (non-fundamental co-weight) singularities at a finite point $x_{*}$ by collision of several fundamental singularities at $x_{i_1}, x_{i_2}, \dots , x_{i_k}$ which are associated to some columns $\lambda_{i_1}^{t}, \dots, \lambda_{i_k}^{t}$ of the partition $\lambda$ by sending all of them to the common point $ x_{*}$. In this case, generically, the multiplicative Higgs field $g(x)$ develops the singularity at point $x_{*}$ specified by a higher (non-fundamental) co-weight $\sum\limits_{j=1}^{k} \check \omega_{\lambda_{i_j}^{t}}$. \looseness=-1 As we will see, all Lax matrices regular at the infinity $x_\infty$, that is $\mu=\varnothing$ in the current notations, and arbitrary $\lambda$ can be obtained by the fusion procedure of the elementary Lax matrices used in the $Q$-operator construction~\cite{Bazhanov:2010jq}. Also the case regular at infinity has been described in~\cite{ShapiroThesis}, where it was shown that degree~1 rational symplectic leaves for $G = {\rm GL}_{r}$ correspond to the co-adjoint orbits in the dual Lie algebra $\mathfrak{gl}_r^{*}$. The parametrization by Darboux coordinates of the holomorphic symplectic co-adjoint orbits in $\mathfrak{gl}_{r}^{*}$ identical to that of the present paper has been proposed in \cite{babich2016birational}. Then we proceed to build Lax matrices irregular at infinity from the fusion of a certain set of elementary Lax matrices whose irregularity at infinity is of the simplest type. 
Let us clarify the geometrical meaning of fusion. A Lax matrix $L_{\underline{\lambda}, \underline{x}, \mu} (x; \underline{p}, \underline{q})$ with a certain prescribed type of singularities at $\underline{x}, x_\infty$ parametrizes by a system of Darboux coordinates $(\underline{p}, \underline{q})$ a finite-dimensional symplectic leaf in the infinite-dimensional Poisson--Lie group $\mathcal{G} = {\rm GL}_{r}(\mathcal{K}_{\mathbb{P}^1})$ where $\mathcal{K}_{\mathbb{P}^1}$ denotes the field of rational functions on $\mathbb{P}^1$. More geometrically, a Lax matrix $L(x, \underline{p}, \underline{q})$ is a universal group-valued (multiplicative) Higgs field on a Darboux chart in the second factor of $\mathbb{C}_{x} \times \mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$ represented in $r \times r$ matrices, where $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$ is a moduli space of multiplicative Higgs fields of a certain type $({\underline{\lambda}, \underline{x}, \mu})$, and the complex spectral plane $\mathbb{C}_{x}$ is the domain of the Higgs field $g(x)$. So for us a Lax matrix $L_{{\underline{\lambda}, \underline{x}, \mu}}$ is a composition of the Darboux chart parametrization \begin{gather*} \mathbb{C}^{2 d_{\underline{\lambda}, \mu}} \to \mathcal{M}_{{\underline{\lambda}, \underline{x}, \mu}} \end{gather*} with a universal Higgs field map \begin{gather*} \mathbb{C}_{x} \times \mathcal{M}_{\underline{\lambda}, \underline{x}, \mu} \to \mathrm{Mat}_{r \times r}. \end{gather*} Suppose we are given a symplectic leaf $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu} \subset \mathcal{G}$ described by a Lax matrix $L_{\underline{\lambda}, \underline{x}, \mu} (x; \underline{p}, \underline{q})$ and a symplectic leaf $\mathcal{M}'_{\underline{\lambda}',\underline{x}', \mu'} \subset \mathcal{G}$ described by a Lax matrix $L_{\underline{\lambda}', \underline{x}', \mu'} (x; \underline{p'}, \underline{q'})$. 
By definition of Poisson--Lie group structure on $\mathcal{G}$ the group multiplication map \begin{gather}\label{eq:groupm} m\colon \ \mathcal{G} \times \mathcal{G} \to \mathcal{G} \end{gather} is a Poisson map, i.e., the pushforward of the product Poisson structure on $\mathcal{G} \times \mathcal{G}$ coincides with the Poisson structure on $\mathcal{G}$. The symplectic leaves $\mathcal{M}$, $\mathcal{M}'$ are, in particular, co-isotropic submanifolds of $\mathcal{G}$, hence $ \mathcal{M} \times \mathcal{M}'$ is a co-isotropic submanifold of $\mathcal{G} \times \mathcal{G}$. Now, since the group multiplication map~$m$ in~\eqref{eq:groupm} is a Poisson map, and since the Poisson map preserves the co-isotropic property of the submanifolds, the image $m(\mathcal{M} \times \mathcal{M}') \subset \mathcal{G}$ is a co-isotropic subspace. The $\mathcal{G}$-elements in the co-isotropic subspace $m(\mathcal{M} \times \mathcal{M}') \subset \mathcal{G}$ are represented by Lax matrices \begin{gather}\label{eq:fusion1} L_{\underline{\lambda}, \underline{x}, \mu} (x; \underline{p}, \underline{q}) L_{\underline{\lambda}', \underline{x}', \mu'} (x; \underline{p'}, \underline{q'}) \end{gather} and their type of singularities is typically a combination of the types of singularities of $(\underline \lambda, \underline x, \mu)$ and $(\underline{\lambda}', \underline x', \mu')$. However, $m(\mathcal{M} \times \mathcal{M}') \subset \mathcal{G}$ is not in general a symplectic leaf but a co-isotropic submanifold, and we can further slice it into symplectic leaves by determining the set of Casimir functions~$\tilde q'$ on $m(\mathcal{M} \times \mathcal{M}')$ and a set of new conjugated coordinates $\underline{\tilde p}$, $\underline{ \tilde q}$. 
We find that \begin{gather*} L_{\underline{\lambda}, \underline{x}, \mu} (x; \underline{p}, \underline{q}) L_{\underline{\lambda}', \underline{x}', \mu'} (x; \underline{p'}, \underline{q'}) = \tilde C(\underline{\tilde q}') \tilde{L}_{{\underline{\tilde{\lambda}}, \underline{\tilde{x}}, \tilde{\mu}}} (x; \underline{\tilde p}, \underline{\tilde q}) \end{gather*} with the canonical transformation \begin{gather*} {\rm d}\underline{p} \wedge {\rm d}\underline{q} + {\rm d}\underline{p}' \wedge {\rm d}\underline{q}' = {\rm d} \tilde {\underline{p}} \wedge {\rm d} \tilde {\underline{q}} + {\rm d} \tilde {\underline{p}} ' \wedge {\rm d} \tilde {\underline{q}'} . \end{gather*} Notice that the conjugate variables $\tilde p'$ to the Casimir functions $\tilde q'$ on $\tilde S$ do not appear on the right side of (\ref{eq:fusion1}). The Lax matrices $ \tilde{L}_{{\underline{\tilde{\lambda}}, \underline{\tilde{x}}, \tilde{\mu}}} (x; \underline{\tilde p}, \underline{\tilde q})$ represent elements of $\mathcal{G}$ in a new symplectic leaf $\mathcal{M}_{{\underline{\tilde{\lambda}}, \underline{\tilde{x}}, \tilde{\mu}}}$ covered by Darboux coordinates $\underline{\tilde p}$, $\underline{\tilde q}$. The symplectic leaves $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu} $ arise as moduli spaces of multiplicative Higgs bundles of certain type~\cite{Elliott2018}, and like additive Higgs bundles (Hitchin system), the symplectic leaves $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu} $ support the structure of an algebraic completely integrable system. 
In fact, the moduli spaces $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu} $ can be also interpreted as moduli spaces of $U(r)$ monopoles on 3-dimensional Riemannian space $\mathbb{R}^2 \times S^1$ where $\mathbb{R}^2 \simeq \mathbb{C} = \mathbb{P}^1 \setminus \{x_\infty\}$, and consequently \cite{Cherkis:2000cj,Cherkis:2001gm,Cherkis:2000ft,Nekrasov:2012xe} as moduli spaces of vacua of certain $\mathcal{N}=2$ supersymmetric quiver gauge theories on~$\mathbb{R}^{3} \times S^1$ of quiver type $A_{r-1}$. The complex parameters $x_i \in \mathbb{C}$ which specify the position of singularities of the Lax matrix~$ L_{\underline{\lambda}, \underline{x}, \mu} $ play the role of the masses of the fundamental multiplets attached to the quiver node~$\lambda_{i}^{t}$ in the $A_{r-1}$ quiver diagram (i.e., the node associated to a simple root dual to the fundamental co-weight $\lambda_{i}^{t}$), and at the same time they play the role of the complex part of the coordinates of the positions of the Dirac singularities of the $U(r)$ monopoles on $\mathbb{R}^2 \times S^1$ under identification $\mathbb{R}^2 \simeq \mathbb{C}$. For polynomial Lax matrices that we consider in this paper the eigenvalues of $L(x)$ at the singularities $\underline{x}$ can have only zeros and no poles, thus the corresponding periodic monopoles can have only negatively charged Dirac singularities. If the partition $\mu$ is empty, then the corresponding $A_{r-1}$ quiver gauge theory is $\mathcal{N}=2$ superconformal theory, and corresponding monopoles on $\mathbb{R}^2 \times S^1$ are regular at infinity. Non-empty partition $\mu$ corresponds to monopoles on $\mathbb{R}^2 \times S^1$ with non-trivial growth (or charge) at infinity controlled by~$\mu$, or to the Coulomb branches of asymptotically-free quiver gauge theories with $\beta$-function controlled by~$\mu$. 
Consequently, the integrable system supported on a symplectic leaf $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu} $ is identical to the Seiberg--Witten integrable system for a certain $A_{r-1}$ quiver gauge theory. The complete set of commuting Hamiltonian functions $H_{ij}$ can be extracted from the spectral determinant of the associated Lax matrix \begin{gather}\label{eq:spectral1} \det \big( y - g_{\infty} L_{\underline{\lambda}, \underline{x}, \mu} (x; \underline{p}, \underline{q})\big) = \sum_{i,j} H_{ij} x^i y^j \end{gather} by taking coefficients at the monomials $x^i y^j$ where the appearing pairs of indices $(i,j)$ can be described by certain profiles like Newton diagrams. The spectral curves~(\ref{eq:spectral1}) coincide with the spectral curves of the integrable systems studied in~\cite{Nekrasov:2012xe,Nekrasov:2013xda}. Equivalently, since the determinant can be expanded in terms of the characters $\tr_{R_k}$ of the $k$-th exterior powers of the fundamental representation, the commuting Hamiltonians are expressed as coefficients at powers of $x$ in the characters~$ \tr_{R_k} L(x)$. We remark that by switching the role of variables $x \in \mathbb{C}$ and $y \in \mathbb{C}^{\times}$ (fiber-base duality) the spectral curve (\ref{eq:spectral1}) of multiplicative Higgs bundle on $X$ can be also interpreted as the spectral curve of additive Higgs bundle (Hitchin system) on $Y = \mathbb{C}^{\times} = \mathbb{P}^{1}_{0, \infty}$. This is a peculiarity related to the fact that we are considering the rational case of the base $X = \mathbb{P}^{1}$ corresponding to the monopoles on $\mathbb{R}^2 \times S^1$ and $4d$ quiver gauge theories rather than the trigonometric or elliptic base $X$ corresponding to the monopoles on $\mathbb{R} \times S^1 \times S^1$ or $S^1 \times S^1 \times S^1$ that relate to~$5d$ or~$6d$ quiver gauge theories compactified on $S^1$ or $S^1 \times S^1$, and also that we take the gauge group to be of type ${\rm GL}_{r}$. 
In this situation, the moduli space of $U(r)$ monopoles on $\mathbb{R}^2 \times S^1$ with several singularities has an alternative presentation (Nahm duality) as the ${\rm GL}_n$ Hitchin moduli space on $\mathbb{C}^{\times}$ with $r$ singularities where $n$ depends on the number and type of the singularities of the multiplicative Higgs bundle on~$X$ \cite{Cherkis:2000cj, Cherkis:2001gm,Cherkis:2000ft,Nekrasov:2012xe}. \looseness=-1 In any case, the fusion method of this paper allows us to analyze the multiplicative Higgs bundles in more general cases, which we leave for a future work, when Nahm duality of multiplicative Higgs bundle to a Hitchin system is not known. In particular, in the future one can study classification of symplectic leaves with matrix elements of higher degree in~$x$, one can analyze the trigonometric case with the base curve $X$ being $\mathbb{C}^{\times} = \mathbb{P}^{1} \setminus \{0, \infty\}$ or the elliptic case when the base curve~$X$ is a smooth elliptic curve like \cite{MR1974589} and consider arbitrary complex reductive Lie groups~$G$. The article is organised as follows. In Section~\ref{sec:main} we recall and set notations about Poisson--Lie groups, Sklyanin brackets and Lax matrices. In Section~\ref{sec:lambda} we build the Lax matrices for arbitrary partitions $\underline{\lambda}$ and empty $\mu=\varnothing$ from certain elementary building blocks by fusion. Similarly, in Section~\ref{sec:mupart} we build Lax matrices for arbitrary partitions $\mu$ with $\underline{\lambda}=\varnothing$ again employing certain elementary solutions using a slightly modified fusion procedure. In Section~\ref{sec:lmpart} we combine the solutions of Sections~\ref{sec:lambda} and \ref{sec:mupart} to write down the Lax matrices for arbitrary $\underline{\lambda}$ and $\underline{\mu}$. In Section~\ref{sec:specdet} we study the spectral determinant of the derived Lax matrices and compare our results with~\cite{Nekrasov:2012xe}. 
In Section~\ref{sec:higherdegree} we say a few words on higher degree symplectic leaves. In Section~\ref{sec:quantum} we consider the quantization of the algebra of functions and the integrable system. \section{Rational Poisson--Lie group and Sklyanin brackets}\label{sec:main} Let $X =\mathbb{P}^1$ be the base curve equipped with the differential holomorphic volume form $dx$ that has a single quadratic pole at $x_{\infty} \in \mathbb{P}^{1}$. Fix a Killing form $\tr$ on $\mathfrak{g}$. Then the residue pairing \begin{gather*} \tr \oint_{x =0} f(x) g(x) {\rm d}x \end{gather*} induces the metric on $\mathfrak{g}_{D} = \mathfrak{g}((x))$ with respect to which $\mathfrak{g}[[x]]$ and $x^{-1} \mathfrak{g}\big[x^{-1}\big]$ are isotropic subspaces and we have $\mathfrak{g}_{D} = \mathfrak{g}_{+} \oplus \mathfrak{g}_{-}$. This splitting induces the structure of the Lie bi-algebra on $\mathfrak{g}_{+}$, which means that the space of functions on $\mathfrak{g}_{+}$ is equipped with the Poisson bracket (induced from the Lie bracket on $\mathfrak{g}_{-}$). The data $(\mathfrak{g}_{D}, \mathfrak{g}_{+}, \mathfrak{g}_{-})$ is called Manin triple. The Poisson bracket on the functions on $\mathfrak{g}_{+}$ can be extended to the Poisson bracket on the functions on the Lie group $G_{+}$ with the Lie algebra $\mathfrak{g}_{+}$, and the resulting bracket is called Sklyanin quadratic bracket with the rational $\mathfrak{r}$-matrix. The space of rational multiplicative Higgs fields on $X = \mathbb{P}^{1}$ with a fixed framing of the gauge bundle at $x_\infty$ forms a Poisson--Lie group~\cite{Elliott2018}. In the following we consider gauge group $G = {\rm GL}_{r}$ and for a Higgs field $g(x)$ we call $L(x)$ the representation of $g(x)$ by $r \times r$ matrix valued functions $L(x)$ called Lax matrices. 
The space of Lax matrices $L(x)$ carries the quadratic Poisson bracket of rational Sklyanin type \begin{gather}\label{eq:skl} \{L(x)\otimes I ,I \otimes L(y)\}=[L(x)\otimes L(y),\mathfrak{r}(x-y)] , \end{gather} the quantization of which gives quantum Yang--Baxter equation \cite{Sklyanin:1982tf}. Here the $I $ denotes the $r\times r$ identity matrix, and the classical rational $\mathfrak{r}$-matrix of $\mathfrak{gl}(r)$ is \begin{gather}\label{eq:perm} \mathfrak{r}(x)=x^{-1}\mathbb{P} ,\qquad\text{with}\quad \mathbb{P}=\sum_{a,b=1}^r e_{ab}\otimes e_{ba} . \end{gather} The bracket on the right-hand-side of \eqref{eq:skl} denotes the commutator $[X,Y]=XY-YX$. In a~system of Darboux coordinates $\big(\underline{p},\underline{q}\big)=\big(p_{I}, q^{I}\big)$, the Poisson bracket is \begin{gather*} \{X,Y\}=\sum_{I}\left(\frac{\partial X}{\partial p_{I}}\frac{\partial Y}{\partial q^{I}}-\frac{\partial X}{\partial q^{I}}\frac{\partial Y}{\partial p_{I}}\right), \end{gather*} where we sum over all conjugate variables $(\underline{p},\underline{q})$ in the Lax matrices $L$. In index notations, the Poisson bracket of matrix elements~(\ref{eq:perm}) reads as follows \begin{gather* \{ L_{ij}(x), L_{kl}(y) \} = - \frac{1} {x - y} (L_{kj}(x) L_{il}(y) - L_{kj}(y) L_{il}(x)) . \end{gather*} The solutions to the Sklyanin relation \eqref{eq:skl} that appear in this paper are labelled by two partitions \begin{gather*} \lambda=(\lambda_1,\lambda_2,\ldots,\lambda_r) ,\qquad \mu=(\mu_1,\mu_2,\ldots,\mu_r) , \end{gather*} with $\lambda_1\geq \lambda_2\geq \cdots\geq \lambda_r$ and $\mu_1\geq \mu_2\geq \cdots\geq \mu_r$ where $\lambda_i,\mu_i\in \mathbb{Z}_{\geq 0}$. The total number $|\lambda|$ of elements in the partition $\lambda$ combined with total number $|\mu|$ of elements in the partition $\mu$ is equal to~$r$. We study solutions $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ whose matrix elements are no higher than of degree 1 in the spectral parameter $x$. 
We can assume that \begin{gather*} L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})=x\times \diag(\underbrace{0,\ldots,0}_{\mu_1^t},\underbrace{1,\ldots,1}_{r-\mu^t_1})+M_{\lambda,\underline{x},\mu}(\underline{p},\underline{q}) . \end{gather*} Here $\mu_1^t$ denotes the first column, i.e., the first element in the transposed partition $\mu^t=\big(\mu_1^t,\mu_2^t,\ldots,\mu_r^t\big)$ and $M_{\lambda,\underline{x},\mu}(\underline{p},\underline{q})$ denotes an $r\times r$ matrix which is independent of the spectral parameter $x$. In total the matrix $M_{\lambda,\underline{x},\mu}(\underline{p},\underline{q})$ contains \begin{gather}\label{eq:dimform} d_{\lambda,\mu}=\frac{1}{2}\left( r^2-\sum_{i=1}^{\lambda_1}\big(\lambda_i^t\big)^2-\sum_{i=1}^{\mu_1}\big(\mu_i^t\big)^2\right) , \end{gather} pairs of variables $\big(p_I,q^I\big)$, i.e., $I=1,2,\ldots, d_{\lambda,\mu}$. Again the transposed partition is denoted as $\lambda^t=\big(\lambda_1^t,\lambda_2^t,\ldots,\lambda_r^t\big)$, and $\lambda_{i}^{t}$ are called columns. The dimension of the corresponding symplectic leaf or the moduli space of multiplicative Higgs bundles will be given by \begin{gather*} \dim_{\mathbb{C}} \mathcal{M}_{\lambda,\underline{x},\mu} = 2 d_{\underline{\lambda}, \mu} . \end{gather*} We fix the singularity of the $L(x)$ at points $x \to x_i$ to be of the form $[g(x)] \sim ( x - x_{i})^{\check \omega_{\lambda_{i}^{t}}}$, up to a~regular factor, where $\check \omega_{k}$ is the $k$-th fundamental co-weight associated in Young notations to a~column of height $k$, and at infinity $x \to x_\infty$ we take the singularity to be $[g(x)] \sim (x^{-1})^{\sum_i \check \omega_{\mu^{t}_i}- \check \omega_{r}} $. Here $\check \omega_{r}$ is a co-weight associated to the column of height $r$ and denoting the diagonal homomorphism ${\rm GL}_1 \to T_{{\rm GL}_{r}}$ where $T$ stands for the maximal torus, that is a co-weight dual to the weight of the determinant line representation. 
The determinant of $L(x)$ determined by the partition $\lambda$ is a polynomial of degree $|\lambda|$ with roots $x_i$ of degeneracy $\lambda_i^t$: \begin{gather}\label{eq:detlax} \det L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})=\prod_{i=1}^{\lambda_1}(x-x_i)^{\lambda_i^t} . \end{gather} The explicit form of the matrices $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ is given in Section~\ref{sec:lambda} for $\mu=\varnothing$, in Section~\ref{sec:mupart} for $\lambda=\varnothing$ and for arbitrary $\lambda$ and $\mu$ with $|\lambda| + |\mu| = r$ in Section~\ref{sec:lmpart}. As explained in the introduction, we can allow parameters $x_i$ to collide in which case the dominant co-weight $\check \omega_{*}$ of the singularity at the collision point $x_{*}$ is represented by a partition composed of several columns from $\lambda$, and is equal to the sum of the fundamental co-weights associated to each individual column in $\lambda$. In this way we get a symplectic leaf $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$ whose singularity type at $x_{*} \in \underline{x}$ is described by a partition $\lambda_{*} \in \underline{\lambda}$. Bearing this in mind, in the following we assume that parameters $x_i$ are assigned to individual columns $\lambda_i^{t}$ of a single partition~$\lambda$. \section[Degree 1 symplectic leaves regular with fundamental singularity at infinity]{Degree 1 symplectic leaves regular\\ with fundamental singularity at infinity}\label{sec:lambda} In this section we focus on the ${\rm GL}_r$ Lax matrices that correspond to arbitrary partitions $\lambda$ of size $|\lambda|$ and a single column $\mu$-partition, $\mu=1^{[r-|\lambda|]}$. In particular if $|\lambda| = r$ then $\mu$ is empty. Since $\mu$ is a single column, the singularity at infinity is specified by a fundamental co-weight. 
The associated $A_{r-1}$ quiver gauge theory with fundamental hypermultiplets~\cite{Nekrasov:2012xe} differs from the conformal class by absence of a~single fundamental multiplet in the node~$\mu_1^{t}$. We will assume that each element of the transposed partition $\lambda^t$, i.e., each column $\lambda^{t}_i$ of the partition $\lambda$ specifies a singularity of the Lax matrix~$L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ at point $ x = x_i$ of the type~$\check{\omega}_{\lambda_i^{t}}$. Here $\check{\omega}_{k}$ denotes a fundamental co-weight of ${\rm GL}_r$ of the form $( \underbrace{1,\ldots,1}_{k},\underbrace{0, \ldots, 0}_{r-k})$ in the basis dual to the standard basis of weights of the defining representation. This type of ${\rm GL}_r$ Lax matrices can be obtained by fusion of the fundamental solutions associated to a single column $\lambda = 1^{[|\lambda|]}$ and a single column $\mu = 1^{[r-|\lambda|]}$. The fundamental solutions are given in Section~\ref{sec:eleml}, and the fusion is described in Section~\ref{sec:fusel2}. The Lax matrices for arbitrary partitions $\lambda$ are given in Section~\ref{sec:fusel}. We closely follow~\cite{Bazhanov:2010jq} where the elementary building blocks were derived, the factorisation was discussed on the quantum level and a closed formula for the Lax matrices was obtained for the case $\lambda=(r)$, see also~\cite{Derkachov:2006fw}. 
\subsection[Fundamental $(\lambda,\mu)$ orbits]{Fundamental $\boldsymbol{(\lambda,\mu)}$ orbits}\label{sec:eleml} \begin{figure}[t] \centering \begin{tikzpicture} \foreach \a in {1} { \begin{scope}[shift={(0.5*\a-0.1,1)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {1} { \begin{scope}[shift={(0.5*\a-0.1,0.5)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {1} { \begin{scope}[shift={(0.5*\a-0.1,0)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {6} { \begin{scope}[shift={(0.5*\a-0.1,0.5)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {6} { \begin{scope}[shift={(0.5*\a-0.1,0)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \node [below ] at (0.65,0) {$x_1$}; \node [below ] at (3.15,0) {$\infty$}; \draw [decorate,decoration={brace,amplitude=5pt},xshift=80pt,yshift=-14.5pt] (0,0.5) -- (0,1.5)node [black,midway,xshift=-10pt] {\footnotesize $\mu$}; \draw [decorate,decoration={brace,amplitude=5pt},xshift=7pt,yshift=-14.5pt] (0,0.5) -- (0,2)node [black,midway,xshift=-10pt] {\footnotesize $\lambda$}; \end{tikzpicture} \caption{Single column partition for $r=5$ with $\lambda=1^{[3]}$ and $\mu=1^{[2]}$.}\label{fig:singcol} \end{figure} The fundamental building blocks are $r\times r$ matrices that correspond to the partition \begin{gather*} \mu=(\underbrace{1,\ldots,1}_{|\mu|}) ,\qquad \lambda=(\underbrace{1,\ldots,1}_{|\lambda|}) , \end{gather*} with $r=|\lambda|+|\mu|$, see Fig.~\ref{fig:singcol}. They contain $|\lambda|\cdot|\mu|$ pairs of conjugate variables $(p_{ij},q_{ji})$ where $1\leq i\leq |\mu|$ and $|\mu|< j \leq r$ and can be written as \begin{gather}\label{eq:elax} L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})= \left( \begin{BMAT}[5pt]{c:c}{c:c}I&-P_{\mu,\lambda}\\ Q_{\lambda,\mu}& (x-x_1)I-Q_{\lambda,\mu}P_{\mu,\lambda} \end{BMAT}\right) . \end{gather} Here the upper diagonal block is of the size $|\mu|\times |\mu|$ and the lower one of size $|\lambda|\times |\lambda|$. 
The block matrices on the off-diagonal are parametrized as follows \begin{gather*} (P_{\mu,\lambda})_{i,j} = p_{i, |\mu|+j}, \qquad (Q_{\lambda,\mu})_{i,j} = q_{|\mu|+i, j}. \end{gather*} The letter $I$ denotes the identity matrix of appropriate size. In particular we have $L_{1^{[r]},\underline{x},\varnothing}(x)=(x-x_1)I$ and $L_{\varnothing,\varnothing,1^{[r]}}=I$. The matrices $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ satisfy the Sklyanin relation \eqref{eq:skl} as can be verified by a direct computation using \begin{gather*} \{(P_{\mu,\lambda})_{i,j},(Q_{\lambda,\mu})_{k,l}\}=\delta_{i,l}\delta_{k,j} . \end{gather*} Consequently one finds \begin{gather*} \{(Q_{\lambda,\mu}P_{\mu,\lambda})_{i,j},(Q_{\lambda,\mu})_{k,l}\}=+(Q_{\lambda,\mu})_{i,l}\delta_{k,j} ,\qquad \{(Q_{\lambda,\mu}P_{\mu,\lambda})_{i,j},(P_{\mu,\lambda})_{k,l}\}=-(P_{\mu,\lambda})_{k,j}\delta_{i,l} , \end{gather*} and \begin{gather*} \{(Q_{\lambda,\mu}P_{\mu,\lambda})_{i,j},(Q_{\lambda,\mu}P_{\mu,\lambda})_{k,l}\}=\delta_{k,j}(Q_{\lambda,\mu}P_{\mu,\lambda})_{i,l}-\delta_{i,l}(Q_{\lambda,\mu}P_{\mu,\lambda})_{k,j} , \end{gather*} which is sufficient in order to check the Sklyanin Poisson bracket. It is instructive to see that $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ is factorized into a product of lower triangular, diagonal and upper triangular matrices: \begin{gather*} L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})= \left( \begin{BMAT}[5pt]{c:c}{c:c} I& 0 \\ Q_{\lambda,\mu} & I \end{BMAT}\right) \left( \begin{BMAT}[5pt]{c:c}{c:c} I& 0 \\ 0 & (x-x_1)I \end{BMAT}\right) \left( \begin{BMAT}[5pt]{c:c}{c:c} I& -P_{\mu,\lambda} \\ 0 & I \end{BMAT} \right) . \end{gather*} The determinant is \begin{gather*} \det L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})=(x-x_1)^{|\lambda|} . 
\end{gather*} \subsection{Canonical coordinates on regular orbits}\label{sec:fusel} In this section we will construct solutions $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ for arbitrary partitions with $\lambda$ composed of columns $\lambda_i^{t}$ and a single column partition $\mu^t = \big(\mu_1^{t}\big)$. The columns $\lambda_i^{t}$ are associated to fundamental singularities at $x = x_i$ of type $\lambda_i^{t}$, which means that the singularity of $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ is in the conjugacy class of \begin{gather*} \operatorname{diag}(\underbrace{ (x - x_i), \dots, (x-x_i)}_{\lambda_i^{t}}, 1, \dots, 1), \qquad i=1,\ldots,\lambda_1, \end{gather*} i.e., distinct $\lambda_i^{t}$ eigenvalues of $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ are vanishing linearly at $x = x_i$. The column $\mu_1^{t}$ describes a fundamental singularity at $x = \infty$ which means that the singularity of $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ at $ x \to \infty$ is in conjugacy class of \begin{gather*} x \operatorname{diag}\big(\underbrace{ x^{-1}, \dots, x^{-1}}_{\mu_1^{t}}, 1, \dots, 1\big) . 
\end{gather*} \begin{figure}\centering \begin{tikzpicture} \foreach \a in {1,2,3,4} { \begin{scope}[shift={(0.5*\a,0)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {1,2,3} { \begin{scope}[shift={(0.5*\a,0.5)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {1} { \begin{scope}[shift={(0.5*\a,1)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \node [below ] at (0.75,0) {$x_1$}; \node [below ] at (1.25,0) {$x_2$}; \node [below ] at (1.75,0) {$x_3$}; \node [below ] at (2.25,0) {$x_4$}; \foreach \a in {0,1} { \begin{scope}[shift={(4,0.5*\a)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \node [below] at (4.25,0) {$\infty$}; \end{tikzpicture} \caption{Regular partition with $\lambda=(4,3,1)$, $\mu=(1,1)$ and $r=10$.} \end{figure} We will prove recursively that regular Lax matrices $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ can be parametrized as a~block matrix \begin{gather}\label{eq:laxregular} L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q}) = \left(\begin{BMAT}[5pt]{c:c}{c:c} I& - P_{\mu,\lambda} \\ Q_{\lambda, \mu} &xI- J_{\lambda, \lambda} - Q_{\lambda, \mu} P_{\mu, \lambda} \end{BMAT}\right) , \end{gather} where the upper-left block is of size $\mu_1^{t} \times \mu_1^{t}$ and bottom-right block is of size $|\lambda| \times |\lambda|$. 
The matrix elements of block $P_{\mu, \lambda}$ and block $Q_{\lambda, \mu}$ are canonically conjugated variables with \begin{gather*} \{ (P_{\mu,\lambda})_{ij}, (Q_{\lambda, \mu})_{kl} \} = \delta_{il} \delta_{jk} \end{gather*} and the matrix elements of $J_{\lambda,\lambda}$ satisfy the algebra of $|\lambda| \times |\lambda|$-matrices with respect to the Poisson brackets{\samepage \begin{gather}\label{eq:gln} \{J_{ij},J_{kl}\}=\delta_{il} J_{kj}-\delta_{kj} J_{il} , \end{gather} while Poisson commuting with matrix elements of $P_{\mu, \lambda}$ and $Q_{\lambda, \mu}$.} The matrix elements of the $|\lambda| \times |\lambda|$ matrix $J_{\lambda,\lambda}$ have an explicit parametrization in terms of the canonically conjugated coordinates as follows \begin{gather}\label{eq:Jmatrix} J_{\lambda,\lambda} = Q_{\lambda, \lambda} ( X_{\lambda} + [P_{\lambda, \lambda} Q_{\lambda, \lambda}]_{+})Q_{\lambda, \lambda}^{-1} , \end{gather} cf.~\cite{babich2016birational, ShapiroThesis} and Appendix~\ref{sec:twisted-flag}. Here $X_\lambda$ denotes the diagonal matrix \begin{gather}\label{eq:Xlambda} X_{\lambda}=\diag (\underbrace{x_1,\ldots,x_1}_{\lambda_1^t},\underbrace{x_2,\ldots,x_2}_{\lambda_2^t},\ldots,\underbrace{x_{\lambda_1},\ldots,x_{\lambda_1}}_{\lambda_{\lambda_1}^t}) . \end{gather} The corresponding blocks on the diagonal are of the size $\lambda^t_1,\ldots, \lambda_{\lambda_1}^t$. The matrix $[P_{\lambda, \lambda} Q_{\lambda, \lambda}]_+$ is strictly upper block triangular and reads \begin{gather*} [P_{\lambda, \lambda} Q_{\lambda, \lambda}]_+=\left( \begin{BMAT}[5pt]{c:c:c:c:c}{c:c:c:c:c} 0& \hat{P}_{1,2}& \hat{P}_{1,3}&\cdots& \hat{P}_{1,\lambda_1}\\ 0&0& \hat{P}_{2,3}&\cdots& \hat{P}_{2,\lambda_1}\\ 0&0&0&\ddots&\vdots\\ 0&0&0&0&\hat{P}_{\lambda_1-1,\lambda_1}\\ 0&0&0&0&0\\ \end{BMAT} \right) . 
\end{gather*} Here the matrices $\hat{P}_{ij}$ are of the size $\lambda_{i}^t\times \lambda_{j}^t$ and explicitly given by \begin{gather}\label{eq:pt} \hat{P}_{ij}=(P_{\lambda, \lambda})_{ij}+\sum_{k=j+1}^{\lambda_1}(P_{\lambda, \lambda})_{ik}(Q_{\lambda, \lambda})_{kj} . \end{gather} The matrix $Q_{\lambda, \lambda}$ is lower triangular and only depends on the variables $\underline{q}$ while $P_{\lambda,\lambda}$ is upper triangular and only depends on the variables $\underline{p}$. They read \begin{gather} P_{\lambda,\lambda}=\left( \begin{BMAT}[5pt]{c:c:c:c:c}{c:c:c:c:c} 0&P_{1,2}&P_{1,3}&\cdots&P_{1,\lambda_1}\\ 0&0&P_{2,3}&\cdots&P_{2,\lambda_1}\\ 0&0&0&\ddots&\vdots\\ 0&0&0&\ddots&P_{\lambda_1-1,\lambda_1}\\ 0&0&0&0&0\\ \end{BMAT} \right) ,\nonumber\\ Q_{\lambda,\lambda}=\left( \begin{BMAT}[5pt]{c:c:c:c:c}{c:c:c:c:c} I &0&0&0&0\\ Q_{2,1}&I &0&0&0\\ Q_{3,1}&Q_{3,2}&I &0&0\\ \vdots&\vdots&\ddots&\ddots&0\\ Q_{\lambda_1,1}&Q_{\lambda_1,2}&\cdots&Q_{\lambda_1,\lambda_1-1}&I \\ \end{BMAT} \right) ,\label{eq:Umatrix} \end{gather} where $Q_{ij}$ and $P_{ij}$ denote $\lambda_i^t\times\lambda_j^t$ block matrices explicitly given by \begin{alignat*}{3} & (Q_{ij})_{kl} = q_{\ell(i)+k,\ell(j)+l}, \qquad && k \in [1,\lambda_i^{t}], \quad l \in [1, \lambda_j^{t}], & \\ & (P_{ij})_{kl}=p_{\ell(i) + k , \ell(j) + l}, \qquad && k \in [1,\lambda_i^{t}], \quad l \in [1, \lambda_j^{t}] .& \end{alignat*} Here we defined $\ell(i)=|\mu|+\sum\limits_{k=1}^{i-1}\lambda_k^t$. The realization (\ref{eq:Jmatrix}) of the $\mathfrak{gl}(|\lambda|)$ algebra, also known as the free field representation, can be constructed as the algebra of twisted differential operators on the flag variety $G/P_{\lambda, +}$. Here $G = {\rm GL}(|\lambda|)$ and $P_{\lambda, +}$ denotes a parabolic subgroup of ${\rm GL}(|\lambda|)$ whose Levi is $\prod_{i} {\rm GL}(\lambda_i^{t})$. 
The big cell of the flag variety $G/P_{\lambda,+}$ is identified with the $\lambda^{t}$-blocks unipotent subgroup $N_{\lambda, -}$ whose elements are represented by matrices $Q_{\lambda, \lambda}$ as in~(\ref{eq:Umatrix}). In the classical limit twisted differential operators in $J_{\lambda, \lambda}$ form a co-adjoint orbit $O_{X_{\lambda}}$ in the dual Lie algebra $\mathfrak{g}^{*}$ for $\mathfrak{g} = \mathfrak{gl}(|\lambda|)$ of the semi-simple element $X_{\lambda}$ (\ref{eq:Xlambda}). See details in Appendix~\ref{sec:twisted-flag}. The number of pairs of conjugate variables in the Lax matrix \eqref{eq:laxregular} agrees with~\eqref{eq:dimform}. There are $\mu_1^t\times |\lambda|$ pairs in $P_{\mu,\lambda}$, $Q_{\lambda,\mu}$ and $\sum\limits_{i<j}\lambda_i^t\lambda_j^t$ in~$J_{\lambda,\lambda}$. Further we verify that the determinant of~\eqref{eq:laxregular} agrees with~\eqref{eq:detlax}. \subsection{Regular orbits from fusion of fundamental orbits} \label{sec:fusel2} \newcommand{{\tilde\mu}}{{\tilde\mu}} \newcommand{\lambda'}{\lambda'} \newcommand{\lambda}{\lambda} \newcommand{{|\tilde\mu|}}{{|\tilde\mu|}} \newcommand{|\lambda'|}{|\lambda'|} \newcommand{{|\lambda|}}{{|\lambda|}} We will construct the solution in the form of (\ref{eq:laxregular}) associated to regular $(\tilde \lambda, \tilde \mu)$ by fusion of two solutions associated to $(\lambda, \mu)$ and $(\lambda', \mu')$. Here $(\tilde \lambda, \tilde \mu)$ is defined such that \begin{gather*} \tilde \lambda^t = \big({\lambda'}^t, {\lambda^{t}}\big), \qquad |\tilde \lambda| = |\lambda| + |\lambda'| , \end{gather*} where $({\lambda'}^t, {\lambda^{t}})$ denotes the partition given by the union of ${\lambda'}^t$ and ${\lambda^{t}}$. The partitions~$\mu$,~$\mu'$ and~$\tilde \mu$ are single columns \begin{gather*} {\mu}^{t} = ( r - |\lambda|),\qquad {\mu'}^{t} = ( r - |\lambda'|),\qquad {\tilde \mu}^{t} = \big( r - |\tilde \lambda|\big) . 
\end{gather*} Then, by assumption of the recursion we represent $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ in the form \begin{gather}\label{eq:lax1} L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} I&0&-P_{{\tilde\mu},{\lambda}}\\ 0&I&-P_{{\lambda'},{\lambda}}\\ Q_{{\lambda},{\tilde\mu}}&Q_{{\lambda},{\lambda'}}&xI-J_{{\lambda},{\lambda}}-Q_{{\lambda},{\tilde\mu}}P_{{\tilde\mu},{\lambda}}-Q_{{\lambda},{\lambda'}}P_{{\lambda'},{\lambda}} \end{BMAT} \right) . \end{gather} The blocks on the diagonal are of the size ${|\tilde\mu|}$, $|\lambda'|$ and ${|\lambda|}$ respectively, with ${|\tilde\mu|}+|\lambda'|+{|\lambda|}=r$. The matrix $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ explicitly depends on ${|\lambda|}(r-{|\lambda|})$ pairs of conjugate variables arranged in the matrices $P_{{\tilde\mu},{\lambda}}$, $P_{{\lambda'},{\lambda}}$ and $Q_{{\lambda},{\tilde\mu}}$, $Q_{{\lambda},{\lambda'}}$ defined as \begin{alignat*}{3 & (P_{{\tilde\mu},{\lambda}})_{ij} = p_{i,{|\tilde\mu|} +|\lambda'|+ j} ,\qquad &&(P_{{\lambda'},{\lambda}})_{ij}= p_{{|\tilde\mu|}+i, {|\tilde\mu|} + |\lambda'| + j} ,&\\ & (Q_{{\lambda},{\tilde\mu}})_{ij} = q_{{|\tilde\mu|}+|\lambda'|+i, j} ,\qquad&& (Q_{{\lambda},{\lambda'}})_{ij}= q_{{|\tilde\mu|} + |\lambda'| + i,{|\tilde\mu|}+ j} ,& \end{alignat*} and the matrix $J_{{\lambda},{\lambda}}$ of the size ${|\lambda|} \times {|\lambda|}$ defined in~(\ref{eq:Jmatrix}). 
Similarly, we consider another Lax matrix \begin{gather}\label{eq:lax2} L'_{\lambda',\underline{x}',\mu'}(x;\underline{p}',\underline{q}')=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} I& -P_{{\tilde\mu},{\lambda'}}'&0\\ Q_{{\lambda'},{\tilde\mu}}' &xI- J_{{\lambda'},{\lambda'}}'- Q_{{\lambda'},{\tilde\mu}}' P_{{\tilde\mu},{\lambda'}}'+ P_{{\lambda'},{\lambda}}' Q_{{\lambda},{\lambda'}}'&- P_{{\lambda'},{\lambda}}'\\ 0&-Q_{{\lambda},{\lambda'}}'&I \end{BMAT} \right) , \end{gather} with the same block structure as in \eqref{eq:lax1}. This matrix $ L'_{\lambda',\underline{x}',\mu'}(x;\underline{p}',\underline{q}')$ explicitly depends on $|\lambda'|(r-|\lambda'|)$ pairs of conjugate variables \begin{alignat*}{3 & (Q'_{{\lambda'},{\tilde\mu}})_{ij}= q'_{{|\tilde\mu|} + i, j}, \qquad&& (Q_{{\lambda},{\lambda'}}')_{ij} = q_{{|\tilde\mu|} + |\lambda'| + i, {|\tilde\mu|}+j}',& \\ & (P'_{{\tilde\mu},{\lambda'}})_{ij} = p_{i, {|\tilde\mu|} + j}', \qquad &&(P_{{\lambda'},{\lambda}}')_{ij} = p_{{|\tilde\mu|} + i, {|\tilde\mu|} + |\lambda'| + j}',& \end{alignat*} and another set of variables appearing in the expression for $J'_{\lambda',\lambda'}$ like in~(\ref{eq:Jmatrix}). The matrix $ L'_{\lambda',\underline{x}',\mu'}(x;\underline{p}',\underline{q}')$ that appears in~(\ref{eq:lax2}) is obtained from the canonical form~(\ref{eq:laxregular}) by permutation, that is a conjugation by an element of the Weyl group of ${\rm GL}_r$, and a canonical transformation in the variables~$\underline{p}$ and~$\underline{q}$. \looseness=-1 In the next step we multiply the matrices \eqref{eq:lax1} and \eqref{eq:lax2}. 
It was pointed out in \cite{Bazhanov:2010jq} for the corresponding solutions of the quantum Yang--Baxter equation that the product can be written as \begin{gather}\label{eq:prodll} L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q}) L'_{\lambda',\underline{x}',\mu'}(x;\underline{p}',\underline{q}')=\tilde Q' \tilde L_{\tilde \lambda,\tilde{\underline{x}},\tilde\mu}(x;\underline{\tilde p},\underline{\tilde q}) . \end{gather} Here $\tilde L_{\tilde \lambda,\tilde{\underline{x}},\tilde\mu}(x;\underline{\tilde p},\underline{\tilde q})$ denotes a~spectral parameter dependent Lax matrix and Casimir $\tilde Q'$ is a~lower triangular matrix. They are of the form \begin{gather*} \tilde L_{\tilde \lambda,\tilde{\underline{x}},\tilde\mu}(x;\underline{\tilde p},\underline{\tilde q})=WU\!\left(\!\begin{BMAT}[5pt]{c:c:c}{c:c:c} I&0 &0\\ 0&xI-J'_{\lambda',\lambda'}&-\tilde P_{{\lambda'},{\lambda}}\\ 0&0&xI-J_{\lambda,\lambda}\\ \end{BMAT}\! \right)\!U^{-1}V^{-1} ,\qquad \tilde Q'=\left(\!\begin{BMAT}[5pt]{c:c:c}{c:c:c} I&0&0\\ 0&I&0\\ 0&\tilde Q_{{\lambda},{\lambda'}}'&I \end{BMAT}\! 
\right) , \end{gather*} where \begin{gather*} W=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} I&0&0\\ \tilde Q_{{\lambda'},{\tilde\mu}}&I&0\\ \tilde Q_{{\lambda},{\tilde\mu}}&0&I \end{BMAT} \right) ,\qquad U=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} I&0&0\\ 0&I&0\\ 0& \tilde Q_{{\lambda},{\lambda'}}&I \end{BMAT} \right) ,\qquad V=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} I&\tilde P_{{\tilde\mu},{\lambda'}}&\tilde P_{{\tilde\mu},{\lambda}}\\ 0&I&0\\ 0&0&I \end{BMAT} \right) \end{gather*} expressed in terms of the new variables \begin{alignat}{3} & \tilde P_{{\lambda'}{\lambda}}=P_{{\lambda'}{\lambda}}'+P_{{\lambda'}{\lambda}}-Q'_{{\lambda'}{\tilde\mu}}P_{{\tilde\mu}{\lambda}}, \qquad && \tilde Q_{{\lambda}{\lambda'}}=Q_{{\lambda}{\lambda'}}',& \nonumber\\ &\tilde P_{{\tilde\mu}{\lambda'}}=P_{{\tilde\mu}{\lambda'}}'-P_{{\tilde\mu}{\lambda}}Q'_{{\lambda}{\lambda'}}, \qquad && \tilde Q_{{\lambda'}{\tilde\mu}}=Q_{{\lambda'}{\tilde\mu}}', &\nonumber\\ &\tilde P_{{\tilde\mu}{\lambda}}=P_{{\tilde\mu}{\lambda}}, \qquad &&\tilde Q_{{\lambda}{\tilde\mu}}=Q_{{\lambda}{\tilde\mu}}+Q'_{{\lambda}{\lambda'}}Q'_{{\lambda'}{\tilde\mu}},& \nonumber\\ &\tilde P'_{{\lambda'}{\lambda}}= P_{{\lambda'}{\lambda}}, \qquad && \tilde Q'_{{\lambda}{\lambda'}}=Q_{{\lambda}{\lambda'}}-Q'_{{\lambda}{\lambda'}} .&\label{eq:trans1} \end{alignat} The polynomial change of variables (\ref{eq:trans1}) is a symplectomorphism (i.e., canonical transformation) as we can directly verify. 
Indeed, computing the differentials we find \begin{gather*} {\rm d} \tilde P_{{\lambda'}{\lambda}} \wedge {\rm d} \tilde Q_{{\lambda}{\lambda'}} = ({\rm d} P_{{\lambda'}{\lambda}} ' + {\rm d}P_{{\lambda'}{\lambda}} - {\rm d}Q_{{\lambda'}{\tilde\mu}}' P_{{\tilde\mu}{\lambda}} - Q_{{\lambda'}{\tilde\mu}}' {\rm d} P_{{\tilde\mu}{\lambda}}) \wedge {\rm d} Q_{{\lambda}{\lambda'}}', \\ {\rm d} \tilde P_{{\tilde\mu}{\lambda'}} \wedge {\rm d} \tilde Q_{{\lambda'}{\tilde\mu}} = ({\rm d} P_{{\tilde\mu}{\lambda'}}' - {\rm d}P_{{\tilde\mu}{\lambda}} Q_{{\lambda}{\lambda'}}' - P_{{\tilde\mu}{\lambda}} {\rm d} Q_{{\lambda}{\lambda'}}') \wedge {\rm d} Q_{{\lambda'}{\tilde\mu}}', \\ {\rm d} \tilde P_{{\tilde\mu}{\lambda}} \wedge {\rm d} \tilde Q_{{\lambda}{\tilde\mu}} = {\rm d} P_{{\tilde\mu}{\lambda}} \wedge ( {\rm d}Q_{{\lambda}{\tilde\mu}} + {\rm d}Q_{{\lambda}{\lambda'}}' Q_{{\lambda'}{\tilde\mu}}' + Q_{{\lambda}{\lambda'}}' {\rm d} Q_{{\lambda'}{\tilde\mu}}'),\\ {\rm d} \tilde P_{{\lambda'}{\lambda}}' \wedge {\rm d} \tilde Q_{{\lambda}{\lambda'}}' = {\rm d}P_{{\lambda'}{\lambda}} \wedge ( {\rm d}Q_{{\lambda}{\lambda'}} - {\rm d}Q_{{\lambda}{\lambda'}}'), \end{gather*} and hence, after cancellations, we find that the canonical symplectic form is invariant \begin{gather*} \sum_{I \in \{{\tilde\mu}{\lambda'}, {\tilde\mu}{\lambda}, {\lambda'}{\lambda} \} } {\rm d} \tilde P_{I} \wedge {\rm d} \tilde Q_{I^t} + \sum_{I \in \{\lambda' \lambda \} } {\rm d}\tilde P_{I}' \wedge {\rm d} \tilde Q_{I^{t}}' \\ \qquad {} = \sum_{I \in \{{\tilde\mu}{\lambda}, {\lambda'}{\lambda}\}} {\rm d} P_{I} \wedge {\rm d} Q_{I^t} + \sum_{I \in \{{\tilde\mu}{\lambda'}, {\lambda'}{\lambda}\}} {\rm d} P'_{I} \wedge {\rm d}Q'_{I^{t}} . \end{gather*} In analogy to the Yang--Baxter equation, the product of two solutions to the Sklyanin relation~\eqref{eq:skl} with different sets of conjugate variables $(\underline{p},\underline{q})$ is again a solution to the Sklyanin relation~\eqref{eq:skl}. 
Therefore the matrix in \eqref{eq:prodll} satisfies the Sklyanin bracket when taking the Poisson bracket with respect to the variables $(\underline{\tilde p},\underline{\tilde q})$ which denote the elements of the matrices defined in~\eqref{eq:trans1}. Finally, we note that the result is independent of $\tilde P_{{\lambda'},{\lambda}}'$ which allows us to strip off the matrix~$\tilde Q'$ from~\eqref{eq:prodll}. Thus we conclude that \begin{gather* \tilde L_{\tilde \lambda,\tilde{\underline{x}},\tilde\mu}(x;\underline{\tilde p},\underline{\tilde q})= \left(\begin{BMAT}[5pt]{c:c}{c:c} I&-\tilde P_{{\tilde\mu},\tilde\lambda}\\ \tilde Q_{\tilde\lambda,{\tilde\mu}} &xI-\tilde J_{\tilde \lambda, \tilde\lambda} - \tilde Q_{\tilde\lambda,{\tilde\mu}}\tilde P_{{\tilde\mu},\tilde\lambda} \end{BMAT} \right) , \end{gather*} with \begin{gather* \big(\tilde P_{{\tilde\mu},\tilde\lambda}\big)_{ij} = \tilde p_{i, {|\tilde\mu|} +j}, \qquad \big( \tilde Q_{\tilde\lambda,{\tilde\mu}} \big)_{ij} = \tilde q _{{|\tilde\mu|} + i, j} \end{gather*} is a solution of the Sklyanin relation. Here the generators $\tilde J_{\tilde \lambda, \tilde \lambda}$ of the $\mathfrak{gl}(|\lambda'|+{|\lambda|})$ subalgebra are realised as \begin{gather* \tilde J_{\tilde \lambda ,\tilde \lambda}=\left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ \tilde Q_{\lambda,\lambda'}&I \end{BMAT} \right)\cdot\left(\begin{BMAT}[5pt]{c:c}{c:c} J_{\lambda', \lambda'}'& \tilde P_{\lambda',\lambda}\\ 0&J_{\lambda, \lambda} \end{BMAT} \right)\cdot\left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ -\tilde Q_{\lambda,\lambda'}&I \end{BMAT} \right) , \end{gather*} where \begin{gather* \big(\tilde P_{{\lambda'},{\lambda}}\big)_{ij} = \tilde p_{{|\tilde\mu|}+i, {|\tilde\mu|}+|\lambda'| + j}, \qquad \big(\tilde Q_{{\lambda},{\lambda'}}\big)_{ij}= \tilde q _{{|\tilde\mu|}+|\lambda'| + i,{|\tilde\mu|}+ j} . 
\end{gather*} Let us remark that here we have chosen a certain order of fusion, but depending on the order we would get different parametrization related by a polynomial choice of variables, see, e.g., Appendix~\ref{sec:clusterstructures}. It would be interesting to explore the resulting cluster structure in more details. \subsubsection{Linear fusion} Now to demonstrate the particular parametrization~(\ref{eq:Jmatrix}) for $\tilde J_{\tilde \lambda, \tilde \lambda}$ it is sufficient to assume that~$\lambda'$ is a single column partition ${\lambda'}^{t} = ({\lambda'}^{t}_1)$ while $\lambda$ is an arbitrary collection of columns. In this case \begin{gather*} J'_{\lambda', \lambda'} = X_{\lambda'} ,\qquad J_{\lambda, \lambda} = Q_{\lambda, \lambda} (X_{\lambda} + [P_{\lambda, \lambda} Q_{\lambda, \lambda}]_{+}) Q_{\lambda, \lambda}^{-1} . \end{gather*} Then we find that $\tilde J_{\tilde \lambda, \tilde \lambda}$ can be again represented in the form \begin{gather} \tilde J_{\tilde \lambda \tilde \lambda}=\left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ \tilde Q_{\lambda\lambda'}&I \end{BMAT} \right)\left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ 0 & Q_{\lambda\lambda} \end{BMAT} \right) \left(\begin{BMAT}[5pt]{c:c}{c:c} X_{\lambda'}& \tilde P_{\lambda'\lambda} Q_{\lambda, \lambda}\\ 0& X_{\lambda} + [P_{\lambda, \lambda} Q_{\lambda, \lambda}]_{+} \end{BMAT} \right)\nonumber\\ \hphantom{\tilde J_{\tilde \lambda \tilde \lambda}=}{}\times \left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ 0 & Q_{\lambda, \lambda} \end{BMAT} \right)^{-1} \left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ \tilde Q_{\lambda\lambda'}&I \end{BMAT} \right)^{-1}\label{eq:Jfactor} \end{gather} or \begin{gather*} \tilde J_{\tilde\lambda, \tilde\lambda} = Q_{\tilde\lambda, \tilde\lambda} (X_{\tilde\lambda} + [P_{\tilde\lambda, \tilde\lambda} Q_{\tilde\lambda, \tilde\lambda}]_{+}) Q_{\tilde\lambda, \tilde\lambda}^{-1} \end{gather*} with \begin{gather*} Q_{\tilde \lambda, \tilde \lambda} = \left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ \tilde 
Q_{\lambda\lambda'}& Q_{\lambda, \lambda} \end{BMAT} \right), \qquad P_{\tilde \lambda, \tilde \lambda} = \left(\begin{BMAT}[5pt]{c:c}{c:c} 0 & \tilde P_{\lambda' \lambda} \\ 0 & P_{\lambda, \lambda} \end{BMAT} \right) . \end{gather*} As a consequence it follows that the Lax matrices \eqref{eq:laxregular} satisfy the Sklyanin relation \eqref{eq:skl}. \section{Degree 1 symplectic leaves singular only at infinity}\label{sec:mupart} \newcommand{\alpha}{\alpha} \newcommand{\beta}{\beta} \newcommand{\gamma}{\gamma} In the following section we focus on the Lax matrices that correspond to $\lambda=\varnothing$ and arbitrary partition~$\mu$. Similar to the case labelled by pure $\lambda$ partitions in Section~\ref{sec:lambda} the present case can be obtained from fusion of the basic building blocks. These basic building blocks are generalisations of the well-known Lax matrix of the Toda chain~\cite{FaddeevBook} corresponding to the partition $\mu=(2)$. They are introduced in Sections~\ref{sec:elmu} and~\ref{sec:elmuu}. The Lax matrices for arbitrary partitions $\mu$ are presented in Section~\ref{sec:fullmupart}. As discussed in Section~\ref{sec:fac} we can apply a similar fusion procedure as in Section~\ref{sec:fusel} to derive the general form of the Lax matrices. To describe the Lax matrices it is convenient to introduce the partitions \begin{gather}\label{eq:mudec} \alpha=\big(\underbrace{1,\ldots,1}_{\mu_2^t}\big) ,\qquad \beta=\big(\underbrace{1,\ldots,1}_{\mu_1^t-\mu_2^t}\big) ,\qquad \gamma=\big(\mu_2^t,\ldots,\mu_{\mu_1}^t\big)^t , \end{gather} as shown in Fig.~\ref{fig:decompose}. The partition $\mu$ is then written as $ \mu^t=\big(|\alpha|+|\beta|,\gamma_1^t,\ldots,\gamma_{\gamma_1}^t\big)$. For simplicity we are only considering partitions with $\mu_i\geq \mu_j$ for $1\leq i<j\leq \mu^t_1$. 
\begin{figure}\centering \begin{tikzpicture} \foreach \a in {2,3,4,5} { \begin{scope}[shift={(0.5*\a,0)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {2,3,4} { \begin{scope}[shift={(0.5*\a,0.5)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {2} { \begin{scope}[shift={(0.5*\a,1)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {1} { \begin{scope}[shift={(0.5*\a-0.1,1.5+0.1)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {1} { \begin{scope}[shift={(0.5*\a-0.1,2+0.1)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {1} { \begin{scope}[shift={(0.5*\a-0.1,1)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {1} { \begin{scope}[shift={(0.5*\a-0.1,0.5)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {1} { \begin{scope}[shift={(0.5*\a-0.1,0)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \draw [decorate,decoration={brace,amplitude=5pt},xshift=10pt,yshift=17pt] (0,1) -- (0,2)node [black,midway,xshift=-10pt] {\footnotesize $\beta$}; \draw [decorate,decoration={brace,amplitude=5pt},xshift=10pt,yshift=-14.5pt] (0,0.5) -- (0,2)node [black,midway,xshift=-10pt] {\footnotesize $\alpha$}; \draw [decorate,decoration={brace,amplitude=5pt},xshift=85.5pt,yshift=-3pt ,rotate=90] (0,0) -- (0,2)node [black,midway,xshift=0pt,yshift=-10pt] {\footnotesize $\gamma$}; \end{tikzpicture} \caption{Example of the decomposition in \eqref{eq:mudec} for $\mu=(5,4,2,1,1)$. 
We have $\alpha=(1,1,1)$, $\beta=(1,1)$ and $\gamma=(4,3,1)$.} \label{fig:decompose} \end{figure} \subsection[Elementary $\mu$ partitions: $\alpha=\gamma$ and $\beta=0$]{Elementary $\boldsymbol{\mu}$ partitions: $\boldsymbol{\alpha=\gamma}$ and $\boldsymbol{\beta=0}$}\label{sec:elmu} First we introduce the Lax matrices that correspond to the partitions $\lambda=\varnothing$ and $\mu=2^{[\frac{r}{2}]}$ with \begin{gather*} \alpha=(\underbrace{1,\ldots,1}_{\mu_1^t}) ,\qquad \beta=0 ,\qquad \gamma=(\underbrace{1,\ldots,1}_{\mu_2^t}) , \end{gather*} where $\mu_1^t=\mu_2^t=\frac{r}{2}$ and $r\in 2\mathbb{N}$. The Lax matrices $ L_{\mu}(x;\underline{p},\underline{q})=L_{\varnothing,\varnothing,\mu}(x;\underline{p},\underline{q})$ are $r\times r$ matrices with $|\alpha|+|\gamma|=r$ whose determinant evaluates to unity. They contain $\big(\frac{r}{2}\big)^2$ pairs of conjugate variables $\big(p_I,q^I\big)$ and can be written in the form \begin{gather}\label{eq:lax22alt} L_{\mu}(x;\underline{p},\underline{q})=\left(\begin{BMAT}[5pt]{c:c}{c:c} 0&K_{\alpha,\gamma}\\ \bar K_{\gamma,\alpha}& xI-F_{\gamma,\gamma} \end{BMAT}\right) . \end{gather} For later purposes we labeled the upper block by $\alpha$ and the lower block by $\gamma$ such that the blocks on the diagonal are of equal size, namely $|\alpha|\times |\alpha|$ and $|\gamma|\times |\gamma|$ respectively. Further we introduced the matrices \begin{gather}\label{eq:G} F_{\gamma,\gamma}=Q_-GQ_-^{-1} ,\qquad\text{with}\quad G= P_0+[P_+Q_-]_++ Q_0[Q_+P_-]_-Q_0^{-1} , \end{gather} where $[\phantom{x}]_{\pm}$ denotes the projection on the upper and lower diagonal part respectively and \begin{gather}\label{eq:abc} \bar K_{\gamma,\alpha}=Q_-Q_0Q_+ ,\qquad K_{\alpha,\gamma}=-Q_+^{-1}Q_0^{-1}Q_-^{-1}=-\bar K_{\gamma,\alpha}^{-1} . 
\end{gather} The matrices $Q_{\pm,0}$ are parametrized in terms of the conjugate variables $(\underline{p},\underline{q})$ as follows \begin{gather*} Q_-=I + \sum_{|\mu|\geq i>j> \mu_1^t }q_{ij} e_{ij} ,\qquad Q_+=I +\sum_{\mu_1^t< i<j\leq |\mu|}q_{ij}e_{ij} ,\qquad Q_0=\sum_{i=\mu_1^t+1}^{|\mu|} {\rm e}^{q_{ii}}e_{ii} , \end{gather*} and \begin{gather*} P_-=\sum_{|\mu|\geq i>j>\mu^t_1 }p_{ij} e_{ij} ,\qquad P_+=\sum_{\mu_1^t< i<j\leq |\mu|}p_{ij}e_{ij} ,\qquad P_0=\sum_{i=\mu_1^t+1}^{|\mu|} p_{ii}e_{ii} . \end{gather*} We note that $Q_+$ is an upper triangular matrix containing variables $q_{ij}$ with $i<j$, while $Q_-$ is lower triangular containing the variables $q_{ij}$ with $i>j$. The diagonal matrix $Q_0$ only contains the exponential function of~$q_{ii}$. All variables~$p_{ij}$ are contained in $G$ which is decomposed as the sum of a diagonal, a lower triangular and an upper triangular matrix. The Sklyanin relation is equivalent to the commutators \begin{gather}\label{eq:FK} \{F_{ij},K_{kl}\}=-K_{kj}\delta_{il}, \qquad \{F_{ij},\bar K_{kl}\}=\bar K_{il}\delta_{kj}, \qquad \{K_{ij},\bar K_{kl}\}=0,\\ \label{eq:FF} \{F_{ij},F_{kl}\}=\delta_{kj} F_{il}- \delta_{il} F_{kj} . \end{gather} Here the latter can be identified with commutators of the $\mathfrak{gl}(\frac{r}{2})$ algebra, while the parametrization of $K_{\alpha, \gamma}$ is given in terms of a Gauss decomposition of ${\rm GL}(\frac{r}{2})$. These relations are verified explicitly in Appendix~\ref{app:proof}. For $\mu=(2)$, i.e., $|\alpha|=|\gamma|=1$, the Lax matrix in \eqref{eq:lax22alt} reproduces the well-known Lax matrix for the Toda chain \begin{gather*} L_{(2)}(x;p,q)=\left(\begin{matrix} 0&-{\rm e}^{-q}\\ {\rm e}^q&x-p \end{matrix} \right) . 
\end{gather*} \subsection[Elementary $\mu$ partitions: $\alpha=\gamma$ and $\beta\neq 0$]{Elementary $\boldsymbol{\mu}$ partitions: $\boldsymbol{\alpha=\gamma}$ and $\boldsymbol{\beta\neq 0}$}\label{sec:elmuu} We can extend the elementary Lax matrices to the case $\beta\neq 0$ which can be used to obtain the Lax matrices for arbitrary partitions $\mu$. They correspond to the partitions \begin{gather*} \mu=(\underbrace{2,\ldots,2}_{|\alpha|=|\gamma|},\underbrace{1,\ldots,1}_{|\beta|}) , \end{gather*} with $|\alpha|+|\beta|+|\gamma|=r$ and contain $|\gamma|(|\alpha|+|\beta|)$ pairs of conjugate variables. The Lax matrices can be defined from $L_{\mu}(x;\underline{p},\underline{q})$ with $\alpha=\gamma$ and $\beta=0$ given in \eqref{eq:lax22alt} as \begin{gather}\label{eq:lax222111} L_{\mu}(x;\underline{p},\underline{q})=\left( \begin{BMAT}[5pt]{c:c:c}{c:c:c} 0&0&K_{\alpha,\gamma}\\ 0&I&-P_{\beta,\gamma}\\ \bar K_{\gamma,\alpha}&Q_{\gamma,\beta}&xI-F_{\gamma,\gamma}-Q_{\gamma,\beta}P_{\beta,\gamma}\\ \end{BMAT} \right) . \end{gather} Here $F_{\gamma,\gamma}$, $K_{\alpha,\gamma}$ and $\bar K_{\gamma,\alpha}$ are defined in \eqref{eq:abc} and do not depend on $\beta$. The diagonal block containing the identity matrix $I$ is of the size $|\beta|$. The matrices $P_{\beta,\gamma}$ and $Q_{\gamma,\beta}$ read \begin{gather}\label{eq:Pbggg} (P_{\beta,\gamma})_{i,j}=p_{|\alpha|+i,|\alpha|+|\beta|+j} ,\qquad ( Q_{\gamma,\beta})_{i,j}=q_{|\alpha|+|\beta|+i,|\alpha|+j} . \end{gather} The proof of Sklyanin relation is straightforward combining the proofs in Sections~\ref{sec:eleml} and \ref{sec:elmu}. The determinant can be obtained using \eqref{eq:detabcd} and yields unity. From here one may build all other Lax matrices corresponding to arbitrary~$\mu$ partitions by factorisation. The result is presented in the next subsection. 
\subsection[Lax matrices for $\mu$ partitions]{Lax matrices for $\boldsymbol{\mu}$ partitions}\label{sec:fullmupart} The Lax matrix for arbitrary $\mu$ partitions can be written in the form \begin{gather}\label{eq:mulax} L_{\mu}(x;\underline{p},\underline{q})=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} 0&0&K_{\alpha,\gamma}\\ 0&I&-P_{\beta,\gamma}\\ \bar{K}_{\gamma,\alpha}&Q_{{\gamma},\beta}& xI -F_{\gamma,{\gamma}}-Q_{{\gamma},\beta}P_{\beta,{\gamma}} \end{BMAT} \right) . \end{gather} The blocks on the diagonal of this Lax matrix are of the size $|\alpha|$, $|\beta|$ and $|\gamma|$, respectively, with $|\alpha|+|\beta|+|\gamma|=r$. The matrices $P_{\beta,\gamma}$ and $Q_{\gamma,\beta}$ are defined as in \eqref{eq:Pbggg} and contain $|\gamma|\cdot|\beta|$ pairs of conjugate variables. The remaining matrix elements can then be expressed in terms of $\gamma_1$ copies of the matrices defined in \eqref{eq:G} and \eqref{eq:abc}. We have \begin{gather}\label{eq:bigmat} F_{\gamma,\gamma}=Q_{\gamma,\gamma}\left( \begin{BMAT}[5pt]{c:c:c:c:c}{c:c:c:c:c} F_{1,1}&\hat{P}_{1,2}&\hat{P}_{1,3}&\cdots&\hat{P}_{1,\gamma_1}\\ W_{2,1}&F_{2,2}&\hat{P}_{2,3}&\cdots&\hat{P}_{2,\gamma_1}\\ W_{3,1}&W_{3,2}&F_{3,3}&\ddots&\vdots\\ \vdots&\vdots&\ddots&\ddots&\hat{P}_{\gamma_1-1,\gamma_1}\\ W_{\gamma_1,1}&W_{\gamma_1,2}&\cdots&W_{\gamma_1,\gamma_1-1}&F_{\gamma_1,\gamma_1}\\ \end{BMAT} \right)Q^{-1}_{\gamma,\gamma} ,\\ \label{eq:bigK} {K}_{\alpha,{\gamma}}=\left(\begin{BMAT}[5pt]{c:c:c:c}{c} {D}^{[\gamma_1]}_{\alpha,\alpha}\cdots {D}^{[2]} _{\alpha,\alpha} {K}_{\alpha,1}&\cdots & {D}^{[\gamma_1]}_{\alpha,\alpha} {K}_{\alpha,\gamma_1-1}& {K}_{\alpha,\gamma_1} \end{BMAT} \right)Q_{\gamma,\gamma}^{-1} \end{gather} and \begin{gather}\label{eq:bigKb} \bar{ {K}}_{{\gamma},\alpha}=Q_{\gamma,\gamma}\left(\begin{BMAT}[5pt]{c}{c:c:c:c} \bar{ {K}}_{1,\alpha}\\ \bar{ {K}}_{2,\alpha} {D}^{[1]}_{\alpha,\alpha} \\ \cdots \\\bar{ {K}}_{\gamma_1,\alpha} {D}^{[\gamma_1-1]}_{\alpha,\alpha}\cdots 
{D}^{[1]}_{\alpha,\alpha} \end{BMAT} \right) . \end{gather} Each block $(i,j)$ in \eqref{eq:bigmat} is of the size $\gamma_i^t\times\gamma_j^t$. The matrices $\hat{P}_{ij}$ are defined as in \eqref{eq:pt} with \begin{gather*} \hat{P}_{ij}=(P_{\gamma,\gamma})_{ij}+\sum_{k=j+1}^{\gamma_1}(P_{\gamma,\gamma})_{ik}(Q_{\gamma,\gamma})_{kj} . \end{gather*} The corresponding matrices $Q_{\gamma,\gamma}$ and $P_{\gamma,\gamma}$ as defined for the partition $\lambda$ in \eqref{eq:Umatrix} read \begin{gather* Q_{\gamma,\gamma}=\left(\! \begin{BMAT}[5pt]{c:c:c:c:c}{c:c:c:c:c} I &0&0&0&0\\ Q_{2,1}&I &0&0&0\\ Q_{3,1}&Q_{3,2}&I &0&0\\ \vdots&\vdots&\ddots&\ddots&0\\ Q_{\gamma_1,1}&Q_{\gamma_1,2}&\cdots&Q_{\gamma_1,\gamma_1-1}&I \\ \end{BMAT}\! \right) \!,\quad P_{\gamma,\gamma}=\left(\! \begin{BMAT}[5pt]{c:c:c:c:c}{c:c:c:c:c} 0&P_{1,2}&P_{1,3}&\cdots&P_{1,\gamma_1}\\ 0&0 &P_{2,3}&\cdots&P_{2,\gamma_1}\\ 0&0&0&\ddots&\vdots\\ 0&0&0&\ddots&P_{\gamma_1-1,\gamma_1}\\ 0&0&0&0&0 \\ \end{BMAT}\! \right) \!, \end{gather*} where $Q_{ij}$ and $P_{ij}$ denote block matrices explicitly given by \begin{alignat*}{3} &(Q_{ij})_{kl}= q_{\ell(s)+k,\ell(t)+l}, \qquad && k \in \big[1,\gamma^t_i\big], \quad l \in \big[1, \gamma^t_j\big] ,\\ & (P_{ij})_{kl}=p_{\ell(s) + k , \ell(t) + l}, \qquad && k \in \big[1,\gamma^t_i\big], \quad l \in \big[1,\gamma^t_j\big] . \end{alignat*} Here we defined $\ell(i)=|\alpha|+|\beta|+\sum\limits_{l=1}^{i-1}\gamma_l^t$. The elements on the lower diagonal of the middle part of $F_{\gamma,\gamma}$ in~\eqref{eq:bigmat} are defined as the product \begin{gather*} W_{ij}=-\bar{ {K}}_{i,\alpha} {D}^{[i-1]}_{\alpha,\alpha}\cdots{D}^{[j+1]}_{\alpha,\alpha} {K}_{\alpha,j} , \end{gather*} which in particular yields $W_{i+1,i}=-\bar{ {K}}_{i+1,\alpha} {K}_{\alpha,i}$. 
The remaining matrices are parametrized in terms of the matrices defined in~\eqref{eq:G} and~\eqref{eq:abc} as \begin{gather} F_{k,k} =\big(Q_-GQ_-^{-1}\big)_{\gamma^t_{k},\gamma^t_{k}}+Q_{\gamma^t_{k},|\alpha|-\gamma^t_k}P_{|\alpha|-\gamma^t_k,\gamma^t_{k}} , \qquad K_{\alpha,k} =-\left(\begin{BMAT}[5pt]{c}{c:c} \left(Q_-Q_0Q_+\right)^{-1}_{\gamma_{k}^t,\gamma_{k}^t}\\ P_{|\alpha|-\gamma_{k}^t,\gamma_{k}^t}\\ \end{BMAT} \right) , \nonumber\\ \bar K_{k,\alpha}=\left(\begin{BMAT}[5pt]{c:c}{c} \left(Q_-Q_0Q_+\right)_{\gamma_{k}^t,\gamma_{k}^t}&Q_{ \gamma_{k}^t,|\alpha|-\gamma_{k}^t} \end{BMAT} \right), \hspace{14mm} D^{[k]}_{\alpha,\alpha}=\diag(\underbrace{0,\ldots,0}_{\gamma_{k}^t},\underbrace{1,\ldots,1}_{|\alpha|-\gamma_{k}^t}) . \label{eq:elemelem} \end{gather} Here the matrices $(Q_-Q_0Q_+)_{\gamma^t_{k},\gamma^t_{k}}$, $(Q_-Q_0Q_+)_{\gamma^t_{k},\gamma^t_{k}}^{-1}$ and $\big(Q_-GQ_-^{-1}\big)_{\gamma^t_{k},\gamma^t_{k}}$ are built from the variables $q_{ij}$ and $p_{ji}$ where $\ell(k)<i,j\leq \ell(k+1)$ with $\ell(k)=|\alpha|+|\beta|+\sum\limits_{l=1}^{k-1}\gamma_l^t$. Further the matrices $Q_{\gamma_{k}^t,|\alpha|-\gamma_{k}^t}$ and $P_{|\alpha|-\gamma_{k}^t,\gamma_{k}^t}$ are of the form \begin{alignat*}{3} & \big(Q_{\gamma_{k}^t,|\alpha|-\gamma_{k}^t}\big)_{ij} = q_{\ell(k)+i,\gamma_{k}^t+j}, \qquad && i \in \big[1, \gamma_{k}^{t}\big],\quad j \in \big[1,|\alpha|-\gamma_{k}^{t}\big] ,& \\ & \big(P_{|\alpha|-\gamma_{k}^t,\gamma_{k}^t}\big)_{ij} =p_{\gamma_{k}^t+ i , \ell(k) + j}, \qquad && i \in \big[1,|\alpha|-\gamma_k^t\big], \quad j \in \big[1, \gamma_{k}^{t}\big] .& \end{alignat*} The total number of pairs $(p_I,q_I)$ in the Lax matrix for general $\mu$ partitions is $\frac{1}{2}\Big(r^2-\sum\limits_{i=1}^{\mu_1}\big(\mu_i^t\big)^2\Big)$. 
Here $|\beta|\cdot|\gamma|$ pairs come from the elements $P$ and $Q$ in~\eqref{eq:mulax}, the matrices~$P_{\gamma,\gamma}$ and~$Q_{\gamma,\gamma}$ contain $\sum\limits_{i<j}\gamma_i^t\gamma_j^t$ pairs of conjugate variables and the matrices~$F_{k,k}$, $K_{\alpha,k}$ and $\bar K_{k,\alpha}$ in~\eqref{eq:elemelem} contain for $k=1,\ldots,\gamma_1$ in total $|\alpha|\cdot|\gamma|$ pairs of variables. The expression for the Lax matrix $L_{\mu}(x;\underline{p},\underline{q})$ in \eqref{eq:mulax} is in principle valid for any ordering of columns where $|\alpha|+|\beta|$ denotes the height of the biggest columns and $\gamma$ the partition that remains after removing that column. If the partition is ordered, i.e., $\mu_i\geq\mu_j$ for $i<j$, we have that $D_{\alpha,\alpha}^{[i]}D_{\alpha,\alpha}^{[j]}=D_{\alpha,\alpha}^{[j]}D_{\alpha,\alpha}^{[i]}=D_{\alpha,\alpha}^{[i]}$ for $i<j$ and $D_{\alpha,\alpha}^{[1]}=0$ which simplifies the expressions above. \subsection[Fusion procedure for $\mu$ partitions]{Fusion procedure for $\boldsymbol{\mu}$ partitions}\label{sec:fac} \newcommand{\ia}{\alpha} \newcommand{\ib}{\beta} \newcommand{\ic}{\gamma} The formula for the Lax matrices of the $\mu$ partitions can be shown in analogy to Section~\ref{sec:fusel}. We define three partitions $\mu$, $\mu'$ and $\tilde\mu$ with $|\mu|=|\mu'|=|\tilde\mu|=r$. They are related by fusion via \begin{gather}\label{eq:mufuse} |\tilde\alpha| =\max(|\alpha|,|\alpha'|) ,\qquad |\tilde \beta|=\min(|\beta|,|\beta'|) ,\qquad \tilde\gamma ^t=\big(\gamma^t,{\gamma'}^t\big). 
\end{gather} Here we consider a solution to the Sklyanin relation of the form~\eqref{eq:lax222111} written as a $4\times 4$ block matrix \begin{gather}\label{eq:ll1} L_{\mu}(x;\underline{p},\underline{q})=\left(\begin{BMAT}[5pt]{c:c:c:c}{c:c:c:c} {D_{\tilde\ia,\tilde\ia}}&0&0&K_{\tilde\ia,\ic}\\ 0&I&0&-P_{\tilde\ib,\ic}\\ 0&0&I&-P_{\ic',\ic}\\ \bar K_{\ic,\tilde\ia}&Q_{\ic,\tilde\ib}&Q_{\ic,\ic'}&xI-{F}_{\ic,\ic}-Q_{\ic,\tilde\ib}P_{\tilde\ib,\ic}-Q_{\ic,\ic'}P_{\ic',\ic} \end{BMAT} \right) . \end{gather} The blocks on the diagonal are of the size $|\tilde \alpha|$, $|\tilde \beta|$, $|\gamma'|$ and $|\gamma|$, respectively with $|\tilde \alpha|+|\tilde \beta|+|\gamma'|+|\gamma|=r$. The matrices $Q_{\ic,\tilde\ib}$, $Q_{\ic,\ic'}$ and $P_{\tilde\ib,\ic}$, $P_{\ic',\ic}$ are explicitly given in terms of the conjugate variables. They read \begin{alignat*}{3} & (P_{\tilde\ib,\ic})_{i,j} =p_{|\tilde \alpha|+i,|\tilde \alpha|+|\tilde \beta|+|\gamma'|+j}, \qquad && (P_{\ic',\ic})_{i,j} =p_{|\tilde \alpha|+|\tilde \beta|+i,|\tilde \alpha|+|\tilde \beta|+|\gamma'|+j},& \\ &(Q_{\ic,\tilde\ib})_{i,j}=q_{|\tilde \alpha|+|\tilde \beta|+|\gamma'|+i,|\tilde \alpha|+j} , \qquad && (Q_{\ic,\ic'})_{i,j}=q_{|\tilde \alpha|+|\tilde \beta|+|\gamma'|+i,|\tilde \alpha|+|\tilde \beta|+j} .& \end{alignat*} Furthermore we define a second Lax matrix, cf.~\eqref{eq:lax2}, which also is a solution of the Sklyanin relation. It has the same block structure as~\eqref{eq:ll1} and reads \begin{gather* {L}'_{\mu'}(x;\underline{p}',\underline{q}')=\left(\begin{BMAT}[5pt]{c:c:c:c}{c:c:c:c} {D}'_{\tilde\ia,\tilde\ia}&0&{K}'_{\tilde\ia,\ic'}&0\\ 0&I& -P_{\tilde\ib,\ic'}'&0\\ \bar K'_{\ic',\tilde\ia}&Q_{\ic',\tilde\ib}'&xI- {F}'_{\ic',\ic'}-Q_{\ic',\tilde\ib}' P_{\tilde\ib,\ic'}'+ P_{\ic',\ic}'Q_{\ic,\ic'}'& -P_{\ic',\ic}'\\ 0& 0&- Q_{\ic,\ic'}'& I \end{BMAT} \right) . 
\end{gather*} We have \begin{alignat*}{3} & (P'_{\tilde\ib,\ic'})_{i,j}=p_{|\tilde \alpha|+i,|\tilde \alpha|+|\tilde \beta|+j} , \qquad && (P_{\ic',\ic}')_{i,j}=p_{|\tilde \alpha|+|\tilde \beta|+i,|\tilde \alpha|+|\tilde \beta|+|\gamma'|+j} ,&\\ & (Q'_{\ic',\tilde\ib})_{i,j} =q_{|\tilde \alpha|+|\tilde \beta|+i,|\tilde \alpha|+j} , \qquad && (Q'_{\ic,\ic'})_{i,j} =q_{|\tilde \alpha|+|\tilde \beta|+|\gamma'|+i,|\tilde \alpha|+|\tilde \beta|+j} .& \end{alignat*} We proceed as in Section~\ref{sec:fusel} and multiply the two solutions of the Sklyanin relation. The product can again be written as \begin{gather*} L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q}) {L}'_{\mu'}(x;\underline{p}',\underline{q}')=\tilde Q'\tilde{L}_{\tilde\mu}(x,\underline{\tilde p},\underline{\tilde q}) , \end{gather*} cf.~\eqref{eq:prodll}. The spectral parameter dependent matrix $\tilde{L}_{\tilde\mu}(x,\underline{\tilde p},\underline{\tilde q})$ and the matrix $\tilde Q'$ take the form \begin{gather*} \tilde{L}_{\tilde\mu}(x,\underline{\tilde p},\underline{\tilde q})=WU\left(\begin{BMAT}[5pt]{c:c:c:c}{c:c:c:c} {D}_{\tilde\ia,\tilde\ia}{D}'_{\tilde\ia,\tilde\ia}&0&{D_{\tilde\ia,\tilde\ia}}{K_{\tilde\ia,\ic'}}' & {K}_{\tilde\ia,\ic} \\ 0 &I&0&0\\ {\bar K}'_{\ic',\tilde\ia}&0&xI-{F}'_{\ic',\ic'}& -\tilde P_{\gamma',\gamma}\\ {\bar K}_{\ic,\tilde\ia} {D}'_{\tilde\ia,\tilde\ia}&0&{{\bar K}}_{\ic,\tilde\ia}{K}'_{\tilde\ia,\ic'}&xI-{F}_{\ic,\ic} \end{BMAT} \right)U^{-1}V^{-1} , \end{gather*} and \begin{gather*} \tilde Q'=\left(\begin{BMAT}[5pt]{c:c:c:c}{c:c:c:c} I&0&0&0\\ 0&I&0&0\\ 0&0&I&0\\ 0 &0&\tilde Q_{\gamma,\gamma'}'&I \end{BMAT} \right) . 
\end{gather*} Here we have written $\tilde{L}_{\tilde\mu}(x,\underline{\tilde p},\underline{\tilde q})$ in a factorised form and introduced the matrices \begin{gather* W=\left(\begin{BMAT}[5pt]{c:c:c:c}{c:c:c:c} I&0&0&0\\ 0&I&0&0\\ 0&\tilde Q_{\gamma',\tilde \beta}&I&0\\ 0 &\tilde Q_{\gamma,\tilde \beta}&0&I \end{BMAT} \right) ,\qquad U=\left(\begin{BMAT}[5pt]{c:c:c:c}{c:c:c:c} I&0&0&0\\ 0&I&0&0\\ 0&0&I&0\\ 0 &0&\tilde Q_{\gamma,\gamma'}&I \end{BMAT} \right) ,\\ V^{-1}=\left(\begin{BMAT}[5pt]{c:c:c:c}{c:c:c:c} I&0&0&0\\ 0&I&\tilde P_{\tilde \beta,\gamma'}&\tilde P_{\tilde \beta,\gamma}\\ 0& 0&I&0\\ 0 &0&0&I \end{BMAT} \right) . \end{gather*} They are expressed in terms of the new variables \begin{alignat*}{3} & \tilde P_{{\ic'}{\ic}}=P_{{\ic'}{\ic}}'+P_{{\ic'}{\ic}}-Q'_{{\ic'}\tilde\ib}P_{\tilde\ib{\ic}}, \qquad && \tilde Q_{{\ic}{\ic'}} =Q_{{\ic}{\ic'}}',&\\ &\tilde P_{\tilde\ib{\ic'}}=P_{\tilde\ib{\ic'}}'-P_{\tilde\ib{\ic}}Q'_{{\ic}{\ic'}}, \qquad && \tilde Q_{{\ic'}\tilde\ib} =Q_{{\ic'}\tilde\ib}', & \\ &\tilde P_{\tilde\ib{\ic}}=P_{\tilde\ib{\ic}}, \qquad && \tilde Q_{{\ic}\tilde\ib}=Q_{{\ic}\tilde\ib}+Q'_{{\ic}{\ic'}}Q'_{{\ic'}\tilde\ib}, &\\ & \tilde P'_{{\ic'}{\ic}}= P_{{\ic'}{\ic}}, \qquad && \tilde Q'_{{\ic}{\ic'}}=Q_{{\ic}{\ic'}}-Q'_{{\ic}{\ic'}} .& \end{alignat*} This is the same change of variables as in \eqref{eq:trans1} and therefore it is canonical. Following the same logic as in Section~\ref{sec:fusel} we conclude that $\tilde{L}_{\tilde\mu}(x,\underline{\tilde p},\underline{\tilde q})$ is a solution of the Sklyanin relation. 
For convenience we write it in the same form as $L_{\mu}(x;\underline{p},\underline{q})$ such that \begin{gather}\label{eq:laxz} \tilde{L}_{\tilde\mu}(x,\underline{\tilde p},\underline{\tilde q})=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} \tilde{D} _{\tilde\alpha,\tilde\alpha}&0& {K}_{\tilde\ia,\tilde \gamma}\\ 0&I&-\tilde P_{\tilde\ib,\tilde\gamma}\\ {{\bar K}}_{\tilde\gamma,\tilde\ia}&\tilde Q_{\tilde\gamma,\tilde\ib}&xI-\tilde F_{\tilde\gamma,\tilde\gamma}-\tilde Q_{\tilde\gamma,\tilde\ib}\tilde P_{\tilde\ib,\tilde\gamma}\\ \end{BMAT} \right) . \end{gather} The size of the block matrices on the diagonal is $|\tilde \alpha|$, $|\tilde \beta|$ and $|\tilde \gamma|$. We defined the matrices \begin{gather*} \big(\tilde P_{\tilde\ib,\tilde\gamma}\big)_{i,j}=\tilde p_{|\tilde \alpha|+i,|\tilde \alpha|+|\tilde \beta|+j} ,\qquad \big(\tilde Q_{\tilde\gamma,\tilde\ib}\big)_{i,j}=\tilde q_{|\tilde \alpha|+|\tilde \beta|+i,|\tilde \alpha|+j}, \end{gather*} while the remaining elements are given by \begin{gather*} \tilde F_{\tilde\gamma,\tilde\gamma}=\tilde Q_-\left(\begin{BMAT}[5pt]{c:c}{c:c} {F}'_{\ic',\ic'}& \tilde P'_{\ic',\ic} \\ -{{\bar K}_{\ic,\tilde\ia}}{K_{\tilde\ia,\ic'}'} &{F}_{\ic,\ic} \end{BMAT} \right)\tilde Q_-^{-1} ,\qquad\text{with}\quad \tilde Q_-= \left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ \tilde Q_{ \ic,\ic'}'&I \end{BMAT} \right) ,\\ {{\bar K}_{\tilde\gamma,\tilde\ia}}=\tilde Q_-\left(\begin{BMAT}[5pt]{c}{c:c} {{\bar K}_{\ic',\tilde\ia}}'\\ {{\bar K}_{\ic,\tilde\ia}}{D_{\tilde\ia,\tilde\ia}'} \end{BMAT} \right) ,\qquad {K}_{\tilde\ia,\tilde\gamma}=\left(\begin{BMAT}[5pt]{c:c}{c} {D}_{\tilde\ia,\tilde\ia}{K_{\tilde\ia,\ic'}'}& {K}_{\tilde\ia,\ic} \end{BMAT} \right)\tilde Q_-^{-1} , \end{gather*} and $\tilde {D}_{\tilde\ia,\tilde\ia}={D}_{\tilde\ia,\tilde\ia}{D}_{\tilde\ia,\tilde\ia}'$. 
\subsubsection{Recursion} We specify the matrix elements in the fusion procedure to describe the fusion of one arbitrary partition $\mu$ as proposed in Section~\ref{sec:fullmupart} and an elementary matrix \eqref{eq:lax222111} corresponding to the partition $\mu'$ with the restriction $\alpha'=\gamma'$. The resulting partition $\tilde\mu$ is then written in terms of~$\mu$ and~$\mu'$ as in~\eqref{eq:mufuse}. This can be seen as follows. The primed letters correspond to elements of the Lax matrix corresponding to $\mu'$ and read \begin{gather*} {F'}_{\gamma',\gamma'}=\big(Q'_-G'{Q'_-}^{-1}\big)_{\gamma',\gamma'}+Q'_{\gamma',\tilde\alpha-\alpha'}P'_{\tilde\alpha-\alpha',\gamma'} , \\ {\bar K}'_{{\gamma'},\tilde\alpha}=\left(\begin{BMAT}[5pt]{c:c}{c} \left(Q'_-Q'_0Q'_+\right)_{\gamma',\alpha'}&Q'_{\gamma',\tilde\alpha-\alpha'} \end{BMAT} \right), \qquad {K}'_{\tilde\alpha,\gamma'}=-\left(\begin{BMAT}[5pt]{c}{c:c} \left(Q'_-Q'_0Q'_+\right)^{-1}_{\alpha',\gamma'}\\ P'_{\tilde\alpha-\alpha',\gamma'}\\ \end{BMAT} \right) \end{gather*} and \begin{gather*} {D}'_{\tilde\alpha,\tilde\alpha}={D}^{[\gamma']}_{\tilde \alpha,\tilde \alpha}=\diag(\underbrace{0,\ldots,0}_{\alpha'},\underbrace{1,\ldots,1}_{\tilde \alpha-\alpha'}) . \end{gather*} The unprimed letters correspond to the partition $\mu$ as given in \eqref{eq:bigmat}, \eqref{eq:bigK} and \eqref{eq:bigKb}. 
We find that \begin{gather* F_{\tilde \gamma,\tilde \gamma}=Q_{\tilde \gamma,\tilde \gamma}\left( \begin{BMAT}[5pt]{c:c}{c:c} F'_{\gamma',\gamma'}&\tilde P_{\gamma',\gamma}Q_{\gamma,\gamma}\\ -Q_{\gamma,\gamma}^{-1}\bar K_{\gamma,\tilde\alpha}K'_{\tilde\alpha,\gamma'}& \begin{BMAT}[5pt]{c:c:c:c}{c:c:c:c} F_{1,1}&\hat{P}_{1,2}&\cdots&\hat{P}_{1,\gamma_1}\\ W_{2,1}&F_{2,2}&\ddots&\vdots\\ \vdots&\ddots&\ddots&\hat{P}_{\gamma_1-1,\gamma_1}\\ W_{\gamma_1,1}&\cdots&W_{\gamma_1,\gamma_1-1}&F_{\gamma_1,\gamma_1}\\ \end{BMAT} \end{BMAT} \right)Q^{-1}_{\tilde \gamma,\tilde \gamma} , \end{gather*} where similar as for the case of $\lambda$-partitions in \eqref{eq:Jfactor} we identify \begin{gather*} \tilde P_{\gamma',\gamma}Q_{\gamma,\gamma}=\left(\begin{BMAT}[5pt]{c:c:c:c}{c} \hat{P}_{\gamma',1}&\hat{P}_{\gamma',2}&\cdots&\hat{P}_{\gamma',\gamma_1} \end{BMAT}\right) . \end{gather*} Furthermore we identify \begin{gather*} -Q_{\gamma,\gamma}^{-1}\bar K_{\gamma,\tilde\alpha}K'_{\tilde\alpha,\gamma'}=-\left(\begin{BMAT}[5pt]{c}{c:c:c:c} \bar K_{1,\tilde \alpha}K_{\tilde \alpha,\gamma'}'\\ \bar K_{2,\tilde \alpha}D_{\tilde \alpha,\tilde \alpha}^{[1]}K_{\tilde \alpha,\gamma'}'\\ \vdots\\ \bar K_{\gamma_1,\tilde \alpha}D_{\tilde \alpha,\tilde \alpha}^{[\gamma_1-1]}\cdots D_{\tilde \alpha,\tilde \alpha}^{[1]}K_{\tilde \alpha,\gamma'}' \end{BMAT}\right)=\left(\begin{BMAT}[5pt]{c}{c:c:c:c} W_{1,\gamma'}\\ W_{2,\gamma'}\\ \vdots\\ W_{\gamma_1,\gamma'} \end{BMAT}\right) \end{gather*} and obtain \begin{gather*} \bar K_{\tilde\gamma,\tilde\alpha}=Q_{\tilde\gamma,\tilde \gamma}\left(\begin{BMAT}[5pt]{c}{c:c:c:c:c} \bar{ {K}}_{\gamma',\tilde \alpha}'\\ \bar{ {K}}_{1,\tilde \alpha} {D}^{[\gamma']}_{\tilde \alpha,\tilde \alpha}\\ \bar{ {K}}_{2,\tilde \alpha} {D}^{[1]}_{\tilde \alpha,\tilde \alpha}{D}^{[\gamma']}_{\tilde \alpha,\tilde \alpha} \\ \vdots \\\bar{ {K}}_{\gamma_1,\tilde \alpha} {D}^{[\gamma_1-1]}_{\tilde \alpha,\tilde \alpha}\cdots {D}^{[1]}_{\tilde \alpha,\tilde 
\alpha}{D}^{[\gamma']}_{\tilde \alpha,\tilde \alpha} \end{BMAT} \right) \end{gather*} and \begin{gather*} {K}_{\tilde \alpha,\tilde {\gamma}}=\left(\begin{BMAT}[5pt]{c:c:c:c:c}{c} {D}^{[\gamma_1]}_{\tilde \alpha,\tilde \alpha}\cdots {D}^{[1]} _{\tilde \alpha,\tilde \alpha} {K}_{\tilde \alpha,\gamma'}'& {D}^{[\gamma_1]}_{\tilde \alpha,\tilde \alpha}\cdots {D}^{[2]} _{\tilde \alpha,\tilde \alpha} {K}_{\tilde \alpha,1}&\cdots & {D}^{[\gamma_1]}_{\alpha,\alpha} {K}_{\alpha,\gamma_1-1}& {K}_{\alpha,\gamma_1} \end{BMAT} \right)Q_{\tilde \gamma,\tilde \gamma}^{-1} . \end{gather*} Thus we conclude that \eqref{eq:mulax} satisfies Sklyanin's quadratic Poisson bracket. \section{Generic degree 1 symplectic leaves}\label{sec:lmpart} We will now define the Lax matrices $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ for arbitrary partitions $ \lambda$ and $ \mu$. They can be obtained by fusing the Lax matrix for regular partitions~\eqref{eq:laxregular} with the Lax matrix for~$\mu$ partitions~\eqref{eq:mulax}. \subsection[Lax matrix for $\lambda$, $\mu$ partitions]{Lax matrix for $\boldsymbol{\lambda}$, $\boldsymbol{\mu}$ partitions} The Lax matrix for arbitrary partitions $ \lambda$ and $ \mu$ can compactly be written as \begin{gather}\label{eq:finallax} L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} 0&0&K_{\alpha,\gamma\lambda}\\ 0 &I &-P_{\beta,\gamma\lambda}\\ \bar{K}_{\gamma\lambda,\alpha}& Q_{\gamma\lambda,\beta}&xI -{F}_{\gamma\lambda,\gamma\lambda}-Q_{\gamma\lambda,\beta}P_{\beta,\gamma\lambda}\\ \end{BMAT} \right) . \end{gather} The blocks on the diagonal are of the size $|\alpha|$, $|\beta|$ and $|\gamma|+|\lambda|$ respectively. Here $Q_{\lambda,\gamma}$ and $P_{\gamma,\lambda}$ are defined as \begin{gather*} (P_{\gamma,\lambda})_{i,j}= p_{|\alpha|+|\beta|+i,| \alpha|+| \beta|+|\gamma|+j} ,\qquad ( Q_{\lambda,\gamma})_{i,j}=q_{| \alpha|+| \beta|+|\gamma|+i,| \alpha|+|\beta|+j}. 
\end{gather*} The remaining matrix elements in \eqref{eq:finallax} are given in terms of the components of the Lax matrix for regular partitions~\eqref{eq:laxregular} and the Lax matrix for~$\mu$ partitions~\eqref{eq:mulax}. We have \begin{gather*} {F}_{\gamma\lambda,\gamma\lambda}=\left(\begin{BMAT}[5pt]{c:c}{c:c} I &0\\ Q_{\lambda,\gamma}&I \end{BMAT} \right)\cdot\left(\begin{BMAT}[5pt]{c:c}{c:c} F_{\gamma,\gamma}&P_{\gamma,\lambda}\\ -Q_{\lambda,\alpha} K_{\alpha,\gamma} & J_{\lambda,\lambda}+Q_{\lambda,\alpha}P_{\alpha,\lambda} \end{BMAT} \right)\cdot\left(\begin{BMAT}[5pt]{c:c}{c:c} I &0\\ -Q_{\lambda,\gamma}&I \end{BMAT} \right) , \end{gather*} and \begin{alignat*}{3} & \bar{{K}}_{\gamma\lambda,\alpha}=\left(\begin{BMAT}[5pt]{c}{c:c} \bar K_{\gamma,\alpha}\\ Q_{\lambda,\gamma} \bar K_{\gamma,\alpha} \end{BMAT} \right) , \qquad && {{K}}_{\alpha,\gamma\lambda}=\left(\begin{BMAT}[5pt]{c:c}{c} K_{\alpha,\gamma}+P_{\alpha,\lambda} Q_{\lambda,\gamma}&-P_{\alpha,\lambda} \end{BMAT} \right) ,&\\ & Q_{\gamma\lambda,\beta}=\left(\begin{BMAT}[5pt]{c}{c:c} Q_{\gamma,\beta}\\ Q_{\lambda,\beta} \end{BMAT} \right) ,\qquad && P_{\beta,\gamma\lambda}=\left(\begin{BMAT}[5pt]{c:c}{c} P_{\beta,\gamma}&P_{\beta,\lambda} \end{BMAT} \right) . \end{alignat*} Again we can check that the number of pairs of conjugate variables agrees with~\eqref{eq:dimform}. First we note that $F$ contains $\sum\limits_{i<j}\gamma_i^t\gamma_j^t+|\alpha|\cdot|\gamma|$ and $J$ contains $\sum\limits_{i<j}\lambda_i^t\lambda_j^t$ pairs. The remaining variables are contained in $P_{\beta,\gamma\lambda}$, $Q_{\gamma\lambda,\beta}$, $P_{\alpha,\lambda}$, $Q_{\lambda,\alpha}$ and $P_{\gamma,\lambda}$, $Q_{\lambda,\gamma}$. By construction, cf.~Section~\ref{sec:fuslm}, the determinant of the Lax matrix in~\eqref{eq:finallax} satisfies~\eqref{eq:detlax}. 
The symplectic leaves that we found in the Poisson--Lie group $\mathcal{G}$ are orbits of certain representative elements under the dressing action of the dual Poisson--Lie group $\mathcal{G}^{*}$. These representative elements are easily seen as Lax matrices at $\underline p = \underline q = 0$. Here the Lax matrix \eqref{eq:finallax} reduces to a~block matrix of the form \begin{gather*} L_{{\lambda},\underline{x},\mu}(x;\varnothing,\varnothing)=\left(\begin{BMAT}[5pt]{c:c}{c:c} xI_\mu-\Sigma_\mu &0\\ 0&xI-X_\lambda \end{BMAT} \right) , \end{gather*} where \begin{gather*} I_\mu=\diag(\underbrace{0,\ldots,0}_{|\alpha|+|\beta|},\underbrace{1,\ldots,1}_{|\gamma|}) \end{gather*} and $X_\lambda$ denotes the diagonal matrix defined in \eqref{eq:Xlambda}. The matrix $\Sigma_\mu$ is a permutation matrix containing the elements~$\pm 1$. This matrix can be block diagonalized such that it contains $|\alpha|+|\beta|$ blocks of the size $\mu_i$, $i=1,\ldots,|\alpha|+|\beta|$, corresponding to the rows of the partition $\mu$. The diagonal of each block $i$ reads $\diag(0,x,\ldots,x)$ and its remaining elements $\pm 1$ correspond to a~cyclic permutation of length $\mu_i$. For example for a row of $\mu_i=4$ we obtain \begin{gather*} \left(\begin{matrix} 0&-1&0&0\\ 0&x&-1&0\\ 0&0&x&-1\\ 1&0&0&x \end{matrix} \right) . \end{gather*} For $\mu_i=1$ where $i=|\alpha|+1,\ldots,|\alpha|+|\beta|$, we obtain a $1\times 1$ block containing only the element~$1$. 
\subsection{Fusion procedure}\label{sec:fuslm} The Lax matrix \eqref{eq:finallax} can be derived using the factorisation formula in Section~\ref{sec:fac} when substituting the $\gamma$ block for a $\lambda=\tilde\lambda$ block as follows \begin{gather*} {F}_{\gamma,\gamma}\rightarrow J_{\tilde \lambda,\tilde \lambda}+Q_{\tilde \lambda,\tilde \alpha}P_{\tilde \alpha,\tilde \lambda} ,\qquad \bar{{K}}_{\gamma,\tilde \alpha}\rightarrow Q_{\tilde \lambda,\tilde \alpha} ,\qquad{K}_{\tilde \alpha,\gamma}\rightarrow-P_{\tilde \alpha,\tilde\lambda} ,\qquad {D}_{\tilde \alpha,\tilde \alpha}\rightarrow I . \end{gather*} Here $I$ is the $|\tilde\alpha|\times| \tilde \alpha|$ identity matrix. The primed elements in the second Lax matrix $L'$ are taken to be as defined in \eqref{eq:mulax} as \begin{gather*} {F}'_{\gamma',\gamma'}=F_{\tilde\gamma,\tilde\gamma} ,\qquad \bar{{K}}'_{\gamma',\tilde \alpha}=\bar{K}_{\tilde \gamma,\tilde \alpha} ,\qquad {K}'_{\tilde \alpha,\gamma'}=K_{\tilde \alpha,\tilde \gamma} ,\qquad {D}'_{\tilde \alpha,\tilde \alpha}=0 . \end{gather*} Here ${D'}$ is equal to the $|\tilde \alpha|\times|\tilde \alpha|$ zero matrix. This factorisation corresponds to the fusion of the partitions $\lambda$, $\mu$ and $\lambda'$, $\mu'$ expressed in terms of the resulting partition $\tilde\lambda$, $\tilde\mu$ via \begin{gather*}\begin{split}& \alpha=\varnothing ,\qquad \beta=(\underbrace{1,\ldots,1}_{|\tilde\alpha|+|\tilde\beta|+|\tilde\gamma|}) ,\qquad \gamma=\varnothing ,\qquad \lambda=\tilde\lambda , \\ & \alpha'=\tilde\alpha ,\qquad \beta'=(\underbrace{1,\ldots,1}_{|\tilde\beta|+|\tilde\lambda|}) ,\qquad \gamma'=\tilde \gamma ,\qquad \lambda'=\varnothing .\end{split} \end{gather*} The final result of the factorisation can be directly read off from \eqref{eq:laxz}. We conclude that \eqref{eq:finallax} is a solution to Sklyanin's relation \eqref{eq:skl}. 
\section[Algebraic completely integrable systems and Coulomb branches of $A_{r-1}$~quiver gauge theory]{Algebraic completely integrable systems \\ and Coulomb branches of $\boldsymbol{A_{r-1}}$~quiver gauge theory}\label{sec:specdet} \begin{figure}[t]\centering \begin{tikzpicture} \node at (0,0) {\plot}; \fill [white] (3.1,-0.58) rectangle (6.3,3.92); \node at (4.8,1.6) {\parti}; \end{tikzpicture} \caption{An example of non-vanishing commuting Hamiltonians $\mathcal{X}_{i,j}^{[\lambda,\mu]}$ for a ${\rm GL}_{r}$ Lax matrix $L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})$ corresponding to a partition $\nu=(5,5,2,2,1,1,1)$ with $r=17$ and $d_\nu=106$, with non-zero $m_2=3$ and $m_4=m_7=1$. The horizontal axis $i$ labels the nodes of the $A_{r-1}$ quiver diagram for $i \in [1, r-1]$, and the vertical coordinate $j$ of the enveloping profile denotes the color ranks $n_i$ in the quiver diagram.}\label{fig:newton} \end{figure} The symplectic leaf $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$, i.e., the moduli space of multiplicative Higgs bundles with fixed singularities, supports a fibration of an algebraic completely integrable system \begin{gather} \label{eq:intsys} H\colon \ \mathcal{M}_{\underline{\lambda}, \underline{x}, \mu} \to \mathcal{U}_{\underline{\lambda}, \underline{x}, \mu} . \end{gather} Here $H$ denotes a complete set of independent commuting Hamiltonian functions (also known as conserved charges or action variables or integrals of motion of an integrable Hamiltonian dynamical system) and $\mathcal{U}_{\underline{\lambda}, \underline{x}, \mu}$ denotes the space where the complete set of independent Hamiltonians takes values. 
The fibers $\mathcal{A}_{u} = H^{-1}(u), u \in \mathcal{U}_{\underline{\lambda}, \underline{x}, \mu}$ of the map \eqref{eq:intsys} are abelian varieties which are holomorphic Lagrangians with respect to the holomorphic symplectic structure on $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$, so that \begin{gather*} \dim_{\mathbb{C}} \mathcal{U}_{\underline{\lambda}, \underline{x}, \mu} = \dim_{\mathbb{C}} \mathcal{A}_{u} = \tfrac 1 2 \dim \mathcal{M}_{\underline{\lambda}, \underline{x}, \mu} \end{gather*} Let \begin{gather*} d_{\underline{\lambda}, \underline{x}, \mu} = \tfrac 1 2 \dim \mathcal{M}_{\underline{\lambda}, \underline{x}, \mu} \end{gather*} denote the half-dimension of the symplectic leaf (phase space) $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$. In the context of Seiberg--Witten integrable systems \cite{Cherkis:2001gm, Cherkis:2000ft, Nekrasov:2012xe, Seiberg:1994rs} the holomorphic symplectic phase space $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$ is the Coulomb branch of the hyperk\"{a}hler moduli space of vacua of $\mathcal{N}=2$ supersymmetric quiver gauge theory on $\mathbb{R}^{3} \times S^1$ viewed as a holomorphic symplectic manifold at a distinguished point $\zeta = 0$ on the twistor sphere of complex structures. The complex base space $\mathcal{U}_{\underline{\lambda}, \underline{x}, \mu}$ is the moduli space of vacua of the same $\mathcal{N}=2$ supersymmetric gauge theory on $\mathbb{R}^{4}$ called $\mathcal{U}$-plane in the respective context. In terms of action-angle variables, the complex action variables parametrize the base $\mathcal{U}$-plane, and the complex angle variables parametrize the abelian fibers $\mathcal{A}_{u}$. \looseness=-1 To realize the structure of an algebraic completely integrable system \eqref{eq:intsys} we need to construct~$d_{\underline{\lambda}, \underline{x}, \mu}$ independent Poisson commuting Hamiltonian functions on $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$. 
Like in the case of additive Higgs bundles (Hitchin system), the commuting Hamiltonian functions on multiplicative Higgs bundles (or more general abstract Higgs bundles) can be realized by the abstract cameral cover construction \cite{Donagi:2000dr}. In the case of additive Higgs bundles on $X$, the cameral cover construction generates Poisson commuting Hamiltonian functions as coefficients of $P(\phi(x))$ where the Higgs field $\phi(x)$ is a section of $\operatorname{ad} \mathfrak{g} \otimes K_{X}$ and $P$ is an adjoint invariant function on the Lie algebra $\mathfrak{g}$. Similarly, in the case of multiplicative Higgs bundles on $X$, the cameral cover construction generates Poisson commuting Hamiltonian functions as coefficients of $\chi(g (x))$ where $\chi$ is an adjoint invariant function on the group $G$ and the multiplicative Higgs field $g(x)$ is a section of $\operatorname{Ad} G$ on~$X$. The complete set of independent Poisson commuting Hamiltonians for simple~$G$ is spanned by the characters $\chi_{R_k}$ of the fundamental irreducible highest weight modules $R_{k}$ whose highest weight is the fundamental weight $\omega_{k}$ for each $k$ in the set of nodes of the Dynkin diagram of~$\mathfrak{g}$. If $G = {\rm GL}_{r}$, the irreducible highest weight module $R_{k}$ with highest weight $\omega_k$ associated to the $k$-th node of the $A_{r-1}$ Dynkin diagram of the simple factor ${\rm SL}_{r} \subset {\rm GL}_{r}$ is isomorphic to the $k$-th exterior power $R_{k} = \bigwedge^{k} R_{1}$, for $k = 1, \dots, r-1$, of the defining $r$-dimensional representation~$R_1$, and we set $R_{r} = \bigwedge^{r} R_{1}$ to be the determinant 1-dimensional representation. 
It is convenient to assemble the fundamental characters $\chi_{k} = \chi_{R_{k}}$ for $k = 1, \dots, r$ into the spectral polynomial \begin{gather*} \det ( y I_{r \times r} - L(x) )_{r \times r} = \sum_{k=0}^{r} (-1)^{k} y^{r-k} \chi_k (g(x)), \end{gather*} where $\chi_k (g(x)) = \tr_{R_{k}} \rho_k (g(x))$ is a character for a fundamental representation $\rho_{k}\colon G \to \mathrm{End}(R_k)$ evaluated on the Higgs field~$g(x)$. Now we illustrate explicitly the construction of commuting Hamiltonians for the Lax matrices constructed in the previous sections that describe the symplectic leaves $ \mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$. First, for any symplectic leaf $ \mathcal{M}_{\underline{\lambda}, \underline{x}, \mu}$ and its representing Higgs field $g_{\underline{\lambda}, \underline{x}, \mu}(x)$ we define its twisted version \begin{gather*} g_{\underline{\lambda}, \underline{x}, \mu, g_L; g_R}(x) = g_L g_{\underline{\lambda}, \underline{x}, \mu}(x) g_R, \end{gather*} which represents a symplectic leaf $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu; g_L, g_R}$. Here $g_L \in G, g_R \in G$ are arbitrary constant ($x$-independent) Higgs fields. We remark that the symplectic leaves $\mathcal{M}_{\underline{\lambda}, \underline{x}, \mu; g_L, g_R}$ are isomorphic for various $g_L$, $g_{R}$, and for a certain relation between $g_{L}$ and $g_{R}$ they in fact coincide, in this sense the labeling by both $g_{L}$ and $g_{R}$ is redundant.\footnote{What is exactly the degree of redundancy? We can see that for the case regular at infinity $\mu = \varnothing$, when~$L(x)$ is an $x$-shifted co-adjoint orbit in $\mathfrak{g}$, the non-redundant label is the product $g_{R} g_{L}$. 
In any case, the resulting completely integrable system depends only on the product $g_R g_L$ as we see from the spectral polynomial~\eqref{eq:specdet}.} For the following, it is sufficient to take, $g_{L} \equiv g_{\infty}$, $g_{R} \equiv 1$, and define the Lax matrix \begin{gather} \label{eq:twistedL} L_{\underline{\lambda}, \underline{x}, \mu, g_\infty}(x) = \rho_1 (g_\infty) L_{\underline{\lambda}, \underline{x}, \mu}(x), \end{gather} where $\rho_1(g_\infty)$ is $r \times r$ matrix representing $g_\infty \in G$. Note that due to the symmetries of the $\mathfrak{r}$-matrix in \eqref{eq:skl} the product $\rho_1 (g_\infty) L_{\underline{\lambda}, \underline{x}, \mu}(x)$ is a solution to the Sklyanin relation if $L_{\underline{\lambda}, \underline{x}, \mu}$ is. Now define the spectral determinant to be a polynomial of two variables $x$ and $y$ \begin{gather}\label{eq:specdet} W_{\underline{\lambda}, \underline{x}, \mu, g_\infty}(x,y)= \det(y - L_{\underline{\lambda}, \underline{x}, \mu, g_\infty}(x) ) = \sum_{k=0}^{r} (-1)^{k} y^{r-k} \chi_k (x) . \end{gather} The commuting Hamiltonians are coefficients of the monomials $x^j y^i $. With \begin{gather} \label{eq:chik} \chi_k(x) = \mathcal{Q}^{[\lambda]}_{r-k} \mathcal{X}^{[\lambda, \mu]}_{k}(x), \end{gather} we find that the spectral determinant can be written as \begin{gather}\label{eq:specdetQ} W_{\underline{\lambda}, \underline{x}, \mu, g_\infty}(x,y)=y^r+\sum_{i=1}^{r-1} (-1)^{r-i} \mathcal{Q}_i^{[\lambda]}(x) \mathcal{X}^{[\lambda,\mu]}_{r-i}(x) y^{i}+ (-1)^{r} \mathcal{Q}_0^{[\lambda]}(x) , \end{gather} where $\mathcal{Q}_i^{[\lambda]}$ is a polynomial in $x$, cf.~\eqref{eq:appe}, which is independent of the conjugate variables $(p,q)$ of the Lax matrix and which takes the form \begin{gather}\label{eq:Qpol} \mathcal{Q}_i^{[\lambda]}(x)=\prod_{j=1}^{\lambda_{i+1}}(x-x_j)^{\lambda_j^t-i} . \end{gather} All commuting Hamiltonians are thus contained in $\mathcal{X}_{i}^{[\lambda,\mu]}(x)$. 
More precisely $\mathcal{X}^{[\lambda,\mu]}_{r-k}(x)$ is a~polynomial in $x$ of degree \begin{gather}\label{eq:ncolors} n_k^{[\lambda,\mu]}= \sum_{j=1}^{k} (\nu_j-1) \qquad\text{with}\quad \nu_i=\lambda_i+\mu_i ,\quad k \in [1, r-1] . \end{gather} We note that the number of independent commuting Hamiltonians only depends on the total dominant co-weight represented by the partition obtained by the union of columns of the partitions $\lambda$ and $\mu$ minus the shift by the diagonal co-representation (see below \eqref{eq:diagshift}). The charges are obtained as the coefficients of the expansion \begin{gather*} \mathcal{X}_{r-i}^{[\lambda,\mu]}(x)=\sum_{j=0}^{n_i^{[\lambda,\mu]}}\mathcal{X}_{r-i,j}^{[\lambda,\mu]} x^j , \end{gather*} cf.~Fig.~\ref{fig:newton}. The highest coefficients do not depend on the conjugate variables $(p_I,q_I)$ but all other coefficients in the expansion do. The total number of linearly independent charges is equal to the number of conjugate pairs in the corresponding Lax matrix \begin{gather}\label{eq:sumcharges} d_{\underline{\lambda},\mu}=\sum_{k=1}^{r-1} n_k^{\underline{\lambda},\mu} , \end{gather} cf.~\eqref{eq:dimform}. This relation is shown using Frobenius-like coordinates for the partitions in Appendix~\ref{sec:proofcharges}. For given partitions we can plot the non-vanishing coefficients of the spectral determinant in a Newton diagram as done in Fig.~\ref{fig:newton}. Here we introduce the parameters{\samepage \begin{gather}\label{eq:params} m_k^{[\lambda,\mu]}=\big(n_k^{[\lambda,\mu]}-n_{k-1}^{[\lambda,\mu]}\big)-\big(n_{k+1}^{[\lambda,\mu]}-n_{k}^{[\lambda,\mu]}\big)=\nu_k-\nu_{k+1} \qquad\text{for}\quad k\in [1, r-1] \end{gather} to label the partition and the corresponding Newton diagram.} The representation theoretical meaning of the equations \eqref{eq:ncolors}, \eqref{eq:sumcharges} and \eqref{eq:params} is the following. 
The partitions $\underline{\lambda}$ and $\mu$ encode the ${\rm GL}_{r}$ co-weights of the respective singularity of the multiplicative Higgs field at finite points $\underline{x}$ and $x_\infty$. The encoding is in the $r$-dimensional basis of the dual to the weights of the defining representation that we call $\check e_k$ with $k = 1, \dots, r$. In terms of~$\check e_i$ define the simple co-roots $\check \alpha_k$ of ${\rm SL}_r$ to be \begin{gather*} \check \alpha_k = \check e_k - \check e_{k+1}, \qquad k \in [1, r-1] \end{gather*} and define the fundamental co-weights to be \begin{gather*} \check \omega_{k} = \sum_{j=1}^{k} \check e_j - \frac{k}{r} \sum_{j=1}^{r} \check e_j, \qquad k \in [1, r - 1]. \end{gather*} The dominant co-weight associated to each singularity $x_{*} \in \underline{x}$ with associated partition $\lambda_{*} = \lambda_{*,1} \geq \lambda_{*, 2} \geq \dots \geq \lambda_{*, r} = 0$ is given by \begin{gather*} \check \omega_{*} = \sum_{i=1}^{r} \lambda_{*, i } \check e_i \end{gather*} and the dominant co-weight associated to the singularity at $x_{\infty} = \infty$ with associated partition~$\mu$ is given by \begin{gather}\label{eq:diagshift} \check \omega_{\infty} = \sum_{i=1}^{r} (\mu_i - 1) \check e_i, \end{gather} so that the ${\rm GL}_r$ multiplicative Higgs field behaves up to a multiplication by a regular function as $(x - x_{*})^{\check \omega_{*}}$ as $ x \to x_{*}$ and as $ (1/x)^{\check \omega_{\infty}}$ as $ x \to \infty$. Let $\rho$ be the Weyl vector \begin{gather*} \rho = \frac{1}{2} \sum_{\alpha > 0} \alpha = \sum_{k=1}^{r-1} \omega_k = \sum_{k=1}^{r} \frac{ r - (2k-1)}{2} e_k \end{gather*} and let \begin{gather*} \check \omega_{\rm tot} = \check \omega_{\infty} + \sum_{x_{*} \in \underline{x}} \check \omega_{x_*} \end{gather*} be the sum of the co-weights of all singularities in $\underline{x}$ and $x_\infty$. 
Then we see that the dimension formula \eqref{eq:sumcharges} is equivalent to \begin{gather*} \dim \mathcal{M}_{\underline{\lambda}, \underline{x}, \mu; g_L, g_R} = 2 d_{\underline{\lambda}, \underline{x}, \mu} = 2 (\rho, \check \omega_{\rm tot}) \end{gather*} in agreement with the general formula for the dimension of the moduli space of monopoles with Dirac singularities encoded by the total co-weight $\check \omega_{\rm tot}$. The numbers $n_k$ and $m_k$ for $k \in [1, r-1]$ are the number of colors in the node $k$ and the number of fundamental flavours attached to the node $k$ of the Dynkin quiver~\cite{Nekrasov:2012xe}, including the ``deficit'' fundamental flavours multiplets of asymptotically free theory which would make it conformal if added. We have \begin{gather*} \check \omega_{\rm tot} = \sum_{k=1}^{r-1} \check \alpha_k n_k = \sum_{k=1}^{r-1} \check \omega_k m_k \end{gather*} in agreement with \eqref{eq:ncolors} and \eqref{eq:params}. The position of each singularity $x_{*} \in \underline{x}$ to which we have associated a column $\lambda_{*}^{t}$ encoding a fundamental co-weight $\check \omega_{\lambda_{*}^t}$ is the mass of the fundamental flavour multiplet at the node~$\lambda_{*}^{t}$. \begin{figure}[t]\centering \quiver \caption{Representation of the Newton polygon in Fig.~\ref{fig:newton} corresponding to the partition $\nu=(5,5,2,2,1,1,1)$ with $r=17$ as quiver diagram. Here the integers in the circles denote the number of charges for a given index $i$ indicated below. The parameters $m_i^{[\lambda,\mu]}$ are given in the squared boxes.}\label{fig:quiver} \end{figure} Now the spectral curve can be compared with \cite{Nekrasov:2012xe} where a slightly different notation is used. 
To do so we note that the polynomials $\mathcal{Q}_i^{[\lambda]}$ in \eqref{eq:Qpol} can be written in terms of the parameters~$m_i^{[\lambda,\varnothing]}$ introduced in~\eqref{eq:params} as \begin{gather}\label{eq:QasP} \mathcal{Q}_k^{[\lambda]}(x)=\prod_{i=k+1}^{|\lambda|} \mathcal{P}_{r-i}^{ i-k}(x) ,\qquad\text{with}\quad \mathcal{P}_{r-i}(x)=\prod_{j=1}^{m_i^{[\lambda,\varnothing]}}(x-x_{\lambda_i-j+1}) . \end{gather} This relation is shown in Appendix~\ref{app:prf2}. Setting $m_i^{[\lambda,\varnothing]}=0$ for $i>|\lambda|$ we can now write the spectral determinant \eqref{eq:specdetQ} in the notation used in $(7.5)$ of \cite{Nekrasov:2012xe}. We find \begin{gather*} W_{\underline{\lambda}, \underline{x}, \mu, g_\infty}(x,y)=y^r+\sum_{i=1}^{r-1}(-\zeta(x))^{ i} \prod_{j=1}^{i-1} \mathcal{P}_{j}^{ i-j}(x) \mathcal{X}_{i}(x) y^{r-i}+(-\zeta(x))^{ r}\prod_{j=1}^{r-1} \mathcal{P}_{j}^{ r-j}(x) , \end{gather*} where we defined \begin{gather*} \zeta(x)=\mathcal{P}_{0}(x) \qquad \mathcal{X}_{i}(x)=\mathcal{X}^{[\lambda,\mu]}_{i}(x) , \end{gather*} with $i=1,\ldots,r-1$. We note that here the so-called matter polynomials $\mathcal{P}$ only depend on the partition $\lambda$ and not on $\mu$. For singularities associated to the partition $\nu=(5,5,2,2,1,1,1)$ as plotted in Fig.~\ref{fig:newton} the quiver diagram is depicted in Fig.~\ref{fig:quiver}. Further examples are discussed in Appendix~\ref{app:examples}. 
\section{Higher degree symplectic leaves}\label{sec:higherdegree} \begin{figure} \centering \begin{tikzpicture} \foreach \a in {0,1,2} { \begin{scope}[shift={(0.5*\a,0)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {0,1,2} { \begin{scope}[shift={(0.5*\a,0.5)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {3,4} { \begin{scope}[shift={(0.5*\a+0.1,0)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {3} { \begin{scope}[shift={(0.5*\a+0.1,0.5)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \foreach \a in {5,6,7} { \begin{scope}[shift={(0.5*\a+0.2,0)}] \draw (0,0) rectangle (0.5,0.5); \end{scope} } \node [below ] at (0.25,0) {$x_1$}; \node [below ] at (0.75,0) {$x_2$}; \node [below ] at (1.25,0) {$x_3$}; \node [below ] at (1.85,0) {$x_4$}; \node [below ] at (2.35,0) {$x_5$}; \node [below ] at (2.95,0) {$x_6$}; \node [below ] at (3.45,0) {$x_7$}; \node [below ] at (3.95,0) {$x_8$}; \end{tikzpicture} \caption{Decomposition of the partition $\lambda=(8,4)$ for $r=3$ into three partitions $(3,3)$, $(2,1)$ and $(3)$.}\label{fig:ytdecc} \end{figure} In this section we discuss some symplectic leaves of higher degree $n$ in the spectral parameter $x$ corresponding to the partitions of the total size $ n r $ for $G = {\rm GL}_{r}$. In the case $r=2$ we can factorize the higher degree Lax matrices for partitions $\lambda=(2n)$ as a product of degree~1 Lax matrices labelled by partitions $\lambda=(2)${\samepage \begin{gather*} L_{(2n),\underline{x},\varnothing}(x;\underline{p},\underline{q})=L_{(2),(x_1,x_2),\varnothing}(x;p_1,q_1)\cdots L_{(2),(x_{2n-1},x_{2n}),\varnothing}(x;p_n,q_n), \end{gather*} cf.~\cite{ShapiroThesis}.} For $r=3$ the partitions are of the form $\lambda=(\lambda_1,3n-\lambda_1)$ with $\lambda_1\leq 3n$. The Lax matrices for these partitions can be factorized as a product of Lax matrices of partitions $\lambda=(3)$, $\lambda=(2,1)$ and their conjugates. 
The conjugates correspond to the partitions $\lambda=(3,3)$ and $\lambda=(2,1)$ respectively and are obtained via \begin{gather*} \bar{L} _{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q})=\det L_{\lambda,\underline{x},\mu}(x;\underline{p},\underline{q}) L^{-1} _{\lambda,\underline{x},\mu}(-x;\underline{p},\underline{q}) . \end{gather*} This can be seen as follows. If $3n-\lambda_1=0$ we can build the partition from copies of $\lambda=(3)$ as for the case $r=2$. Any such partition is extended to the case where $3n-\lambda_1=1$ by adding a~partition $\lambda=(2,1)$ which again extends to $3n-\lambda_1=2$ by adding another partition $\lambda=(2,1)$. Now we note that any $\lambda=(\lambda_1,3n-\lambda_1)$ can be reduced to the cases discussed when stripping off multiples of the partition $\lambda=(3,3)$. An example is shown in Fig.~\ref{fig:ytdecc}. A similar factorization of higher degree leaves to the product of the degree 1 leaves applies to the case of ${\rm GL}_4$. However in the case of ${\rm GL}_5$ and higher rank such factorization fails, first time for the $n=2$ and the partition $\lambda=(4,3,3)$, i.e., $\lambda^{t} = (3,3,3,1)$, of the total size $|\lambda|=10$. The Lax matrix associated to this partition is not factorized into a product of degree 1 Lax matrices. However, we can compute $L_{\lambda =(4,3,3)}$ using the fusion method. \subsection{Fusion of degree 2}\label{app:quadnotfull} In this subsection we present the degree 2 fusion of two elementary Lax matrices \begin{gather*} {L}(x)=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} x-x_1+P_{12}Q_{21}&-P_{12}&P_{12}P_{23}\\ -Q_{21}&I&-P_{23}\\ -Q_{32}Q_{21}&Q_{32}&x-x_1-Q_{32}P_{23}\\ \end{BMAT} \right) \end{gather*} and \begin{gather*} {L}'(x)=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} x-x_2+P'_{13}Q'_{31}&P'_{13}Q'_{32}&-P'_{13}\\ P'_{23}Q'_{31}&x-x_2+P'_{23}Q'_{32}&-P'_{23}\\ -Q'_{31}&-Q'_{32}&I\\ \end{BMAT} \right) \end{gather*} as introduced in \eqref{eq:elax}. 
Here the blocks on the diagonal are of the size $k_1\times k_1$, $k_2\times k_2$ and $k_3\times k_3$ and the Lax matrices contain $k_2(k_1+k_3)$ and $k_3(k_1+k_2)$ pairs of conjugate variables respectively. We find that their product can be decomposed as \begin{gather*} L(x)L'(x)=\tilde Q\tilde L(x), \end{gather*} where \begin{gather*} \tilde Q=\left(\begin{BMAT}[5pt]{c:c:c}{c:c:c} I&0&0\\ 0&I&0\\ 0&\tilde Q_{32}&I\\ \end{BMAT} \right) \end{gather*} and \begin{gather}\label{eq:laxquad} \tilde L(x)=\left(\begin{BMAT}[5pt]{c:c}{c:c} (x-x_1)(x-x_2)I+\tilde P_{1\tilde 2}(xI-\tilde J)\tilde Q_{\tilde 2 1} &-\tilde P_{1\tilde 2}(xI-\tilde J)\\ -(xI-\tilde J) \tilde Q_{\tilde 2 1}&xI-\tilde J\\ \end{BMAT} \right) . \end{gather} Here we defined \begin{gather*} \tilde P_{1\tilde 2}=\left(\begin{BMAT}[5pt]{c:c}{c} \tilde P_{12}& \tilde P'_{13}\\ \end{BMAT} \right) ,\qquad \tilde Q_{\tilde 21}=\left(\begin{BMAT}[5pt]{c}{c:c} \tilde Q_{21}\\ \tilde Q'_{31}\\ \end{BMAT} \right) , \end{gather*} and \begin{gather*} \tilde J=\left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ \tilde Q'_{32}&I\\ \end{BMAT} \right)\left(\begin{BMAT}[5pt]{c:c}{c:c} x_2I&\tilde P'_{23}\\ 0&x_1I\\ \end{BMAT} \right)\left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ -\tilde Q'_{32}&I\\ \end{BMAT} \right) . \end{gather*} As in the linear fusion we introduced the new canonical variables \begin{alignat*}{3} &\tilde P_{12}=P_{12}-P'_{13}Q'_{32}, \qquad && \tilde Q_{21}=Q_{21},&\\ &\tilde P'_{13}=P'_{13}, \qquad && \tilde Q'_{31}=Q'_{31}+Q'_{32}Q_{21},&\\ &\tilde P'_{23}=P'_{23}+P_{23}-Q_{21}P'_{13},\qquad && \tilde Q'_{32}=Q'_{32}, &\\ & \tilde P_{23} =P_{23}, \qquad && \tilde Q_{32}=Q_{32}-Q'_{32} .& \end{alignat*} The final Lax matrix has $k_1k_2+k_1k_3+k_2k_3$ pairs $(\underline{p},\underline{q})$ and $\det \tilde L(x)=(x-x_1)^{k_1+k_3}(x-x_2)^{k_1+k_2}$. It corresponds to the partition $\tilde\lambda^t=(k_1+k_3,k_1+k_2)$. This can be seen when setting all~$p$ and~$q$ equal to zero in~\eqref{eq:laxquad}. 
One obtains \begin{gather*} \left. \tilde L(x)\right|_{\underline{p},\underline{q}=\varnothing}=\diag \left(\begin{BMAT}[5pt]{c:c:c}{c} (x-x_1)(x-x_2)I_{k_1\times k_1} & (x-x_2)I_{k_2\times k_2} & (x-x_1)I_{k_3\times k_3} \end{BMAT} \right) . \end{gather*} \subsection{Full fusion of degree 2}\label{app:fullquad} We can further multiply the resulting Lax matrix in \eqref{eq:laxquad} \begin{gather*} L'(x)=\left(\begin{BMAT}[5pt]{c:c}{c:c} (x-x_1)(x-x_2)I+ P'_{1 \tilde 2}(xI-J')Q'_{\tilde 2 1} &- P'_{1\tilde 2}(xI-J')\\ -(xI-J') Q'_{\tilde 2 1}&xI- J'\\ \end{BMAT} \right) , \end{gather*} with \begin{gather*} L(x)=\left(\begin{BMAT}[5pt]{c:c}{c:c} I&-P_{1\tilde 2}\\ Q_{\tilde 2 1}&xI-J-Q_{\tilde 2 1}P_{1\tilde 2}\\ \end{BMAT} \right) , \end{gather*} which corresponds to a general regular partition \eqref{eq:laxregular} with arbitrary $\lambda$ and $\mu=1^{[k_1]}$. The blocks on the diagonal are of the size $k_1\times k_1$ and $\tilde k_2=k_2+k_3$ as defined in Section~\ref{app:quadnotfull}. One finds \begin{gather*} L(x)L'(x)=\tilde Q\tilde L(x) \end{gather*} with \begin{gather*} \tilde Q=\left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ \tilde Q_{\tilde 21}&I\\ \end{BMAT} \right) , \end{gather*} and \begin{gather*} \tilde L(x)=\left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ \tilde Q'_{\tilde 21}&I\\ \end{BMAT} \right)\left(\begin{BMAT}[5pt]{c:c}{c:c} (x-x_1)(x-x_2)I&-\tilde P'_{1\tilde 2}(xI-J')\\ 0&(xI-J)(xI-J')\\ \end{BMAT} \right)\left(\begin{BMAT}[5pt]{c:c}{c:c} I&0\\ -\tilde Q'_{\tilde 21}&I\\ \end{BMAT} \right) . \end{gather*} Here we defined \begin{gather*} \tilde P'_{1\tilde 2} =P'_{1\tilde 2}+P_{1\tilde 2}, \qquad \tilde P_{1\tilde 2}=P_{1\tilde 2}, \qquad \tilde Q'_{\tilde 21}=Q'_{\tilde 21} ,\qquad \tilde Q_{\tilde 21}=Q_{\tilde 21}-Q'_{\tilde 21} . \end{gather*} The final Lax matrix contains $k_1\tilde k_2+k_J+k_{J'}$ pairs of conjugate variables where $k_J$ and $k_{J'}$ denote the number of pairs in $J$ and $J'$ respectively. 
It corresponds to the partition $\tilde \lambda^t=(k_1+k_3,k_1+k_2,\lambda^t)$. Setting $p,q=0$ yields the block matrix \begin{gather}\label{eq:qbas} \left. \tilde L(x)\right|_{\underline{p},\underline{q}=\varnothing}=\diag \left(\begin{BMAT}[5pt]{c:c}{c} (x-x_1)(x-x_2)I_{k_1\times k_1} & (x-X_{\lambda})(x-X'_{\lambda'})I_{\tilde k_2\times\tilde k_2} \end{BMAT} \right) . \end{gather} Here $X_\lambda$ is defined in \eqref{eq:Xlambda} corresponding to an arbitrary partition $\lambda$ and $X'_{\lambda'}$ follows from \eqref{eq:laxquad} and reads \begin{gather*} X_{\lambda'}'=\diag \left(\begin{BMAT}[5pt]{c:c}{c} (x-x_2)I_{k_2\times k_2} & (x-x_1)I_{k_3\times k_3} \end{BMAT}\right) . \end{gather*} \subsection[Example $\tilde \lambda=(4,3,3)$]{Example $\boldsymbol{\tilde \lambda=(4,3,3)}$} The case ${\rm GL}_5$ and $\tilde \lambda=(4,3,3)$ that we discussed at the beginning of this section corresponds to setting $k_1=1$ and $\tilde k_2=4$ in Section~\ref{app:fullquad} while setting $\lambda=(2,1,1)$ such that $k_J=3$. The Lax matrix $L'$ is obtained in the case $k_1=1$, $k_2=2$ and $k_3=2$ from the fusion in Section~\ref{app:quadnotfull} and thus yields $k_{J'}=4$. The half-dimension is $\frac 1 2 \dim_{\mathbb{C}}\mathcal{M}_{4,3,3} = 11$. For $p,q=0$ the diagonal of the Lax matrix follows from~\eqref{eq:qbas} when taking $X_{\lambda}=\diag(x_3,x_3,x_3,x_4)$. \section{Quantization}\label{sec:quantum} Notice that the classical Yang--Baxter equation (\ref{eq:skl}) is a limit of quantum Yang--Baxter equation \begin{gather*} R_{12}(x-y) \hat L_{13}(x) \hat L_{23}(y) = \hat L_{23}(y) \hat L_{13}(x) R_{12}( x- y) \end{gather*} in $\End(V) \otimes \End(V) \otimes A$ where $V \simeq \mathbb{C}^{r}$ is the fundamental representation of $\mathfrak{gl}_{r}$ and $A$ is the quantized algebra of functions on the classical phase space parametrized locally by $\big(p_I, q^I\big)$. 
Here the quantum $R$-matrix is $R \in \End(V) \otimes \End(V)$ and the quantum L-operator is $\hat L \in \End(V) \otimes A$, that is an $r \times r$ matrix valued in operators in $A$. The quantum $R$-matrix is \begin{gather*} R(x) = 1 + \frac{\epsilon \mathbb{P}}{x}, \end{gather*} where $\epsilon = - i \hbar$ is the quantization parameter and $\mathbb{P}$ is the permutation operator (\ref{eq:perm}). In terms of matrix elements $\hat L_{ij}$ we have the quantum Yang Baxter equation in $A$ \begin{gather*} \big[ \hat L_{ij}(x), \hat L_{kl}(y)\big] = -\frac{\epsilon}{x - y} \big(\hat L_{kj}(x) \hat L_{il}(y) - \hat L_{kj}(y) \hat L_{il}(x)\big) \end{gather*} and its classical limit is \begin{gather*} \{ L_{ij}(x), L_{kl}(y) \} = - \frac{1} {x - y} (L_{kj}(x) L_{il}(y) - L_{kj}(y) L_{il}(x)) \end{gather*} with the standard convention \begin{gather*} \big[\hat \phi , \hat \psi\big] = \epsilon \{ \phi, \psi \} + O\big(\epsilon^2\big) , \qquad \epsilon \to 0, \end{gather*} where $ \big[\hat \phi , \hat \psi\big] $ denotes the commutator of the elements $\hat \phi$, $\hat \psi$ of the algebra $A$ that correspond to the quantization of the functions $\phi$, $\psi$ on the classical phase space with Poisson brackets $\{\phi, \psi\}$. In particular the canonical coordinates $ p_I$, $q^I$ have Poisson bracket \begin{gather*} \big\{ p_I, q^J \big\} = \delta_{I}^J \end{gather*} and the respective operators have commutation relations \begin{gather*} \big[ \hat p_I, \hat q^J \big] = \epsilon \delta_{I}^J \end{gather*} that can be represented in the algebra of differential operators acting on Hilbert space of states represented by function of $q^I$ as \begin{gather*} \hat q^I \mapsto q^I, \qquad \hat p_I \mapsto \epsilon \frac{ \partial}{ \partial q^I}. 
\end{gather*} For a polynomial function $f(\underline{\hat q},\underline{ \hat p})$ the normal ordering notation ${:}f(\underline{\hat q},\underline{\hat p}){:}$ means placing all operators $\hat p_I$ to the right of the operators $\hat q^I$ in each monomial. The quantum version $\hat L _{\underline{\lambda}, \underline{x}, \mu}(x)$ of all our classical solutions $L_{\underline{\lambda}, \underline{x}, \mu}(x)$ is obtained by replacing all variables $(\underline{p},\underline{q})$ by the operators $\underline{\hat p}$, $\underline{ \hat q}$ and assuming normal ordering convention. One can check that such operator valued matrix $\hat L_{\underline{\lambda}, \underline{x}, \mu}(x)$ satisfies quantum Yang--Baxter equation. The commuting Hamiltonians are obtained from the expansion of the quantum spectral determinant (quantum spectral curve) as in~\cite{Chervov:2006xk} \begin{gather} \hat W_{x,y} =\tr A_r \big(y-{\rm e}^{\epsilon \partial_x}\hat L_1'(x)\big)\big(y-{\rm e}^{\epsilon \partial_x}\hat L_2'(x)\big)\cdots\big(y- {\rm e}^{\epsilon \partial_x}\hat L_r'(x)\big)\nonumber\\ \hphantom{\hat W_{x,y}}{} =\sum_{k=0}^r (-1)^k y^{r-k} \hat \chi_k(x+\epsilon){\rm e}^{\epsilon k \partial_x},\label{eq:qspecdet} \end{gather} where $\hat L'(x)=\rho_1(g_\infty)\hat L _{\underline{\lambda}, \underline{x}, \mu}(x)$, cf.~\eqref{eq:twistedL}, and $A_k$ is the normalised antisymmetrizer acting on the $k$-fold tensor product of $\mathbb{C}^r$. The quantum characters whose coefficients generate the algebra of quantum commuting Hamiltonians (Bethe subalgebra) are \begin{gather}\label{eq:qchar} \hat \chi_k(x)=\tr A_k \hat L_{1}'(x)\cdots \hat L_{k}'(x+\epsilon (k-1)) \end{gather} see also \cite{molevbook}. 
The definition of the quantum spectral determinant~(\ref{eq:qspecdet}) is a quantum version of the classical spectral curve (\ref{eq:specdet}), and there is a quantum version of the factorization (\ref{eq:chik}) \begin{gather* \hat \chi_k (x) = \hat {\mathcal{Q}}^{[\lambda]}_{r-k}(x+\epsilon|\mu|) \hat {\mathcal{X}}^{[\lambda, \mu]}_{k}(x), \end{gather*} where the $c$-valued polynomials are \begin{gather* \hat{ \mathcal{Q}}_i^{[\lambda]}(x)=\prod_{j=1}^{\lambda_{i+1}}\prod_{k=1}^{\lambda_{j}^t-i}\left(x-x_j+\epsilon\left(\sum_{l=1}^{j-1}\lambda_l^t+k-1\right)\right) . \end{gather*} The quantization of the corresponding integrable systems in the context of the $\mathcal{N}=2$ supersymmetric quiver gauge theories has been considered in~\cite{Nekrasov:2013xda}, in particular the $q$-character functions appearing in \cite{Nekrasov:2013xda} after \cite{Frenkel:1998} stand for the eigenvalue of the quantum commuting Hamiltonians~(\ref{eq:qchar}). The quantized symplectic leaves $\hat{\mathcal{M}}_{\underline{\lambda}, \underline{x}, \mu}$ are modules, typically infinite-dimensional, for the dual Yangian algebra $\mathbf{Y}(\mathfrak{gl}_{r})^{*}$ which is a quantum deformation algebra of the space of functions on the Poisson--Lie group ${\rm GL}_{r}(\mathcal{K}_{\mathbb{P}^{1}_x})$. This representation theory relates to the `pre-fundamental' modules of Hernandez--Jimbo~\cite{hernandez2012asymptotic} associated to the individual singularities at points $x_i$ labeled by a fundamental co-weight $\check \omega_{\lambda_{i}^{t}}$.
1,108,101,565,898
arxiv
\section{Introduction} Kinetic equations are not first-principle physical equations; rather, they often arise from mean field approximations of particle systems, hence there are inevitably modeling errors due to incomplete knowledge of the interaction mechanism, imprecise measurement of the initial and boundary data, forcing terms, geometry, etc. These errors can contribute uncertainties to the problems. Despite intensive research at both theoretical and numerical levels, most research is concerned with deterministic models and ignores uncertainties. Nevertheless, uncertainty quantification for kinetic equations, due to its importance in making reliable predictions, calibrations and improvements of the kinetic models, deserves major attention from the research community. To understand the propagation of the uncertainties and how they impact long-time behavior of the solution, sensitivity and regularity analyses are crucial, since they allow us to explore how sensitively the solution depends on random input parameters and to determine the convergence rate of the numerical methods in the parameter space. In recent years one begins to see some activities in such studies, see for example \cite{DesPer, JinLiuMa, Liu, QinWang, JinZhu, ShuJin, LiuJinUQ}. At the numerical level, one of the popular UQ methods is the generalized polynomial chaos method in the stochastic Galerkin (referred to as gPC-SG) framework \cite{GS, LMK, XiuBook}. Compared with the classical Monte-Carlo method, the gPC-SG approach enjoys a spectral accuracy in the random space--provided the solution is sufficiently regular in the space--while the Monte-Carlo method converges with only half-order accuracy. {\color{blue} As far as the non-intrusive stochastic collocation (SC) method is concerned, first, the regularity analysis performed in this article is also useful for the accuracy analysis of SC methods. 
Second, there have been comparisons in terms of computational efficiencies between SG and SC for high dimensional problems; and there have been supporting cases where the SG methods are more efficient (see for example \cite{Xiu-Shen}). For the problem under study, it remains an interesting question to make such a comparison for high dimensional problems, but this is out of the scope of this article and could be an interesting future project. } Recent studies of gPC-SG methods for kinetic equations and their behavior in various asymptotic regimes are summarized in the review article \cite{HuReview}. Kinetic equations play an important role in semiconductor device modeling \cite{MarkowichBook}. In such problems, the equations often have a diffusive scaling, characterized by the dimensionless Knudsen number $\varepsilon$, that leads asymptotically to the drift-diffusion equations as $\varepsilon$ goes to zero. For multiscale problems in which $\varepsilon$ can vary in several orders of magnitude, the asymptotic-preserving (AP) schemes have proven to be effective and efficient to deal with different scales in a seamless way. An AP scheme switches between a micro solver and a macro one automatically, depending on the ratio of numerical parameters (mesh size, time step, etc.) over $\varepsilon$ \cite{Jin-AP-99, Jin-AP-Review, HJL-Review}. Considering only the transport of electrons in the conduction band, \cite{JinLorenzo} first introduced an AP scheme for the semiconductor Boltzmann equation with an anisotropic collision operator, which is able to capture the correct diffusive behavior for the under-resolved numerical approximation. The scheme was further improved in \cite{Dengjia} with a better stability condition. A higher-order scheme was constructed in \cite{Dimarco}, which improved the strict parabolic stability condition to a hyperbolic one. An efficient AP scheme in the high field regime was developed in \cite{JinWang}. 
The authors in \cite{HuWang} further study the semiconductor Boltzmann equation with a two-scale stiff collision operators, by taking into account different effects including the interactions between electrons and the lattice defects caused by ionized impurities \cite{Degond_ET}; they design and demonstrate the efficiency and accuracy of an asymptotic-preserving scheme that leads to an energy-transport system as mean free path goes to zero at a discretized level. For kinetic equations that contain random uncertainty, \cite{XZJ} first introduced the notion of stochastic AP (s-AP), which was followed recently by many works successfully handling the multiple scales for the kinetic equations with uncertainties \cite{Hu, JinLiu, MuLiu, JinLu}. {\color{blue}s-AP scheme is introduced in the SG setting. It extends the idea from the deterministic AP methods to the stochastic case, which requires that as $\varepsilon\to 0$, the SG for the microscopic model with uncertainties automatically becomes a SG approximation for the limiting macroscopic stochastic equation. } In this paper, we study the bipolar semiconductor system with random uncertainties, by taking into consideration the generation-recombination process between electrons and holes \cite{Ansgar}. The bipolar semiconductor Boltzmann-Poisson equations will be studied, and we design and implement the gPC-SG scheme, with a formal proof of the s-AP property. In order to analyze the convergence rate of the scheme, we use the hypocoercivity theory, which was well established in deterministic kinetic theory \cite{VillaniBook, DMS, CN, MB} and recently extended to study uncertain kinetic equations in the linear case \cite{QinWang} and nonlinear ones \cite{JinZhu, ShuJin, LiuJinUQ}. 
By ignoring the self-consistent electric potential and using the hypocoercivity analysis done in \cite{MB, LiuJinUQ}, we obtain an exponential decay in time of the random solutions to the (deterministic) global equilibrium, and uniform spectral convergence with an exponential decay in time of the numerical error of the gPC-SG method for the underlying system with uncertainties, under suitable assumptions on the gPC polynomials and the random inputs. To our knowledge, this is the first study of AP and s-AP schemes for bipolar semiconductor Boltzmann system, in both deterministic and uncertain cases. This paper is organized as the following. Section \ref{sec:2} gives an introduction of the bipolar Boltzmann-Poisson model, followed by a derivation of the limiting drift-diffusion equations. Section \ref{sec:3} discusses the AP scheme for the deterministic problem. A s-AP scheme in the gPC-SG framework for the bipolar model with random inputs will be studied and verified in section \ref{sec:4}. A convergence rate analysis for both the analytical solution and the gPC solution for a simpler model (without electric field) will also be conducted in section \ref{sec:4}. In section \ref{sec:6}, we present several numerical examples for both the deterministic problem and the model with uncertainties, to illustrate the efficiency, accuracy and s-AP properties of the proposed scheme. Finally, the paper is concluded in section \ref{sec:7}. \section{The bipolar semiconductor Boltzmann-Poisson system} \label{sec:2} In semiconductor devices, electrical currents originate from the transport of electrons and holes. $f_n(x,v,t)$, $f_p(x,v,t)$ represent the existence probability of an electron and a hole, respectively, at position $x\in\mathbb R^d$, with the velocity $v\in\mathbb R^d$, where $d$ is the dimension, at time $t\geq 0$. 
The Boltzmann equations that give the evolution of the distribution functions for them are written as \cite{Ansgar, Poupaud} \begin{eqnarray} &\label{model_a}\displaystyle\epsilon \partial_t f_n + (v\cdot\nabla_x f_n -E\cdot \nabla_v f_n) =\frac{1}{\epsilon}Q_n(f_n)+\epsilon I_n(f_n, f_p), \\[6pt] &\label{model_b}\displaystyle\epsilon \partial_t f_p +(\beta v\cdot\nabla_x f_p + E\cdot \nabla_v f_p)=\frac{1}{\epsilon}Q_p(f_p)+\epsilon I_p(f_n, f_p), \\[6pt] &\label{model_c} \displaystyle\gamma\, \Delta_x \Phi = n-p-C(x), \qquad E=-\nabla_x \Phi. \end{eqnarray} where $\beta=m_{e}^{\ast}/m_{h}^{\ast}$ is the ratio of the effective masses of electrons and holes, which we assume to be constant. $\Phi=\Phi(t,x)$ represents the electric potential, $E=E(t,x)$ is the self-consistent electric field given by the Poisson equation (\ref{model_c}). $\gamma$ is some scaled Debye length, $C(x)$ is the doping profile. The densities of the electrons and the holes are given by $$n=\int_{\mathbb R^d} f_n\, dv, \qquad p=\int_{\mathbb R^d} f_p\, dv. $$ Under the low density approximation, the linear collision operators are given by \begin{equation} Q_i (f_i)=\int_{\mathbb R^d} \sigma_i(x, v,w)(M_i(v)f_i(w)-M_i(w)f_i(v))dw, \qquad i=n, \, p\,, \label{Qi}\end{equation} with \begin{equation} M_n(v)=\frac{1}{(2\pi)^{d/2}}e^{-|v|^2/2}\,, \qquad M_p(v)=\frac{1}{(2\pi/\beta)^{d/2}}e^{-\beta|v|^2/2}\,. \label{Max}\end{equation} being the normalized Maxwellian distributions of the electrons and holes, respectively. The anisotropic scattering kernels $\sigma_i$ for electrons and holes are rotationally invariant and satisfy \begin{equation}\label{sigma}\sigma_i(x,v,w)=\sigma_i(x,w,v)>0, \qquad i =n, \, p\,. \end{equation} The process of {\it generation} of an electron-hole pair is that an electron moves from the valence band to the conduction band, leaving a hole behind it in the valence band. 
The inverse process of an electron moving from the conduction to the valence band is termed the {\it recombination} of an electron-hole pair. See the following figure for an explanation \cite{Ansgar}: \begin{figure}[H] \includegraphics[width=0.55\textwidth]{fig1.jpg} \includegraphics[width=0.53\textwidth]{fig2.jpg} \caption{A recombination-generation process} \end{figure} The recombination-generation operators are given by \cite{Ansgar, Poupaud} \begin{eqnarray} &\label{RG0}\displaystyle I_n(f_n, f_p)=\int_{\mathbb R^d} \sigma_I(x,v,w)\left[M_n(v)-M_p(w)f_n(v)f_p(w)\right]dw, \\[4pt] &\label{RG}\displaystyle I_p(f_n, f_p)=\int_{\mathbb R^d} \sigma_I(x,w,v)\left[M_n(w)-M_p(v)f_n(w)f_p(v)\right]dw, \end{eqnarray} where $\sigma_I$ is the generation-recombination kernel and is also rotationally invariant, as given in (\ref{sigma}). The collision frequency for electrons and holes is given by \begin{equation}\label{freq1} \lambda_i(x,v)=\int_{\mathbb R^d}\sigma_i(x,v,w)M_i(w)dw, \qquad i=n, \, p\,. \end{equation} The author in \cite{Poupaud} has proved the existence and uniqueness of smooth solutions of the system (\ref{model_a})--(\ref{model_c}). {\color{red} {\bf Remark.} We give some explanations for the derivation of $I_n$, $I_p$ that model the generation and recombination processes: \begin{equation} \label{R:I_n} I_n(f_n, f_p)=\int_{\mathbb R^d} \sigma_I(x,v,w)\left[M_n(v)(1-f_n(v))(1-f_p(w)) - M_p(w) f_n(v) f_p(w)\right]dw. \end{equation} The first term in the integral $I_n$ in (\ref{R:I_n}) represents the probability of creation of an electron at the coordinates $(x,v)$ and a hole at $(x,w)$; the second term in the integral represents the probability of recombination of an electron-hole pair. Due to the hypothesis of low density, i.e., $f_n, \, f_p \ll 1$, the terms $(1-f_n(v))$, $(1-f_p(w))$ tend to be $1$, then one gets $I_n$ defined in (\ref{RG0}). Similarly for $I_p$ given in (\ref{RG}). 
The recombination-generation effects are not negligible and are crucial in many physics applications such as bipolar transistors, solar cells, LEDs and semiconductor lasers. Take solar cells as an example. Their mechanism is composed of several steps, that is, electron-hole pair generation by absorption of light in semiconductors, separation of electron-hole pairs by the built-in potential, electron-hole recombination, etc. Understanding the recombination-generation processes is important and could help us improve the efficiency of solar cells \cite{solar}. } \subsection{Bipolar drift-diffusion equations} The relaxation times for the collision and generation-recombination processes satisfy the relation $\tau_{\text{col}}\ll \tau_{\text{gen}}$. Indeed, the typical time scale of collisions is $10^{-12}$s whereas the typical time for recombination-generation effects is $10^{-9}$s. In this section, the drift-diffusion equations are derived under the assumption that collisions occur on a much faster time scale than recombination-generation processes. First, let us recall the following properties for the collision operators $Q_i$, for $i=n, p$\,, as discussed in \cite{Ansgar, Poupaud}. \\ \noindent (i) The kernel of $Q_i$ is spanned by $M_i$. \noindent(ii) $Q_i(f)=g$ has a solution if and only if $\int_{\mathbb R^d} g\, dv=0$. \noindent(iii) The equations $$Q_n(h_n)=vM_n(v), \qquad Q_p(h_p)=\beta vM_p(v)$$ have solutions $h_n$, $h_p$ with the property that there exist $\mu_{0,n}$, $\mu_{0,p}\geq 0$ satisfying \begin{equation}\label{mu}\int_{\mathbb R^d} v\otimes h_n\, dv =-\mu_{0,n}I, \qquad \int_{\mathbb R^d} \beta v\otimes h_p\, dv = -\mu_{0,p}I, \end{equation} where $I \in\mathbb R^{d\times d}$ is the identity matrix. Let $(f_n^{\epsilon}, f_p^{\epsilon}, E^{\epsilon})$ be a solution of (\ref{model_a})--(\ref{model_c}). Letting $\epsilon\to 0$ in (\ref{model_a}) and (\ref{model_b}), one obtains $$Q_n(f_n)=0, \qquad Q_p(f_p)=0, $$ where $\displaystyle f_i = \lim_{\epsilon\to 0} f_i^{\epsilon}$.
Thus $f_n=nM_n$ and $f_p=pM_p$ by property (i). Inserting the Chapman-Enskog expansions \begin{equation}\label{Enskog} f_n^{\epsilon}=nM_n+\epsilon g_n^{\epsilon}, \qquad f_p^{\epsilon}=pM_p+\epsilon g_p^{\epsilon} \end{equation} into the Boltzmann equation (\ref{model_a}), one has \begin{eqnarray*} &\displaystyle \epsilon \partial_t(n {\color{red}M_{n}}+\epsilon g_n^{\epsilon}) + (v\cdot\nabla_x(n{\color{red} M_{n}})-E\cdot\nabla_v(n {\color{red}M_{n}})) \\[6pt] &\displaystyle \qquad\qquad\qquad\qquad\qquad\qquad\qquad + \epsilon (v\cdot\nabla_x g_n^{\epsilon}-E\cdot\nabla_v g_n^{\epsilon}) = Q_n(g_n^{\epsilon}) + \epsilon I_n(f_n^{\epsilon}, f_p^{\epsilon}). \end{eqnarray*} The limit $\epsilon\to 0$ yields \begin{equation}\displaystyle Q_n(g_n)=(\nabla_x n+nE)\cdot v {\color{red} M_{n}}. \label{Qn_limit} \end{equation} Similarly, inserting the expansion (\ref{Enskog}) into (\ref{model_b}), one gets \begin{equation}\displaystyle Q_p(g_p)=\beta (\nabla_x p-pE)\cdot v {\color{red}M_{p}}, \label{Qp_limit}\end{equation} where $\displaystyle g_i=\lim_{\epsilon\to 0}g_i^{\epsilon}$, $i=n,p$. By property (iii), solutions of (\ref{Qn_limit}) and (\ref{Qp_limit}) are $$g_n=\frac{J_n}{\mu_{0,n}}\cdot h_n +c_n M_n, \qquad g_p=-\frac{J_p}{\mu_{0,p}}\cdot h_p +c_p M_p, $$ for some constants $c_n$, $c_p$, with $J_n$, $J_p$ defined by $$ J_n=\mu_{0,n}(\nabla_x n+nE), \qquad J_p=-\mu_{0,p}(\nabla_x p-pE). $$ Thus \begin{equation}\langle v g_n\rangle =-J_n, \qquad \beta\langle v g_p \rangle =J_p, \label{vg}\end{equation} where $\displaystyle\langle\, \cdot\, \rangle = \int_{\mathbb R^d} dv.
$ Insert the Chapman-Enskog expansions (\ref{Enskog}) into (\ref{model_a})--(\ref{model_b}) and integrate the velocity on both sides, then \begin{equation} \begin{aligned} \partial_t \langle nM\rangle +\epsilon\partial_t \langle g_n^{\epsilon}\rangle +\nabla_x\cdot\langle vg_n^{\epsilon}\rangle &=\langle I_n(nM_n+\epsilon\, g_n^{\epsilon}, \, pM_p+\epsilon\, g_p^{\epsilon})\rangle, \\[6pt] \partial_t \langle pM\rangle +\epsilon\partial_t \langle g_p^{\epsilon}\rangle + \beta\nabla_x\cdot\langle vg_p^{\epsilon}\rangle &=\langle I_p(nM_n+\epsilon\, g_n^{\epsilon}, \, pM_p+\epsilon\, g_p^{\epsilon})\rangle. \end{aligned} \end{equation} As $\epsilon\to 0$, by (\ref{vg}), one has \begin{eqnarray} &\label{limit_a}\displaystyle \partial_t n -\nabla_x\cdot J_n =\langle I_n(nM_n, \, pM_p) \rangle, \\[6pt] &\label{limit_b}\displaystyle \partial_t p +\nabla_x \cdot J_p=\langle I_p(nM_n,\, pM_p)\rangle. \end{eqnarray} Denote $R(n,p)=\langle I_n(nM_n, pM_p) \rangle$, then \begin{align} &\displaystyle R(n,p)= \int_{\mathbb R^d}\int_{\mathbb R^d}\sigma_I(x,v,w)M_n(v)dwdv - np \int_{\mathbb R^d}\int_{\mathbb R^d} \sigma_I(x,v,w)M_p^2(w)M_n(v)dwdv\notag\\[4pt] &\label{R_def}\displaystyle\qquad\quad := A(x) - np B(x), \end{align} where we define $$A(x)=\int_{\mathbb R^d}\int_{\mathbb R^d}\sigma_I(x,v,w)M_n(v)dwdv, \qquad B(x)=\int_{\mathbb R^d}\int_{\mathbb R^d} \sigma_I(x,v,w)M_p^2(w)M_n(v)dwdv. $$ Also note that $\langle I_n(nM_n, pM_p) \rangle=\langle I_p(nM_n, pM_p) \rangle$. The bipolar drift-diffusion Poisson system is given below: \\[2pt] {\bf{\large Bipolar drift-diffusion equations}} \begin{equation} \begin{aligned} \partial_t n -\nabla_x\cdot J_n &= R(n,p), \qquad\qquad J_n=\mu_{0,n}\, (\nabla_x n+nE), \\[6pt] \partial_t p -\nabla_x \cdot J_p &= R(n,p), \qquad\qquad J_p=\mu_{0,p}\, (\nabla_x p-pE), \\[6pt] -\gamma\, \nabla_x E &= n-p-C(x), \qquad x\in \mathbb R^d, \end{aligned} \label{diff1} \end{equation} with $R(n,p)$ defined in (\ref{R_def}). 
\\[2pt] {\color{red} {\bf Remark.} We list below some major differences and numerical difficulties compared with the single-species semiconductor Boltzmann equation studied in \cite{JinLorenzo}. We first recall the model equation (2.1) in \cite{JinLorenzo}, \begin{equation}\label{model-ref} \varepsilon \partial_t f + {\bf v}\cdot \nabla_{{\bf x}}f - \frac{q}{m}{\bf E}\cdot\nabla_{{\bf v}}f = \frac{1}{\varepsilon}{\bf Q}(f) + \varepsilon G. \end{equation} There $G=G(t, {\bf x}, {\bf v})$ is a source term that models the generation-recombination process. One can see that $G$ is not a function of $f$, thus the model studied in \cite{JinLorenzo} is linear, and only constant functions $G$ are considered in their numerical tests. In our model systems under study, $I_n(f_n, f_p)$ and $I_p(f_n, f_p)$ on the right-hand-side of (\ref{model_a})--(\ref{model_b}) model the generation and recombination of an electron-hole pair. Defined in (\ref{RG}), $I_n$, $I_p$ are non-linear integral operators in $f_n$, $f_p$ and are much more complicated than $G=G(t, {\bf x}, {\bf v})$ considered in \cite{JinLorenzo}! In fact, equations (\ref{model_a})--(\ref{model_b}) that describe the evolution of the distribution functions for electrons and holes are {\it coupled} through these non-linear integral operators, which is accounted for the {\it major difference} compared to the single-species model. As $\varepsilon\to 0$, the limiting system--bipolar drift-diffusion equations given in (\ref{diff1}) are also different from the drift-diffusion equation for the single-species, with the non-linear term $R(n,p)$ on the right-hand-side. Even for the deterministic bipolar model, it is not a trivial extension of the numerical method developed in \cite{JinLorenzo}. We would like to emphasize that this project is the {\it first} study of AP and s-AP schemes for bipolar semiconductor Boltzmann--Poisson system, in both deterministic and uncertain settings. 
} \section{Parity equations and diffusive relaxation system} \label{sec:3} \subsection{Even- and Odd-Parity Equations} Consider the one-dimensional velocity space $v\in\mathbb R$. Denote $f_1=f_n$, $f_2=f_p$, $\rho_1=n$, $\rho_2=p$ and rewrite the system (\ref{model_a})--(\ref{model_c}) as \begin{align} \label{model_1}\partial_t f_1 + \frac{1}{\epsilon}(v\cdot\nabla_x f_1-E\cdot\nabla_v f_1) &=\frac{1}{\epsilon^2}Q_1(f_1) + I_1(f_1,f_2), \\[6pt] \label{model_2}\partial_t f_2 + \frac{1}{\epsilon}(\beta v\cdot\nabla_x f_2+E\cdot\nabla_v f_2)&=\frac{1}{\epsilon^2}Q_2(f_2) + I_2(f_1,f_2), \\[6pt] \label{model_3}-\gamma\, \nabla_x E & =\rho_1-\rho_2-C(x). \end{align} We will use the even- and odd-parity formulation, which is an effective vehicle to derive asymptotic-preserving schemes for the linear transport equation \cite{JPT2} and the one-component semiconductor Boltzmann equation \cite{JinLorenzo}. First, introduce the even parities $r_i$ and the odd parities $j_i$, for $i=1,2$, \begin{equation} \begin{aligned} r_i(t,x,v) & =\frac{1}{2}\left[f_i(t,x,v)+f_i(t,x,-v)\right], \\[6pt] j_i(t,x,v) &=\frac{1}{2\epsilon}\left[f_i(t,x,v)-f_i(t,x,-v)\right]. \end{aligned} \label{RJ} \end{equation} Split (\ref{model_1}) and (\ref{model_2}) respectively into two equations, one for $v$ and one for $-v$ (with $v>0$), then \begin{equation} \begin{aligned} \partial_t f_i + \frac{1}{\epsilon}(s_i v\cdot\nabla_x f_i \mp E\cdot\nabla_v f_i)& =\frac{1}{\epsilon^2}Q_i(f_i)(v) + I_i(f_1,f_2)(v), \\[6pt] \partial_t f_i - \frac{1}{\epsilon}(s_i v\cdot\nabla_x f_i \pm E\cdot\nabla_v f_i) &=\frac{1}{\epsilon^2}Q_i(f_i)(-v) + I_i(f_1,f_2)(-v), \end{aligned} \label{EO} \end{equation} where $s_1=1$, $s_2=\beta$. (A notation remark: in the first equation, $i=1$ corresponds to $- E\cdot\nabla_v f_1$ and $i=2$ corresponds to $E\cdot\nabla_v f_2$\, ; in the second equation, $i=1$ corresponds to $E\cdot\nabla_v f_1$ and $i=2$ corresponds to $-E\cdot\nabla_v f_2$\,.)
Adding (and multiplying by $1/2$), subtracting (and multiplying by $1/2\epsilon$) the two equations in (\ref{EO}), for $i=1$, $2$, respectively, one gets \begin{equation} \begin{aligned} \partial_t r_1 + v\cdot\nabla_x j_1 - E\cdot\nabla_v j_1 &= \frac{1}{\epsilon^2}Q_1(r_1)+I_{\text{1,plus}}(r_1,r_2), \\[6pt] \partial_t j_1 + \frac{1}{\epsilon^2}(v\cdot\nabla_x r_1 - E\cdot\nabla_v r_1) &= -\frac{1}{\epsilon^2}\lambda_1\, j_1 + I_{\text{1,minus}}(r_2, j_1), \end{aligned} \label{EO1} \end{equation} and \begin{equation} \begin{aligned} \partial_t r_2 + v\cdot\nabla_x j_2 + E\cdot\nabla_v j_2 &= \frac{1}{\epsilon^2}Q_2(r_2)+I_{2,\text{plus}}(r_1,r_2), \\[6pt] \partial_t j_2 + \frac{1}{\epsilon^2}(\beta v\cdot\nabla_x r_2 + E\cdot\nabla_v r_2) &= -\frac{1}{\epsilon^2}\lambda_2\, j_2 + I_{\text{2,minus}}(r_1,j_2), \end{aligned} \label{EO2} \end{equation} where \begin{equation} \begin{aligned} I_{\text{1,plus}}(r_1,r_2) &=\frac{1}{2}\int_{\mathbb R}\left(\sigma_I(v,w) + \sigma_I(-v,w)\right) dw M_1(v) - \int_{\mathbb R} \sigma_I(v,w)r_2(w)M_2(w)dw\, r_1(v), \\[6pt] I_{\text{2,plus}}(r_1,r_2) &= \frac{1}{2}\int_{\mathbb R}\left(\sigma_I(v,w) + \sigma_I(-v,w)\right)M_1(w)dw -\int_{\mathbb R}\sigma_I(v,w)r_1(w)dw\, r_2(v)M_2(v), \\[6pt] I_{\text{1,minus}}(r_2, j_1) &=\frac{1}{2}\int_{\mathbb R}\left(\sigma_I(v,w) -\sigma_I(-v,w)\right) dw M_1(v) - \epsilon\int_{\mathbb R} \sigma_I(v,w)r_2(w)M_2(w)dw \, j_1(v), \\[6pt] I_{\text{2,minus}}(r_1,j_2) &=\frac{1}{2}\int_{\mathbb R}\left(\sigma_I(v,w) - \sigma_I(-v,w)\right)M_1(w)dw - \epsilon \int_{\mathbb R}\sigma_I(v,w)r_1(w)dw\, j_2(v)M_2(v), \end{aligned} \label{I_DEF} \end{equation} which is derived in Appendix (ii). 
The macroscopic variables $\rho_i$ and mean velocity $u_i$ can be expressed in terms of the new variables $r_i$, $j_i$ ($i=1,2$), \begin{equation} \begin{aligned} \rho_i (t,x) &=\int_{\mathbb R} f_i(t,x,v) dv= \int_{\mathbb R} r_i(t,x,v) dv, \\[4pt] u_i(t,x) &=\frac{1}{\epsilon\rho_i}\int_{\mathbb R} f_i(t,x,v)v\, dv=\frac{1}{\rho_i}\int j_i(t,x,v)v\, dv. \end{aligned} \label{mac} \end{equation} \subsection{Diffusive relaxation system} As was done in \cite{JinLiu, JinLorenzo, JPT2}, we rewrite the equations (\ref{EO1})--(\ref{EO2}) into the following diffusive relaxation system \begin{equation} \begin{aligned} \partial_t r_i + v\cdot\nabla_x j_i \mp E\cdot\nabla_v j_i &= \frac{1}{\epsilon^2}Q_i(r_i)+I_{i,\text{plus}}\,, \\[6pt] \partial_t j_i + \phi (s_i v\cdot\nabla_x r_i \mp E\cdot\nabla_v r_i) &=-\frac{1}{\epsilon^2}\left[\lambda_i j_i +(1-\epsilon^2\phi)(s_i v\cdot\nabla_x r_i \mp E\cdot\nabla_v r_i)\right]+I_{i,\text{minus}}\,, \end{aligned} \label{EO5} \end{equation} where $\phi=\phi(\epsilon)$ is a control parameter such that $0\leq\phi\leq 1/\epsilon^2$. One simple choice of $\phi$ is $$\phi(\epsilon)=\min\left\{1,\, \frac{1}{\epsilon^2}\right\}. $$ A standard time splitting on the system (\ref{EO5}) consists of a relaxation step \begin{align} &\displaystyle\label{relax_1}\partial_t r_i = \frac{1}{\epsilon^2}Q_i(r_i), \\[6pt] &\displaystyle\label{relax_2}\partial_t j_i = -\frac{1}{\epsilon^2}\left[\lambda_i\, j_i +(1-\epsilon^2\phi)(s_i v\cdot\nabla_x r_i \mp E\cdot\nabla_v r_i)\right], \end{align} and the transport step \begin{equation} \begin{aligned} \partial_t r_i + v\cdot\nabla_x j_i \mp E\cdot\nabla_v j_i &= I_{i,\text{plus}}\,, \\[6pt] \partial_t j_i + \phi\, (s_i v\cdot\nabla_x r_i \mp E\cdot\nabla_v r_i) &= I_{i,\text{minus}}\,. \end{aligned} \label{trans_1} \end{equation} \\[2pt] {\color{red} {\bf Remark.} We address the major numerical difficulties compared to the single-species problem studied in \cite{JinLorenzo}. 
With the non-linear integral operators $I_1$, $I_2$ in (\ref{EO}), in order to use the even-odd decomposition method, extra effort is needed to deal with the non-linear terms. The linear transport terms on the left-hand-side and the linear collision terms on the right-hand-side remain linear after adding and subtracting of the two equations in (\ref{EO}). The difficulty is to derive what the non-linear integral operators become, namely, to write the non-linear terms (after the addition and subtraction operations) as functions with respect to one of the pairs in the set $\{r_1, r_2, j_1, j_2\}$. This calculation requires repeatedly use of change of variables, rotationally invariance property and symmetry of the collision kernel $\sigma_I$, and is shown clearly in the Appendix. Moreover, these non-linear operators $I_{i,\text{plus}}$, $I_{i, \text{minus}}$ increase the computational complexity of the gPC-SG method introduced in section \ref{subsec:gPC}, where we have tensor products of matrices and vectors there. We mention another major difference in numerical scheme compared with the one species semiconductor Boltzmann equation studied in \cite{JinLorenzo}. Indeed, for each species, the procedure of rewriting the equations (\ref{EO1})--(\ref{EO2}) into the diffusive relaxation system and adopting the first-order time-splitting is similar to \cite{JinLorenzo}, except that one needs to determine whether to put the non-linear terms $I_{i, \text{plus}}$, $I_{i, \text{minus}}$ ($i=1, 2$) on the right-hand-side of equations in the relaxation step or the transport step. We design the scheme to put them in the transport step so that the AP property is guaranteed. Furthermore, sAP property of the discretized gPC-SG scheme for the underlying system in the stochastic case is proved in section \ref{sec:sAP}. 
} \subsection{A discretized asymptotic-preserving scheme} \label{Det_scheme} In the relaxation step (\ref{relax_1}), since the collision term is stiff, one needs to treat it implicitly. The generation-recombination term is non-stiff, so one can treat it explicitly. It is hard to invert the collision operator $Q_i$ generally (especially for the anisotropic case). In {\color{blue}\cite{JinLorenzo}}, a Wild sum based time relaxation scheme, first proposed in \cite{GPT}, was adopted to handle the stiffness in the collision term. In \cite{Dengjia}, a fully implicit scheme for the one-component semiconductor Boltzmann equation in the diffusive regime was developed, in which the more convenient BGK penalization method of Filbet-Jin \cite{Filbet-Jin} was adopted. Here we also use this approach. We reformulate (\ref{relax_1}) into the following form \begin{equation}\label{relax_1a}\partial_t r_i = \underbrace{\frac{1}{\epsilon^2}\left[Q_i(r_i)-P_i(r_i)\right]}_{\text{less stiff}}+ \underbrace{\frac{1}{\epsilon^2}P_i(r_i)}_{\text{stiff}}. \end{equation} The first term on the right hand side of (\ref{relax_1a}) is non-stiff, or less stiff and less dissipative compared to the second term, thus it can be discretized explicitly, which avoids inverting the operator $Q_i$. The second term on the right hand side of (\ref{relax_1a}) is stiff or dissipative, thus it will be treated implicitly. The discretized scheme for the system (\ref{relax_1a}) and (\ref{relax_2}) is given by \begin{align} &\label{Dis_Relax1}\displaystyle\frac{r_i^{\ast}-r_i^n}{\Delta t} = \frac{1}{\epsilon^2} \left[Q_i(r_i^n)-P_i(r_i^n)\right]+ \frac{1}{\epsilon^2}P_i(r_i^{\ast}), \\[6pt] &\label{Dis_Relax2}\displaystyle\frac{j_i^{\ast}-j_i^n}{\Delta t}=-\frac{1}{\epsilon^2}\left[\lambda_i\, j_i^{\ast} +(1-\epsilon^2\phi)(s_i v\cdot\nabla_x r_i^{\ast} \mp E^{\ast}\cdot\nabla_v r_i^{\ast})\right].
\end{align} where $P_i$ is the BGK operator, which is a linear operator and is asymptotically close to the collision term $Q_i(f)$, and is given by \begin{equation}\label{BGK} P_i(r_i) = \eta_i (\rho_i M_i(v) - r_i), \end{equation} where $\eta_i$ is some constant chosen as the maximum value of the Fr\'{e}chet derivative $\nabla Q_i(r_i)$ \cite{Filbet-Jin}. In particular, for the anisotropic semiconductor Boltzmann case, it is addressed in \cite{JinWang} that $\eta_i$ should be chosen to satisfy $\eta_i > \max_{v}\lambda_i(x,v)$ for $i=1, 2$, where $\lambda_i$ is the collision frequency defined in (\ref{freq1}). \\[6pt] {\bf{\large{A Discretized Scheme:}}} \\ For notational simplicity, we describe the spatial discretization in one dimension. Consider the spatial domain $\Omega=[x_L, x_R]$ which is partitioned into $N$ grid cells with a uniform mesh size $\Delta x=(x_R-x_L)/N$. Define the left boundary $x_L$ as $x_{1/2}$, right boundary $x_R$ as $x_{N+1/2}$, choose the spatial grid points $\displaystyle x_{i-1/2}=x_{1/2}+(i-1)\Delta x$, for $i=1, \cdots, N+1$. The $i$-th interior cell is $\displaystyle [x_{i-1/2},x_{i+1/2}]$, for $i=1,\cdots, N$, with the cell average at time level $t^n$ given by $$ U_i^n=\frac{1}{\Delta x}\int_{x_{i-1/2}}^{x_{i+1/2}} U(t^n, x,v)\, dx.$$ The velocity discretization is performed using spectral approximation based on the Hermite polynomials, which is equivalent to the moment method. We refer the reader to \cite{Schmeiser, JinLorenzo} for details. The scheme can be implemented as follows. \begin{itemize} \item Step 1. \quad Update $\rho_i^{\ast}$ and $r_i^{\ast}$. \\ Integrate (\ref{Dis_Relax1}) over $v$, note that $\displaystyle\int_{\mathbb R} Q_i(r_i)\, dv=0$ and $\displaystyle\int_{\mathbb R} P_i(r_i)\, dv=0$, then \begin{equation}\rho_i^{\ast}=\rho_i^n. \label{rho_star}\end{equation} Denote $$\theta_1^{(i)}=\frac{\Delta t}{\epsilon^2+\eta_i \Delta t}\,.
$$ By (\ref{Dis_Relax1}), (\ref{BGK}) and (\ref{rho_star}), one can update $r_i^{\ast}$: \begin{equation} r_i^{\ast}=r_i^n + \theta_1^{(i)}\,Q_i(r_i^n). \end{equation} -- Step 1.1. One can use any Poisson solver such as the spectral method to solve for $\Phi$, $$\gamma\, \Delta_x\Phi =\rho_1-\rho_2-C(x), $$ then update the electric field $E^{\ast}$ by using the equation $E=-\nabla_x\Phi$ and a second-order spatial discretization. \item Step 2. \quad Update $j_i^{\ast}$. \\ Denote $$\theta_2^{(i)}=\frac{\epsilon^2}{\epsilon^2+\lambda_i\Delta t}\,, \qquad \theta_3^{(i)}=\frac{\Delta t\,(1-\epsilon^2\phi)}{\epsilon^2 + \lambda_i\Delta t}\,. $$ (\ref{Dis_Relax2}) can be solved explicitly since we already have $r_i^{\ast}$, \begin{equation}\label{step2} j_i^{\ast}=\theta_2^{(i)}\,j_i^n - \theta_3^{(i)}\,(s_i v\cdot\nabla_x r_i^{\ast} \mp E^{\ast}\cdot\nabla_v r_i^{\ast}), \end{equation} where $s_1=1$ and $s_2=\beta$. The spatial derivative of $f$ that appears in (\ref{step2}) is approximated using a central difference, which allows one to implement the scheme explicitly and guarantee second-order accuracy. \item Step 3. Update $r_i^{n+1}$, $j_i^{n+1}$ in the transport step. \\ For notational simplicity, we focus on the case $i=1$. To define the numerical fluxes we use the second-order upwind scheme (with slope limiter) in the spatial direction (\cite{JPT, Jin_Xin}). In the $x$-direction the Riemann invariants are $$U_1=\frac{1}{2}(r_1+\phi^{-\frac{1}{2}}j_1), \qquad\qquad V_1=\frac{1}{2}(r_1-\phi^{-\frac{1}{2}}j_1), $$ which move with the characteristic speed $\pm\sqrt{\phi}$\,.
The second-order upwind discretization of $r\pm\phi^{-\frac{1}{2}}j$ (drop the subscript $1$ in $r_1$, $j_1$) is given by \begin{align*} &\displaystyle\frac{1}{2}(r+\phi^{-\frac{1}{2}}j)_{i+\frac{1}{2}} =\frac{1}{2}(r+\phi^{-\frac{1}{2}}j)_{i} + \frac{\Delta x}{4}\mu_i^{+}, \\[6pt] &\displaystyle\frac{1}{2}(r-\phi^{-\frac{1}{2}}j)_{i+\frac{1}{2}} =\frac{1}{2}(r-\phi^{-\frac{1}{2}}j)_{i+1} - \frac{\Delta x}{4}\mu_{i+1}^{-}, \end{align*} where $\mu_{i}^{\pm}$ are the slope limiters of $r\pm\phi^{-\frac{1}{2}}j$ on the $i$-th cell at $(\ast)$-th time step. For $v>0$, let $\tau=\sqrt{\phi}\, v\,\frac{\Delta t}{\Delta x}>0$, then \begin{align} &\displaystyle r_i^{n+1}=(1-\tau)r_i^{\ast} +\frac{\tau}{2}(r_{i+1}^{\ast} + r_{i-1}^{\ast}) - \frac{\tau}{2\sqrt{\phi}}(j_{i+1}^{\ast} - j_{i-1}^{\ast})\notag \\[6pt] &\label{r1}\displaystyle\qquad\quad+\frac{\tau}{4}\Delta x (-\mu_i^{+}-\mu_{i+1}^{-}+\mu_{i-1}^{+}+\mu_i^{-})\pm\Delta t E^{\ast}\cdot\nabla_v j_i^{\ast} + \Delta t\, I_{i, \text{plus}}^{\ast}\,, \\[6pt] &\displaystyle j_i^{n+1}=(1-\tau)j_i^{\ast} + \frac{\tau}{2}(j_{i+1}^{\ast}+j_{i-1}^{\ast})-\frac{\sqrt{\phi}\tau}{2}(r_{i+1}^{\ast} -r_{i-1}^{\ast}) \notag\\[6pt] &\label{j1}\displaystyle\qquad\quad+\frac{\tau}{4}\sqrt{\phi}\Delta x (-\mu_i^{+}+\mu_{i+1}^{-}+\mu_{i-1}^{+}-\mu_i^{-})\pm\phi\, \Delta t E^{\ast}\cdot\nabla_v r_i^{\ast} + \Delta t\, I_{i, \text{minus}}^{\ast}\,. \end{align} The slope limiter is defined by $$\mu_i^{\pm}=\frac{1}{\Delta x}\left[\pm r_{i\pm1}+\phi^{-\frac{1}{2}}j_{i\pm 1}\mp r_i -\phi^{-\frac{1}{2}}j_i\right]\psi(\theta_i^{\pm}), $$ with $$\theta_i^{\pm}=\left(\frac{r_i\pm\phi^{-\frac{1}{2}}j_i-r_{i-1}\mp\phi^{-\frac{1}{2}}j_{i-1}}{r_{i+1}\pm\phi^{-\frac{1}{2}} j_{i+1}-r_i\mp\phi^{-\frac{1}{2}}j_i}\right)^{\pm}, $$ and $\psi$ is the particular slope limiter function. A simple minmod slope limiter is chosen here, $$\psi(\theta)=\max\{0,\min\{1,\theta\}\}. 
$$ To update $r_2^{n+1}$, $j_2^{n+1}$, one needs to change $\tau$ to $\tau=\sqrt{\phi\beta}\,v\, \frac{\Delta t}{\Delta x}$, and $\phi$ to $\phi\beta$ in (\ref{r1}), (\ref{j1}), except that the term $\pm\phi\,\Delta t\, E^{\ast}\cdot\nabla_v r_i^{\ast}$ remains the same in (\ref{j1}). \end{itemize} \begin{remark} The velocity discretization is performed using the Hermite quadrature rule, see \cite{Klar, JinLorenzo, JinLiu}. We denote $N_v$ as the number of quadrature points used in the numerical tests. \end{remark} \section{The model with random inputs} \label{sec:4} In this section, the two-band semiconductor system with random inputs is considered. The collision kernels describing the transition rate between the same-species collisions or the generation-recombination process between different species can be uncertain, due to incomplete knowledge of the interaction mechanism. The uncertainties may also come from inaccurate measurement of the initial data, boundary data, and the doping profile $C(x,{\bf{z}})$. (\ref{model_1})--(\ref{model_3}) with random inputs is given by \begin{equation} \begin{aligned} \partial_t f_1 + \frac{1}{\epsilon}(v\cdot\nabla_x f_1-E\cdot\nabla_v f_1) &=\frac{1}{\epsilon^2}Q_1(f_1)(x,{\bf{z}})+ I_1(f_1,f_2)(x,{\bf{z}}), \\[6pt] \partial_t f_2 + \frac{1}{\epsilon}(\beta v\cdot\nabla_x f_2+E\cdot\nabla_v f_2) &=\frac{1}{\epsilon^2}Q_2(f_2)(x,{\bf{z}}) + I_2(f_1,f_2)(x,{\bf{z}}), \\[6pt] -\gamma \nabla_x E &=\rho_1-\rho_2-C(x,{\bf{z}}), \\[6pt] f_i(0,x,v,z)&=f_{i,\text{in}}(x,v,z). \end{aligned} \label{UQ} \end{equation} \subsection{Regularity and local sensitivity results} \label{Converg1} Conducting the convergence rate analysis on system (\ref{UQ}) with a self-consistent potential is complicated and remains a future work. For a discussion of Vlasov-Poisson-Fokker-Planck system with random initial data and small scalings, see \cite{JinZhu}. 
In this section, we consider the following system without electric potential (and let the mass ratio $\beta=1$ for simplicity), \begin{equation} \begin{aligned} \partial_t f_i + \frac{1}{\epsilon}v\cdot\nabla_x f_i &=\frac{1}{\epsilon^2}Q_i(f_i)(x,z)+ I_i(f_1,f_2)(x,z), \\[6pt] f_i(0,x,v,z)&=f_{i,\text{in}}(x,v,z), \qquad i=1,2, \qquad z\in I_z \subset \mathbb R\,. \end{aligned} \label{uq_1} \end{equation} We will use the hypocoercivity theory to prove the exponential convergence of the random solutions toward the (deterministic) global equilibrium, in addition to spectral accuracy and exponential decay of the numerical error of the gPC-SG method. {\color{red} This is an example that the framework studied in \cite{LiuJinUQ} for general class of collisional kinetic models with random inputs can be {\it generalized}. The main differences are: here we have a multi-species system; and the non-linear integral operators $I_1$, $I_2$ own a different scaling compared to that of the linear collision operators $Q_1$, $Q_2$. } Here is a brief review of the solution estimate in \cite{LiuJinUQ}: \begin{align} \label{BP} \left\{ \begin{array}{l} \displaystyle \partial_t f + \frac{1}{\varepsilon} v\cdot\nabla_x f = \frac{1}{\varepsilon^2}\mathcal C(f,f), \\[4pt] \displaystyle f(0,x,v,z) = f_{\text{in}}(x,v,z), \end{array}\right. \end{align} where we consider the incompressible Navier-Stokes or diffusion scaling. $\mathcal C$ is a general class of collision operators, both the collision kernels and the initial data depend on the random variable $z\in I_z$, with $I_z$ a compact domain. Under the perturbative setting, $f$ should be a small perturbation of the global equilibrium (Maxwellian) $\mathcal M$: \begin{equation} f=\mathcal M+ \varepsilon M h, \qquad \mathcal M=\frac{1}{(2\pi)^{\frac{d}{2}}}\, e^{-\frac{|v|^2}{2}}, \label{per-f} \end{equation} where $M=\sqrt{\mathcal M}$. 
Applying this $f$ into (\ref{BP}), then the fluctuation $h$ satisfies \begin{equation}\partial_t h + \frac{1}{\varepsilon} v\cdot\nabla_x h =\frac{1}{\varepsilon^2}\mathcal L(h)+\frac{1}{\varepsilon}\mathcal F(h,h), \label{INS-scaling} \end{equation} where $\mathcal L$ is the linearized (around $\mathcal M)$ collision operator, and $\mathcal F$ is the nonlinear remainder. \underline{\it{Notations: }} For two multi-indices $j$ and $l$ in $\mathbb N^{d}$, define $$\partial_l^j = \partial/\partial v_j\, \partial/\partial x_l\,. $$ For derivatives in $z$, we use the notation $$\partial_z^{\alpha} h = \partial^{\alpha}h\,. $$ Denote $||\cdot||_{\Lambda}:= ||\, ||\cdot||_{\Lambda_v}\, ||_{L^2_x}$. Define the Sobolev norms \begin{eqnarray} &&||h||_{H_{x,v}^s}^2 = \sum_{|j|+|l|\leq s}\, ||\partial_l^j h||_{L^2_{x,v}}^2\,, \qquad ||h||_{H_{x,v}^{s,r}}^2 = \sum_{|m|\leq r}\, ||\partial^m h||_{H_{x,v}^s}^2\,, \\ &&||h(x,v,\cdot)||_{H^{s}_{x,v}H_z^r}^2 = \int_{I_z}\, ||h||_{H_{x,v}^{s,r}}^2 \pi(z)dz, \end{eqnarray} in addition to the $\sup$ norm in $z$ variable, $$ ||h||_{H_{x,v}^{s,r} L_z^{\infty}}=\sup_{z\in I_z}\, ||h||_{H_{x,v}^{s,r}}\,. $$ The following estimates on $h$ and the spectral accuracy of the SG methods are proved in \cite{LiuJinUQ}: \underline{\it{Result I: }} Assume $||h(0)||_{H_{x,v}^s L_z^{\infty}} \leq C_{I}$, if $h$ is a solution of (\ref{INS-scaling}) in $H_{x,v}^s$ for all $z$, then \begin{equation}\label{thm2_1} ||h(t)||_{H_{x,v}^{s, r}L_z^{\infty}}\leq C_{I}\, e^{-\tau_s t}\,, \qquad ||h(t)||_{H_{x,v}^{s} H_z^r} \leq C_{I}\, e^{-\tau_s t}\,, \end{equation} where $C_I$, $\tau_s$ are positive constants independent of $\varepsilon$. It is shown in \cite{CN} {\color{red} that} the deterministic, linear relaxation model satisfies all the Assumptions H1--H4, by taking $||\cdot||_{\Lambda_v}=||\cdot||_{L^2_v}$, then $||\cdot||_{\Lambda}=||\cdot||_{L^2_{x,v}}$. 
Assumption H5 is also satisfied for the non-linear operators $I_1$, $I_2$, that is, for each $z\in I_z$, $\exists\, k_0\in\mathbb N$ and a constant $C>0$ such that $\forall\, k\geq k_0$, $$||I_{i}(h,h)||_{H^k_{x,v}}\leq C ||h||_{H^k_{x,v}}^2, $$ by using Sobolev embeddings and the Cauchy-Schwarz inequality, exactly the same as discussed in \cite{CN, MB}. The following assumptions on the collision kernels $\sigma_{i}$ ($i=1,2$) and $\sigma_{I}$ are needed: \begin{equation} \label{assump1} |\partial_{z}^{k}\sigma_{i}(x,v,w,z)|\leq C_b, \qquad |\partial_{z}^{k}\sigma_{I}(x,v,w,z)|\leq C_b^{\ast}, \qquad \forall\, k\geq 0\,. \end{equation} Under these conditions, one can easily check that Assumptions H1--H5 given in \cite{LiuJinUQ} still hold when uncertainties are from collision kernels. Let $$f_i=\mathcal M + \varepsilon M h_i, \qquad i=1, 2. $$ Plugging this into the system (\ref{uq_1}), the perturbed solution $h_i$ satisfies \begin{equation}\label{INS1}\partial_t h_i + \frac{1}{\varepsilon}v\cdot\nabla_x h_i =\frac{1}{\varepsilon^2}Q_i(h_i) + \varepsilon I_i(h_1, h_2). \end{equation} The generation-recombination process has a weaker effect than the collisions among particles, which leads to the non-linear operators $I_1$, $I_2$ having a different scaling than the linear operators $Q_1$, $Q_2$. Note that what was discussed in \cite{LiuJinUQ} for the scaled equation (\ref{INS-scaling}) remains valid for the problem we consider here, since the coefficient in front of the non-linear operator in (\ref{INS1}) and (\ref{INS-scaling}) has the relation: $\varepsilon < 1/\varepsilon$.
Based on the proof of Lemma 3.1 in section 3 in \cite{LiuJinUQ}, as a corollary, it is obvious to check that the perturbed solution $h_i$ for the two-species system has the following estimate: \begin{equation}\label{hi}\frac{d}{dt} ||h_i||_{\mathcal H_{\epsilon_{\perp}}^{s,r}}^2 \leq \bigg[K_1 \sum_{i=1}^{2} ||h_i||_{H^{s,r}}^2 - K_2\bigg] \left(\sum_{i=1}^{2} ||h_i||_{H_{\Lambda}^{s,r}}^2 \right), \end{equation} where the complicated definition of the norm $||\cdot||_{\mathcal H_{\epsilon_{\perp}}^s}$ is omitted, but one can check (2.20) in \cite{LiuJinUQ}. One just needs to know that $||\cdot||_{\mathcal H_{\epsilon_{\perp}}^s}$ is equivalent to $||\cdot||_{H^s}$, and that $||\cdot||_{H_{\Lambda}^{s,r}}=||\cdot||_{H^{s,r}}$ in our problem, then (\ref{hi}) becomes $$\frac{d}{dt}\left(\sum_{i=1}^{2} ||h_i||_{\mathcal H_{\epsilon_{\perp}}^{s,r}}^2\right) \leq \bigg[K_3 \sum_{i=1}^{2}||h_i||_{\mathcal H_{\epsilon_{\perp}}^{s,r}}^2-K_2\bigg] \left(\sum_{i=1}^{2}||h_i||_{H^{s,r}}^2\right), $$ where $K_1, K_2, K_3$ are all constants independent of $\varepsilon$ and $z$. If the initial data satisfies \begin{equation} \label{IC} ||h_1(0)||_{\mathcal H_{\epsilon_{\perp}}^{s,r}}^2 + ||h_2(0)||_{\mathcal H_{\epsilon_{\perp}}^{s,r}}^2 \leq \frac{K_2}{2 K_3}\,, \end{equation} then $$\frac{d}{dt}\left(\sum_{i=1}^{2} ||h_i||_{\mathcal H_{\epsilon_{\perp}}^{s,r}}^2 \right) \leq {\color{red} -\frac{K_2}{2}}\left(\sum_{i=1}^{2} ||h_i||_{H^{s,r}}^2 \right) \leq - \widetilde C \left(\sum_{i=1}^{2} ||h_i||_{\mathcal H_{\epsilon_{\perp}}^{s,r}}^2\right). $$ The last inequality is because $H^{s}$ norm is equivalent to $\mathcal H_{\epsilon_{\perp}}^{s}$ norm. 
\begin{theorem} \label{thm_f} If the assumptions (\ref{assump1}) and (\ref{IC}) for the random kernels and the initial data are satisfied, then the solution of each species has the following estimate: \begin{equation} ||h_i(t)||_{H_{x,v}^{s, r}L_z^{\infty}}\leq C_1\, e^{-\tau_1 t}\,, \qquad ||h_i(t)||_{H_{x,v}^{s} H_z^{r}} \leq C_1\, e^{-\tau_1 t}\,, \, i=1, 2, \end{equation} where $C_1$, $\tau_1$ are constants independent of $\varepsilon$ and $z$. \end{theorem} This result shows that the random perturbation in both the initial data and the collision kernel will decay exponentially, and the random solutions $f_1(t,x,v,z)$, $f_2(t,x,v,z)$ will both converge exponentially in time to the deterministic global Maxwellian $\mathcal M$. That is, the dependence on the random parameter $z$ of the two-band system is insensitive for long time. {\color{red} {\bf Remark.} Thanks to the small $\mathcal O(\varepsilon)$ scaling of the non-linear integral terms $I_1$, $I_2$, the analysis and conclusions presented in \cite{LiuJinUQ} can be extended here. Compared to the previous work, where a complete framework for the kinetic equations with multiple scales and uncertainties and its gPC-SG systems has been well established, the analysis conducted here is not as refined; yet it is a nice observation that the conclusions there can be adopted and generalized, since \cite{LiuJinUQ} does not directly address the kinetic equation whose right-hand side has a linear collision operator combined with a non-linear integral term of a different scaling. More importantly, this first attempt to study the bipolar semiconductor Boltzmann-Poisson system with random inputs from both the numerical and analytical points of view may inspire new directions of study. For example, conducting sensitivity analysis for the multi-species full Boltzmann equations with random inputs, which is more complicated and a non-trivial extension of the single-species problem studied in \cite{LiuJinUQ}. 
} \subsection{A gPC-SG Method} \label{subsec:gPC} Let $\mathbb P_P^n$ be the space of the $n$-variate polynomials of degree less than or equal to $P\geq 1$, and recall that $$ \text{dim}(\mathbb P_P^n)= \mbox{card}\{{\bf{k}} \in \mathbb N ^n, |{\bf{k}}|\leq P\}= \left(\begin{array}{c} n+P \\ P \end{array} \right):=K, $$ where we have denoted ${\bf{k}}= (k_1,\dots, k_n)$ and $|{\bf{k}}|=k_1+\dots+k_n$. We consider the inner product $$ \langle f, g\rangle_\pi = \int_{I_{{\bf{z}}}} f g\, \pi({\bf{z}})d{\bf{z}}, \quad \forall\, f, g \in L^2(\pi({\bf{z}})d{\bf{z}}),$$ where $L^2(\pi({\bf{z}})d{\bf{z}})$ is the usual weighted Lebesgue space, and its associated norm is $$ \|f\|_{L^2(\pi({\bf{z}})d{\bf{z}})}^2 = \int_{I_{{\bf{z}}}}f^2\, \pi({\bf{z}})d{\bf{z}}.$$ Consider a corresponding orthonormal basis $\{\psi_{{\bf{k}}}({\bf{z}})\}_{{\bf{k}}\in \mathbb N ^n, \, |{\bf{k}}|\leq P}$ of the space $\mathbb P_P^n$, where the degree of $\psi_{\bf{k}}$ is $\mbox{deg}(\psi_{\bf{k}})= |{\bf{k}}|$. In particular \begin{equation*} \langle \psi_{\bf{k}}, \psi_{\bf{l}}\rangle_\pi = \int_{I_{{\bf{z}}}} \psi_{\bf{k}}({\bf{z}})\psi_{\bf{l}}({\bf{z}})\pi({\bf{z}})d{\bf{z}}=\delta_{{\bf{k}}{\bf{l}}}, \qquad |{\bf{k}}|, \, |{\bf{l}}|\leq P, \end{equation*} where $\delta_{{\bf{k}}{\bf{l}}}$ is the Kronecker symbol. The commonly used pairs of $\{\psi_{\bf{k}}({\bf{z}})\}$ and $\pi({\bf{z}})$ include Hermite-Gaussian, Legendre-uniform, Laguerre-Gamma, etc \cite{XiuBook, XiuKarn}. The SG method seeks the solution as a projection onto the space $\mathbb P_P^n$ (the set of $n$-variate orthonormal polynomials of degree up to $P\geq 1$), that is \begin{equation}\label{soln u} f(t,x,v,{\bf{z}}) \approx f^K(t,x,v,{\bf{z}})=\sum_{k=1}^{K} \hat f_k(t,x,v)\psi_k({\bf{z}}). 
\end{equation} From this approximation, one can easily compute statistical moments, such as the mean and standard deviation, \begin{equation}\label{moments} \mathbb{E}(f)\approx \hat f_1, \qquad \text{SD}(f)\approx \big(\sum_{k=2}^{K}|\hat f_k|^2\big)^{1/2}\,. \end{equation} By the gPC-SG approach, one inserts the ansatzes \begin{equation} \label{ans1} \begin{aligned} f_i^K &=\sum_{k=1}^K \hat{(f_i)}_k \psi_k({\bf{z}})={\bf \hat f}_{i}\cdot {\boldsymbol\psi}({\bf{z}}), \qquad i=1, 2, \\[4pt] E^K &=\sum_{k=1}^K \hat{E}_k \psi_k({\bf{z}}) = {\bf \hat E}\cdot {\boldsymbol\psi}({\bf{z}}) \end{aligned} \end{equation} into system (\ref{UQ}) and enforces the residual to be orthogonal to the polynomial space spanned by $\psi_{\bf{k}}({\bf{z}})$, then \begin{equation} \begin{aligned} \partial_t \hat{(f_1)}_k + \frac{1}{\epsilon}[v\cdot\nabla_x \hat{(f_1)}_k - \sum_i\sum_j \hat{E}_i \cdot \nabla_v \hat{(f_1)}_j\, G_{ijk}] &=\frac{1}{\epsilon^2}({\bf Q}_1)_{k}({\bf \hat f}_1) + ({\bf I}_1)_{k}({\bf \hat f}_1, {\bf \hat f}_2), \\[6pt] \partial_t \hat{(f_2)}_k + \frac{1}{\epsilon}[\beta v\cdot\nabla_x \hat{(f_2)}_k + \sum_i\sum_j \hat{E}_i \cdot \nabla_v \hat{(f_2)}_j\, G_{ijk}] &=\frac{1}{\epsilon^2}({\bf Q}_2)_{k}({\bf \hat f}_2) + ({\bf I}_2)_{k}({\bf \hat f}_1, {\bf \hat f}_2), \\[6pt] - \gamma\nabla_x {\bf \hat E} &= (\hat{\rho}_1)_k - (\hat{\rho}_2)_k - L_k, \end{aligned} \label{E_k} \end{equation} where \begin{align*} &\displaystyle ({\bf Q}_i)_{k}({\bf \hat f}_{i}) = \int_{\mathbb R^d} (B_i(v,w))_k \left[M(v){\bf \hat f}_{i}(w) - M(w){\bf \hat f}_{i}(v)\right] dw, \qquad i=1,2, \\[6pt] &\displaystyle ({\bf I}_1)_{k}({\bf \hat f}_1, {\bf \hat f}_2) = \int_{\mathbb R^d} D_k(x,v,w)M_1(v)dw - \int_{\mathbb R^d} \sum_i \sum_j (\hat {f_1}(v))_i (\hat {f_2}(w))_j M_2(w) F_{ijk}(x,v,w) dw, \\[6pt] &\displaystyle ({\bf I}_2)_{k}({\bf \hat f}_1, {\bf \hat f}_2) = \int_{\mathbb R^d} D_k(x,w,v)M_1(w)dw - \int_{\mathbb R^d} \sum_i \sum_j (\hat {f_1}(w))_i (\hat {f_2}(v))_j M_2(v) 
F_{ijk}(x,v,w) dw, \end{align*} with $(B_i)_k$ the $k$-th row of $K\times K$ matrix $(B_i)_{mn}$ ($i=1, 2$), given by \begin{equation}\label{B_matrix} (B_i)_{mn}(x,v,w) = \int_{I_{{\bf{z}}}}\sigma_i(x,v,w,{\bf{z}})\psi_m({\bf{z}})\psi_n({\bf{z}})\pi({\bf{z}}) d{\bf{z}}. \end{equation} The tensors $(G_{ijk})_{K\times K \times K}$, $(F_{ijk})_{K\times K \times K}$ and the vectors $(L_k)_{K\times 1}$, $(D_k)_{K\times 1}$ are defined by \begin{equation} \begin{aligned} G_{ijk} &= \int_{I_{{\bf{z}}}}\psi_i({\bf{z}})\psi_j({\bf{z}})\psi_k({\bf{z}})\pi({\bf{z}}) d{\bf{z}}, \\[6pt] F_{ijk}(x,v,w) &=\int_{I_{{\bf{z}}}}\sigma_I(x,v,w,{\bf{z}})\psi_i({\bf{z}})\psi_j({\bf{z}})\psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}}, \\[6pt] L_k(x) &= \int_{I_{{\bf{z}}}}C(x,{\bf{z}}) \psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}}, \\[6pt] D_k(x,v,w) & = \int_{I_{{\bf{z}}}} \sigma_I(x,v,w,{\bf{z}})\psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}}. \end{aligned} \end{equation} \\[15pt] {\bf\large{A convergence rate analysis}} \\ Here is a brief review of the gPC error estimate in \cite{LiuJinUQ} for the single species model: \\ \underline{\it{Result II: }} Define the norm $$ ||h^e||_{H_{x,v}^{s} L_z^2}:= \int_{I_z}\, ||h^e||_{H_{x,v}^s}\, \pi(z)dz. $$ Under the technical conditions on the gPC polynomials: \begin{equation} \label{basis} ||\psi_k||_{L^{\infty}} \leq C k^p, \qquad \forall\, k, \end{equation} we have \begin{equation} ||h-h^K||_{H_{x,v}^{s}L_z^2} \leq C_{e}\, \frac{e^{-\lambda t}}{K^r}\,, \end{equation} with the constants $C_{e}, \,\lambda>0$ independent of $K$ and $\varepsilon$. 
We now give the main conclusion for the gPC error estimate for our two-band model: \begin{theorem} \label{thm_gPC} Under the assumption (\ref{basis}) for the gPC polynomials and (\ref{assump1}) for the random kernels, and assuming in addition that $\sigma_{I}$ is linear in $z$ with \begin{equation} \label{assump2} |\partial_{z}\sigma_{I}|\leq O(\varepsilon), \end{equation} then \begin{equation} ||h_i-h_i^K||_{H_{x,v}^{s}L_z^2} \leq C_2\, \frac{e^{-\tau_2 t}}{K^r}\,, \qquad\text{for } i=1, 2, \end{equation} where $C_2$, $\tau_2$ are constants independent of $\varepsilon$ and $z$. \end{theorem} The proof of this theorem is very similar to that in \cite{LiuJinUQ}, and we omit it here. Compared to \cite{LiuJinUQ}, one only needs to add up the estimates for $i=1$ and $i=2$, in the same way as shown in the proof of Theorem \ref{thm_f}. To conclude, Theorem \ref{thm_gPC} gives a {\it uniform} spectral convergence of the SG method for the system (\ref{uq_1}), with a convergence rate exponentially decaying in time, under suitable assumptions (\ref{assump1}), (\ref{basis}) and (\ref{assump2}). \\[20pt] {\bf\large{The even-odd decomposition method}}\\ We use the even-odd decomposition and insert the ansatzes $$ r_i^K = \sum_{k=1}^K \hat{(r_i)}_k \psi_k({\bf{z}}) ={\bf \hat r}_{i}\cdot {\boldsymbol\psi}({\bf{z}}), \qquad j_i^K = \sum_{k=1}^K \hat{(j_i)}_k \psi_k({\bf{z}}) ={\bf\hat j}_{i}\cdot {\boldsymbol\psi}({\bf{z}}), \qquad i=1, 2, $$ and $E^K$ in (\ref{ans1}) into systems (\ref{relax_1}) and (\ref{trans_1}). 
By the standard Galerkin projection, one gets the relaxation step \begin{align} &\label{s_relax1}\displaystyle \partial_t (\hat {r_i})_k = \frac{1}{\epsilon^2} ({\bf Q}_i)_{k}({\bf \hat r}_{i}), \\[6pt] &\label{s_relax2}\displaystyle \partial_t (\hat {j_i})_k = -\frac{1}{\epsilon^2} \left[(H_i)_k\, {\bf \hat j}_{i} + (1-\epsilon^2\Phi)(v\cdot\nabla_x (\hat r_i)_k \mp \sum_m\sum_n \hat E_m \cdot\nabla_v (\hat {r_i})_n G_{mnk})\right], \end{align} where $(H_i)_k$ is the $k$-th row of the matrix $(H_i)_{K\times K}$, given by $$(H_i)_{mn}(x,v) = \int_{I_{{\bf{z}}}}\lambda_{i}(x,v,{\bf{z}})\psi_m({\bf{z}})\psi_n({\bf{z}})\pi({\bf{z}})d{\bf{z}}, $$ with the matrix $B_i$ given in (\ref{B_matrix}). The transport step is given by \begin{align} \label{s_trans1}\displaystyle \partial_t (\hat r_i)_k + v\cdot\nabla_x (\hat j_i)_k \mp \sum_m\sum_n \hat E_m \cdot \nabla_v (\hat j_i)_n\, G_{mnk} &= ({\bf I}_{\text{i,plus}})_k, \\[6pt] \label{s_trans2}\displaystyle \partial_t (\hat j_i)_k + \Phi[s_i v\cdot \nabla_x (\hat r_i)_k \mp \sum_m \sum_n \hat E_m \cdot \nabla_v (\hat r_i)_n\, G_{mnk}] &= ({\bf I}_{\text{i,minus}})_k, \end{align} where \begin{equation*} \begin{aligned} ({\bf I}_{\text{1,plus}})_k &= \frac{1}{2}M_1(v) J_k^a(x,v,w) - \int_{\mathbb R^d} \sum_m \sum_n (\hat r_1(v))_m (\hat r_2(w))_n\, M_2(w) F_{mnk}(x,v,w)dw, \\[6pt] ({\bf I}_{\text{2,plus}})_k &= \frac{1}{2} J_k^c(x,v,w) - \int_{\mathbb R^d} \sum_m \sum_n (\hat r_1(w))_m (\hat r_2(v))_n\, M_2(v) F_{mnk}(x,v,w)dw, \\[6pt] ({\bf I}_{\text{1,minus}})_k &= \frac{1}{2}M_1(v) J_k^b(x,v,w) - \epsilon\int_{\mathbb R^d} \sum_m \sum_n (\hat j_1(v))_m (\hat r_2(w))_n\, M_2(w) F_{mnk}(x,v,w)dw, \\[6pt] ({\bf I}_{\text{2,minus}})_k &= \frac{1}{2} J_k^d(x,v,w) -\epsilon\int_{\mathbb R^d}\sum_m \sum_n (\hat r_1(w))_m (\hat j_2(v))_n\, M_2(w) F_{mnk}(x,v,w)dw, \end{aligned} \end{equation*} with $(J_k^a)_{K\times K}$, $(J_k^b)_{K\times K}$, $(J_k^c)_{K\times K}$ and $(J_k^d)_{K\times K}$ given by \begin{align*} &\displaystyle 
J_k^a(x,v,w)= \int_{I_{{\bf{z}}}}\int_{\mathbb R^d} \big(\sigma_I(x,v,w,{\bf{z}})+\sigma_I(x,-v,w,{\bf{z}})\big) dw\, \psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}}, \\[6pt] &\displaystyle J_k^b(x,v,w)= \int_{I_{{\bf{z}}}}\int_{\mathbb R^d} \big(\sigma_I(x,v,w,{\bf{z}})-\sigma_I(x,-v,w,{\bf{z}})\big) dw\, \psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}}, \\[6pt] &\displaystyle J_k^c(x,v,w)= \int_{I_{{\bf{z}}}}\int_{\mathbb R^d} \big(\sigma_I(x,v,w,{\bf{z}})+\sigma_I(x,-v,w,{\bf{z}})\big) M_1(w) dw\, \psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}}, \\[6pt] &\displaystyle J_k^d(x,v,w)= \int_{I_{{\bf{z}}}}\int_{\mathbb R^d} \big(\sigma_I(x,v,w,{\bf{z}})-\sigma_I(x,-v,w,{\bf{z}})\big) M_1(w) dw\, \psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}}. \end{align*} The fully discretized scheme for the system with random inputs is similar to how we solve the deterministic problem, introduced in section \ref{Det_scheme}, except that each term now is a vector analogy of the corresponding term in the deterministic problem. \subsection{A Stochastic AP Time-splitting} \label{sec:sAP} {\color{blue}Jin, Xiu and Zhu first introduced the notion of stochastic AP (s-AP) in the SG setting \cite{XZJ}. } s-AP schemes require that as $\varepsilon\to 0$, the SG for the model with uncertainties ($\mathcal F_z^{\varepsilon}$) automatically becomes a SG approximation for the limiting stochastic diffusion equation ($\mathcal F_z^0${\color{red}), which is the bipolar drift-diffusion equations (\ref{diff1}) in our case. } In this section, we formally prove that the time-splitting scheme (\ref{s_relax1})--(\ref{s_relax2}) and (\ref{s_trans1})--(\ref{s_trans2}) satisfies the s-AP property. As $\epsilon\to 0$, (\ref{s_relax1}) becomes \begin{equation}\label{s1} {\bf \hat r}_{i}= {\boldsymbol{\hat\rho}}_{i}M_i\,, \end{equation} a result proved in Lemma $3$ of \cite{JinLiu}. 
The second equation (\ref{s_relax2}) gives \begin{equation}\label{s2} (\hat j_i)_k = -\sum_l (H_i^{-1})_{kl} \left[ v\cdot \nabla_x(\hat r_i)_l \mp \sum_m\sum_n \hat E_m\nabla_v (\hat {r_i})_n\, G_{mnl}\right]. \end{equation} Inserting (\ref{s1}) and (\ref{s2}) into (\ref{s_trans1}) and integrating over $v\in\mathbb R$, one gets \begin{equation} \label{gPC_limit}\partial_t (\hat\rho_i)_k - \nabla_x \cdot\left[T_i \sum_l (H_i^{-1})_{kl} \bigg(\nabla_x (\hat\rho_i)_l \pm \sum_m\sum_n \hat E_m (\hat\rho_i)_n\, G_{mnl}\bigg)\right] =\int_{\mathbb R} ({\bf I}_{\text{i,plus}})_k\, dv, \end{equation} where $$T_i = \int_{\mathbb R} v\otimes v M_i(v)dv, $$ \begin{align} &\displaystyle \int_{\mathbb R} ({\bf I}_{\text{1,plus}})_k\, dv = \frac{1}{2}\int_{I_{{\bf{z}}}}\int_{\mathbb R}\int_{\mathbb R}M_1(v) \big(\sigma_I(x,v,w,{\bf{z}}) + \sigma_I(x,-v,w,{\bf{z}})\big)dwdv \,\psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}} \notag\\[6pt] &\label{int_I1}\displaystyle \qquad\qquad\qquad\qquad-\sum_m \sum_n (\hat\rho_1)_m (\hat\rho_2)_n \int_{\mathbb R}\int_{\mathbb R} M_1(v)M_2^2(w) F_{mnk}(x,v,w)\, dwdv, \\[6pt] &\displaystyle \int_{\mathbb R} ({\bf I}_{\text{2,plus}})_k\, dv = \frac{1}{2}\int_{I_{{\bf{z}}}}\int_{\mathbb R}\int_{\mathbb R}M_1(w) \big(\sigma_I(x,v,w,{\bf{z}}) + \sigma_I(x,-v,w,{\bf{z}})\big) dwdv \,\psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}} \notag \\[6pt] &\label{int_I2}\displaystyle \qquad\qquad\qquad\qquad -\sum_m \sum_n (\hat\rho_1)_m (\hat\rho_2)_n \int_{\mathbb R}\int_{\mathbb R} M_1(w)M_2^2(v) F_{mnk}(x,v,w)\, dwdv. \end{align} It is obvious that $\int_{\mathbb R}\, ({\bf I}_{\text{1,plus}})_k\, dv= \int_{\mathbb R}\, ({\bf I}_{\text{2,plus}})_k\, dv$. 
On the other hand, applying the ansatz $$\rho_i^K =\sum_{k=1}^K (\hat{\rho_i})_k \psi_k({\bf{z}}) = {\boldsymbol{\hat\rho}}_{i}\cdot\boldsymbol{\psi}({\bf{z}}), \qquad E^K =\sum_{k=1}^K \hat{E}_k \psi_k({\bf{z}}), $$ and conducting the Galerkin projection for the limiting drift-diffusion system (\ref{diff1}), one obtains \begin{equation} \label{diff_Gal} \partial_t (\hat\rho_i)_k - \nabla_x \cdot \left[ T_i \sum_{l} (S_i)_{kl}\bigg(\nabla_{x}(\hat\rho_i)_l \pm \sum_m\sum_n \hat E_m (\hat\rho_i)_n\, G_{mnl}\bigg) \right] = {\bf R}_{k}({\boldsymbol{\hat\rho}}_1, {\boldsymbol{\hat\rho}}_2), \end{equation} where $$(S_i)_{kl}=\int_{I_{{\bf{z}}}}\frac{1}{\lambda_i(x,v,{\bf{z}})}\psi_k({\bf{z}})\psi_l({\bf{z}})\pi({\bf{z}})d{\bf{z}},$$ with $\lambda_i$ defined in (\ref{freq1}), and \begin{align} &\displaystyle {\bf R}_{k}({\boldsymbol{\hat\rho}}_1, {\boldsymbol{\hat\rho}}_2) = \int_{I_{{\bf{z}}}}\int_{\mathbb R}\int_{\mathbb R} \sigma_I(v,w,{\bf{z}})M_1(v)dvdw\,\psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}} \notag\\[4pt] &\label{vec_R}\displaystyle \qquad\qquad\qquad - \sum_m\sum_n (\hat\rho_1)_m (\hat\rho_2)_n \int_{\mathbb R}\int_{\mathbb R} M_1(v)M_2^2(w) F_{mnk}(x,v,w)dvdw. \end{align} By a change of variable $w'=-w$, the first terms of (\ref{int_I1}), (\ref{int_I2}) and (\ref{vec_R}) are all equal, \begin{align*} &\quad\displaystyle\frac{1}{2}\int_{I_{{\bf{z}}}}\int_{\mathbb R}\int_{\mathbb R}M_1(v) \big(\sigma_I(x,v,w,{\bf{z}}) + \sigma_I(x,-v,w,{\bf{z}})\big)dwdv \,\psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}} \\[4pt] &\displaystyle=\int_{I_{{\bf{z}}}}\int_{\mathbb R}\int_{\mathbb R}\sigma_I(x,v,w,{\bf{z}})M_1(v)dvdw \, \psi_k({\bf{z}})\pi({\bf{z}})d{\bf{z}}, \end{align*} thus the right-hand-side of (\ref{gPC_limit}) and (\ref{diff_Gal}) are equal. 
We observe that the limiting scheme of the gPC-SG method given by (\ref{gPC_limit}) is almost exactly the same as the Galerkin system of the bipolar drift-diffusion equations given by (\ref{diff_Gal}), except for the diffusion coefficient matrices $(H_i)^{-1}$ and $S_i$. It has been demonstrated in \cite{JinLiu} that the matrix $(S_i)_{K\times K} \sim (H_i)^{-1}_{K\times K}$ with spectral accuracy, thus (\ref{gPC_limit}) is a good approximation of (\ref{diff_Gal}). This formally shows that with the deterministic AP solver introduced in section \ref{sec:3}, the fully discrete time and space approximations of the corresponding gPC-SG scheme introduced in section \ref{subsec:gPC} are s-AP, implying that as $\varepsilon\to 0$, with $\Delta t$, $\Delta x$ fixed, the gPC-SG scheme approaches the fully discrete gPC-SG approximation of the bipolar drift-diffusion equations. This will be demonstrated in our numerical tests. {\color{red} {\bf Remark.} With the non-linear generation-recombination integral operators, the proof is different from that in the previous work \cite{JinLiu}, where the gPC-SG scheme for the linear semiconductor Boltzmann equation with random inputs is studied. } \section{Numerical examples} \label{sec:6} In this section, several numerical tests are shown to illustrate the validity and effectiveness of our AP scheme for the deterministic problem (Test 1) and for the model with uncertainties (Test 2). In applications, people are often more interested in the solution statistics, such as the mean and standard deviation of the macroscopic physical quantities. The macroscopic quantities $\rho$, $\mu$ that stand for density and bulk velocity are defined by \begin{equation}\label{macro} \rho = \int_{\mathbb R} f(v)dv, \qquad \mu=\frac{1}{\rho}\int_{\mathbb R}f(v)v dv, \end{equation} and we denote the momentum $u=\int_{\mathbb R}f(v)v dv$ in the figures. 
Given the gPC coefficients $f_k$ of $f$, the statistical mean, variance and standard deviation are $$ E[f]\approx f_1, \qquad\text{Var}[f]\approx\sum_{k=2}^{K} f_{k}^2, \qquad SD[f]=\sqrt{\sum_{k=2}^{K} f_{k}^2}\,. $$ The computational domain is $x\in [0, 1]$ for all the numerical tests. $i=1$ stands for the electrons and $i=2$ stands for the holes. \newpage {\bf{\large Test 1: The deterministic model}} The equilibrium boundary condition in $x$ is assumed, $$f_i(x_L, v, t)=M_i(v),\, v>0\, ; \qquad f_i(x_R, v, t)=M_i(v), \, v<0\,. $$ The initial distribution is $f_i(x,v,t=0)=M_i(v)$, for $i=1, 2$. The collision and generation-recombination kernels are given by $$\sigma_1(v,w)=\sigma_2(v,w)=2, \qquad \sigma_{I}(v,w)=\frac{1}{\sqrt{\pi}}e^{-(v-w)^2}, $$ and $$ \beta=0.9, \qquad \gamma=0.002, \qquad \Phi(x_L)=0, \qquad \Phi(x_R)=5, $$ where $\Phi(x_L), \Phi(x_R)$ are the boundary data of the potential at $x_L$, $x_R$ respectively. $$ c(x) = 1-(1-m)\left[\tanh(\frac{x-x_1}{s})-\tanh(\frac{x-x_2}{s})\right], $$ with $s=0.02$, $m=(1-0.001)/2$, $x_1=0.3$, $x_2=0.7$. The parameters are chosen similarly as \cite{JinLorenzo}. \\[20pt] {\bf\large{Test 1 a): Convergence to the equilibrium test}} Denote the discretized numerical solution $f_i(x_l, v_m, T)$ and $\rho_i(x_l, T)$ by $f_i^{l,m}$ and $\rho_i^{l}$ ($i=1,2$), where $0\leq l\leq N_x$, $0\leq m\leq N_v$, with $N_x$, $N_v$ the number of mesh points used in $x$ and $v$ directions respectively, and $T$ is the final computation time. Figure \ref{Error_AP} shows the asymptotic error in $L^1(x,v)$ norm by the distance between the distribution function $f_i$ and its corresponding local equilibrium $M_{i, \text{eq}}$ ($i=1, 2$), defined by $$ ||f_i - M_{i, \text{eq}}||_{L^1}=||f_i - \rho_i M_i ||_{L^1} = \int_{\mathbb R}\int_{\mathbb R} |f_i-\rho_i M_i|\, dxdv =\sum_{l,m}|f_{i}^{l,m} -\rho_i^{l}M_i|\, \Delta x \Delta v, $$ with $M_i$ the absolute Maxwellian given in (\ref{Max}). 
We report the results for $\varepsilon=10^{-3}$ and $\varepsilon=10^{-4}$. As expected, the asymptotic error is $O(\varepsilon)$ before it saturates and the numerical errors from the spatial, temporal and velocity discretizations start to dominate. \begin{figure}[H] \centering \includegraphics[width=0.496\linewidth]{Error1AP.eps} \centering \includegraphics[width=0.496\linewidth]{Error2AP.eps} \caption{Test 1 a). The time evolution of $||f_i -M_{i, \text{eq}}||_{L^1}$ with respect to different $\varepsilon$. $\Delta x=0.01$, $N_v=20$, $\Delta t=2\times 10^{-6}$. } \label{Error_AP} \end{figure} \newpage{\bf\large{Test 1 b): The AP property}} Figure \ref{Test1b} demonstrates that when $\varepsilon$ is really small ($\varepsilon=10^{-5}$), the solutions of the kinetic system $\rho_1$, $\rho_2$ automatically become the solutions of the bipolar drift-diffusion system, known as the desired AP property. The forward Euler scheme in time and the central difference scheme in space are used to compute the numerical approximations (with fine grids) of the drift-diffusion equations. One can observe that the two sets of solutions are in good agreement. \begin{figure}[H] \centering \includegraphics[width=0.496\linewidth]{Diffusion1_LT.eps} \centering \includegraphics[width=0.496\linewidth]{Diffusion2_LT.eps} \caption{Test 1 b). Solutions at $T=0.2$. $\varepsilon=10^{-5}$, $\Delta x=0.01$, $N_v=20$, $\Delta t=2\times 10^{-6}$ for the kinetic model; and $N_v=32$, $\Delta x=5\times 10^{-3}$, $\Delta t=2\times 10^{-6}$ for the drift-diffusion system. } \label{Test1b} \end{figure} Test 2 below studies the model with random inputs and validates the efficiency and accuracy of our s-AP gPC-SG method. The stochastic collocation (SC) method \cite{XiuBook} is employed for numerical comparison with the gPC-SG method. We explain the basic idea. Let $\{ {\bf{z}}^{(j)}\}_{j=1}^{N_c} \subset I_{{\bf{z}}}$ be the set of collocation nodes and $N_c$ the number of collocation points. 
For each individual sample ${\bf{z}}^{(j)}$, $j=1, \cdots, N_c$, one applies the deterministic AP solver to obtain the solution at sampling points $f_j(t,x,v)=f(t,x,v,{\bf{z}}^{(j)})$, then adopts the interpolation approach to construct a gPC approximation, such as $$ f(t,x,v,{\bf{z}})=\sum_{j=1}^{N_c} f_j(t,x,v) l_j({\bf{z}}), $$ where $l_j({\bf{z}})$ depends on the construction method. The Lagrange interpolation is used here by choosing $l_j({\bf{z}}^{(i)})=\delta_{ij}$. In the collocation method, the integrals are approximated by $$\int_{I_{{\bf{z}}}} f(t,x,v,{\bf{z}})\pi({\bf{z}})d{\bf{z}}\approx\sum_{j=1}^{N_c}f(t,x,v,{\bf{z}}^{(j)})w^{(j)}, $$ where $\{w^{(j)}\}$ are the weights corresponding to the sample points $\{{\bf{z}}^{(j)}\}$ ($j=1, \cdots, N_c$) from the quadrature rule. To measure the difference in mean and standard deviation of the macroscopic quantities given in (\ref{macro}), we use $L^2$ norm in $x$ in Test 2 c), \begin{align*}\mathcal E_{\text{mean}}(t)=\left|\left|E[w^h]-E[w]\right|\right|_{L^2}, \\[4pt] \mathcal E_{\text{std}}(t)=\left|\left|SD[w^h]-SD[w]\right|\right|_{L^2}, \end{align*} where $w^h$ and $w$ are numerical solutions of gPC-SG method and reference solutions obtained by the collocation method. In Test 2 a), b), c), we will assume the random variable $z$ obeys a uniform distribution, defined on $[-1,1]$, so the Legendre gPC polynomial basis is used. We put different sources of random inputs including the random doping profile, random collision kernels and random initial data in Test 2 a), b), c) respectively. We report the results obtained for $\varepsilon=10^{-3}$ at output time $T=0.1$ in Test 2 a), b), c). \\[20pt] {\bf\large{Test 2 a): Random doping profile}} We assume a random doping profile $$ c(x,z) = \left[1-(1-m)\bigg(\tanh(\frac{x-x_1}{s})-\tanh(\frac{x-x_2}{s})\bigg)\right](1+0.5z), $$ and random collision kernels $$\sigma_1=\sigma_2=2+z, \qquad \sigma_{I}(v,w)=\frac{1}{\sqrt{\pi}}e^{-(v-w)^2}. 
$$ Other parameters, initial and boundary data are chosen the same as in Test 1. \\[20pt] {\bf\large{Test 2 b): Random collision kernels}} Let $$\sigma_1=\sigma_2=2+0.5z, \qquad \sigma_{I}(v,w)=\frac{1}{\sqrt{\pi}}e^{-(v-w)^2}, $$ and other parameters, initial and boundary data are chosen the same as in Test 1. \\[20pt] {\bf\large{Test 2 c): Random initial data}} Assume initial data with a smooth, random perturbation around its absolute Maxwellian, $$ f_i(x,v,t=0)=\rho(z)M_i(v), \qquad \rho(z)=\sin\left[\frac{\pi}{2}(z+1)\right], $$ for $i=1,2$. Other parameters and boundary data are chosen the same as in Test 1. In Figures \ref{Test2a}, \ref{Test2b} and \ref{Test2c}, the high-order stochastic collocation method with $16$ Legendre-Gauss quadrature points is used to obtain the reference solutions. A satisfactory agreement between gPC-SG solutions and the reference solutions is clearly observed. \\[20pt] {\bf\large{Test 2 d): Spectral convergence test}} In this test, the same data and parameters as Test 2 a) are used, where both the doping profile and collision kernels are random. Figure \ref{Error_K} shows a semi-log plot for the errors of mean and variance of physical quantities $\rho_1$, $\rho_2$ (density of electrons and holes) with $\varepsilon=10^{-3}$ or $\varepsilon=10^{-4}$, using different gPC orders $K$. Error plots for the mean and variance of the momentum give similar results, and we omit them here. We demonstrate a fast exponential convergence with respect to an increasing $K$. The errors quickly saturate at a modest gPC order $K=4$, then the errors from the temporal and spatial discretization start to dominate and contribute more than those from the gPC expansion. This result verifies the s-AP property, indicating that one can choose $K$ independent of $\varepsilon$. 
\begin{figure} [H] \centering \includegraphics[width=0.49\linewidth]{Test1a.eps} \centering \includegraphics[width=0.49\linewidth]{Test5a.eps} \centering \includegraphics[width=0.49\linewidth]{Test2a.eps} \centering \includegraphics[width=0.49\linewidth]{Test6a.eps} \centering \includegraphics[width=0.49\linewidth]{Test3a.eps} \centering \includegraphics[width=0.49\linewidth]{Test7a.eps} \centering \includegraphics[width=0.49\linewidth]{Test4a.eps} \centering \includegraphics[width=0.49\linewidth]{Test8a.eps} \caption{Test 2 a). Red solid line: reference solutions by the SC method. Blue line with circles: gPC-SG method with $K=4$. } \label{Test2a} \end{figure} \begin{figure} [H] \centering \includegraphics[width=0.49\linewidth]{Test1.eps} \centering \includegraphics[width=0.49\linewidth]{Test5.eps} \centering \includegraphics[width=0.49\linewidth]{Test2.eps} \centering \includegraphics[width=0.49\linewidth]{Test6.eps} \centering \includegraphics[width=0.49\linewidth]{Test3.eps} \centering \includegraphics[width=0.49\linewidth]{Test7.eps} \centering \includegraphics[width=0.49\linewidth]{Test4.eps} \centering \includegraphics[width=0.49\linewidth]{Test8.eps} \caption{Test 2 b). $\Delta x=0.01, \Delta t=2\times 10^{-6}, N_v=16$. Red solid line: reference solutions by the SC method with $N_c=16$. Blue line with circles: gPC-SG method with $K=4$. } \label{Test2b} \end{figure} \begin{figure} [H] \centering \includegraphics[width=0.49\linewidth]{Test1c.eps} \centering \includegraphics[width=0.49\linewidth]{Test5c.eps} \centering \includegraphics[width=0.49\linewidth]{Test2c.eps} \centering \includegraphics[width=0.49\linewidth]{Test6c.eps} \caption{Test 2 c). Red solid line: reference solutions by the SC method. Blue line with circles: gPC-SG method with $K=4$. } \label{Test2c} \end{figure} \begin{figure} [H] \centering \includegraphics[width=0.496\linewidth]{Test2_Error1.eps} \centering \includegraphics[width=0.496\linewidth]{Test2_Error2.eps} \caption{Test 2 d). 
Error plots for mean and standard deviation of $\rho_1$, $\rho_2$, $\varepsilon=10^{-3}$ (left) and $\varepsilon=10^{-4}$ (right). Output time is $T=0.005$. } \label{Error_K} \end{figure} \section{Conclusions} \label{sec:7} In this paper, we study the bipolar Boltzmann-Poisson model, both for the deterministic problem and the problem with uncertainties, with asymptotic behavior leading to the drift diffusion-Poisson system as the Knudsen number goes to zero. An s-AP scheme in the gPC-SG framework for the bipolar model with random inputs is designed, and its efficiency and accuracy are numerically verified. Using the hypocoercivity of kinetic operators, we conduct a convergence rate analysis for both the analytical solution and the gPC solution for a simpler model (without electric field), and conclude that their convergence rates decay exponentially in time, under suitable assumptions. A formal proof of the s-AP property and a {\it uniform} spectral convergence in the random space for the gPC-SG scheme is obtained. {\color{red}Overall, the author thinks that the development of stochastic asymptotic-preserving methods for the bipolar system with random inputs, combined with the sensitivity analysis and uniform spectral convergence with an exponential decay in time of the numerical error of the gPC-SG scheme in this project is a {\it first, new and nontrivial} contribution to this field of interest and important for potential applications. } Future work includes conducting a convergence rate analysis for the full model (with the self-consistent electric field), and designing and implementing AP schemes that describe the dynamics of a disparate mass binary gas or plasma system, at various time scales, based on the analysis conducted by Degond and Lucquin-Desreux in \cite{Degond1, Degond2}. {\color{blue} Here, we use a second order space discretization and a first order time splitting, similar to that proposed in \cite{JinLorenzo, JPT2}. 
It would be nice to improve the first order time approximation and develop a fully second order scheme, for example, by adopting the method introduced in \cite{GL}. This is also considered as future work. } \section*{Acknowledgement} The author would like to thank Prof. Shi Jin and Prof. Irene Gamba for bringing this project to the author's attention and for helpful discussions. {\color{magenta} The author also appreciates both referees' comments, which helped improve the quality of this paper. } \newpage \section*{Appendix} \renewcommand{\theequation}{A.\arabic{equation}} (i) We first show the following two equations needed when deriving the system (\ref{EO1}) from (\ref{EO}): \begin{equation}\label{arg1}2\int \sigma(v,w)r(w)dw=\int \sigma(v,w)f(w)dw+\int \sigma(-v,w)f(w)dw, \end{equation} and \begin{equation}\label{arg2}\int\sigma(v,w)j(w)dw=\frac{1}{2\epsilon}\left[\int \sigma(v,w)f(w)dw-\int \sigma(-v,w)f(w)dw\right], \end{equation} for $v>0$. Denote $R(v)=\int \sigma(v,w)r(w)dw$, then \begin{align} &\displaystyle R(v)=\int_{w>0}\sigma(v,w)r(w)dw+\int_{w<0}\sigma(v,w)r(w)dw =\int_{w>0}\sigma(v,w)r(w)dw+\int_{w>0}\sigma(v,-w)r(w)dw \notag\\[4pt] &\label{R}\displaystyle\qquad=\frac{1}{2}\int_{w>0}\sigma(v,w)\left[f(w)+f(-w)\right]dw+\frac{1}{2}\int_{w>0}\sigma(v,-w)\left[f(w)+f(-w)\right]dw. \end{align} For $v>0$, the RHS of (\ref{arg1}) is given by \begin{align} &\displaystyle\qquad\int\sigma(v,w)f(w)dw+\int\sigma(-v,w)f(w)dw \notag\\[4pt] &\displaystyle\label{RHS1}=\int_{w>0}\sigma(v,w)f(w)dw+\int_{w<0}\sigma(v,w)f(w)dw +\int_{w>0}\sigma(-v,w)f(w)dw+\int_{w<0}\sigma(-v,w)f(w)dw \\[4pt] &\displaystyle\label{RHS2}=\int_{w>0}\sigma(v,w)f(w)dw+\int_{w>0}\sigma(v,-w)f(-w)dw +\int_{w>0}\sigma(v,-w)f(w)dw+\int_{w>0}\sigma(v,w)f(-w)dw \\[4pt] &\displaystyle \notag = 2 R(v). \end{align} The last step is obvious from (\ref{R}). To check the second equality, we use the change of variable $w'=-w$, the rotational invariance, and the symmetry of $\sigma$. 
\noindent The third term of (\ref{RHS1}) equals \begin{align*} &\qquad\int_{w>0}\sigma(-v,w)f(w)dw=\int_{w'<0}\sigma(-v,-w')f(-w')dw'=\int_{w'<0}\sigma(v,w')f(-w')dw' \\[2pt] &=\int_{w'<0}\sigma(w',v)f(-w')dw'=\int_{w>0}\sigma(-w,v)f(w)dw=\int_{w>0}\sigma(v,-w)f(w)dw, \end{align*} which is the third term of (\ref{RHS2}). The fourth term of (\ref{RHS1}) equals $$\int_{w<0}\sigma(-v,w)f(w)dw=\int_{w'>0}\sigma(-v,-w')f(-w')dw'=\int_{w>0}\sigma(v,w)f(-w)dw, $$ which is the fourth term of (\ref{RHS2}). It is obvious that the first and second terms of (\ref{RHS1}) equal the corresponding terms of (\ref{RHS2}), respectively. Thus we have proved (\ref{arg1}). Similarly, one can prove (\ref{arg2}); then we have $$\int\sigma(v,w)j(w)dw=0, $$ since $j$ is an odd function. \\[10pt] (ii) We now derive the definitions for the operators $I_{\text{i,plus}}$, $ I_{\text{i,minus}}$. For $v>0$, one has \begin{align} &\displaystyle \label{eqn0}\quad\frac{1}{2} \left[I_1(f_1,f_2)(v)+I_1(f_1,f_2)(-v)\right] \\[4pt] &\displaystyle \label{eqn1}=\frac{1}{2}\int \left( \sigma_I(v,w) + \sigma_I(-v,w)\right)dw\, M_1(v) \\[4pt] &\displaystyle\quad-\int \sigma_I(v,w)r_2(w)M_2(w)dw\, r_1(v) -\epsilon \int \sigma_I(v,w)j_2(w)M_2(w)dw\, j_1(v) \notag\\[4pt] &\displaystyle =\frac{1}{2}\int \left( \sigma_I(v,w) + \sigma_I(-v,w)\right)dw\, M_1(v)-\int \sigma_I(v,w)r_2(w)M_2(w)dw\, r_1(v) \notag\\[4pt] &\displaystyle \notag := I_{\text{1,plus}}(r_1,r_2), \end{align} where $j$ being an odd function is used in the second equality. 
To derive (\ref{eqn1}) from (\ref{eqn0}), note that \begin{align*} &\displaystyle\int \sigma_I(v,w)f_2(w)M_2(w)dw\, f_1(v) + \int\sigma_I(-v,w)f_2(w)M_2(w)dw\, f_1(-v) \\[4pt] &\displaystyle=\left(\int_{w>0}\sigma_I(v,w)f_2(w)M_2(w)dw +\int_{w>0}\sigma_I(v,-w)f_2(-w)M_2(w)dw \right) \left(r_1(v)+\epsilon j_1(v)\right) \\[4pt] &\displaystyle\quad+ \left(\int_{w>0}\sigma_I(v,-w)f_2(w)M_2(w)dw +\int_{w>0}\sigma_I(v,w)f_2(-w)M_2(w)dw\right) \left(r_1(v)-\epsilon j_1(v)\right), \end{align*} and \begin{align*} &\quad\displaystyle\int\sigma_I(v,w)r_2(w)M_2(w)dw \\[4pt] &=\displaystyle\frac{1}{2}\int_{w>0}\sigma_I(v,w)\left(f_2(w)+f_2(-w)\right)M_2(w)dw +\frac{1}{2}\int_{w>0}\sigma_I(v,-w)\left(f_2(w)+f_2(-w)\right)M_2(w)dw, \end{align*} and also \begin{align*} &\quad\displaystyle\int\sigma_I(v,w)j_2(w)M_2(w)dw \\[4pt] &=\displaystyle\frac{1}{2}\int_{w>0}\sigma_I(v,w)\left(f_2(w)-f_2(-w)\right)M_2(w)dw -\frac{1}{2}\int_{w>0}\sigma_I(v,-w)\left(f_2(w)-f_2(-w)\right)M_2(w)dw, \end{align*} thus it is easy to see that (\ref{eqn1}) equals to (\ref{eqn0}). We derived the definition for $I_{1,\text{plus}}$, which can be written as a function of $r_1$ and $r_2$. Similarly for $I_{\text{1,minus}}$, one gets \begin{align} &\displaystyle\quad\frac{1}{2} \left[I_1(f_1,f_2)(v)-I_1(f_1,f_2)(-v)\right] \notag \\[4pt] &\displaystyle=\frac{1}{2}\int \left(\sigma_I(v,w) -\sigma_I(-v,w)\right)dw M_1(v) \notag\\[4pt] &\displaystyle\quad-\epsilon\int \sigma_I(v,w)j_2(w)M_2(w)dw r_1(v) - \epsilon \int \sigma_I(v,w)r_2(w)M_2(w)dw\, j_1(v) \notag \\[4pt] &\displaystyle=\frac{1}{2}\int \left(\sigma_I(v,w) -\sigma_I(-v,w)\right)dw M_1(v)- \epsilon \int \sigma_I(v,w)r_2(w)M_2(w)dw\, j_1(v) \notag\\[4pt] &\displaystyle\notag:= I_{\text{1,minus}}(r_2, j_1). \end{align} $I_{\text{2,plus}}$, $I_{\text{2,minus}}$ can be similarly obtained, and we omit the details. The definitions of these four operators are given in equations (\ref{I_DEF}). \bibliographystyle{siam}
1,108,101,565,899
arxiv
\section{Introduction} Traversable wormholes are solutions to the Einstein field equation that violate the classical energy conditions and are primarily useful as ``gedanken-experiments'' and as a theoretician's probe of the foundations of General Relativity (GR). They are obtained by solving the Einstein field equation in the reverse direction, namely, one first considers an interesting and exotic spacetime metric, then finds the matter source responsible for the respective geometry. It is interesting to note that they allow ``effective'' superluminal travel, although the speed of light is not surpassed {\it locally}, and generate closed timelike curves, with the associated causality violations. In this rapporteur article, we consider a brief historical review ranging from the Flamm-Einstein-Rosen bridge,\cite{Flamm,Einstein-Rosen} to the geon wormhole-like structure obtained by Wheeler in 1955,\cite{geons} to the modern renaissance of wormhole physics.\cite{Morris,Visser} \subsection{The Einstein-Rosen bridge} Wormhole physics can originally be traced back to Flamm in 1916,\cite{Flamm} when he analyzed the then recently discovered Schwarzschild solution. One finds next that wormhole-type solutions were considered, in 1935, by Einstein and Rosen (ER),\cite{Einstein-Rosen} where they constructed an elementary particle model represented by a ``bridge'' connecting two identical sheets. This mathematical representation of physical space being connected by a wormhole-type solution was denoted an ``Einstein-Rosen bridge''. Indeed, ER were attempting to build a geometrical model of a physical elementary ``particle'' that is finite and singularity-free. They based their discussion in terms of neutral and quasicharged ``bridges'' across a double-sheeted physical space. 
However, these can easily be generalized.\cite{Visser} It is important to emphasize that at the time ER were writing, the notions of ``coordinate singularity'' and ``physical singularity'' were not cleanly separated: It was supposed that the event horizon was the singularity. In this section we follow Ref. \refcite{Visser} closely. The neutral Einstein-Rosen bridge is an observation that a suitable coordinate change seems to make the Schwarzschild (coordinate) singularity disappear, at $r=2M$. More specifically, ER discovered that certain coordinate systems naturally cover only two asymptotically flat regions of the maximally extended Schwarzschild spacetime. To see this, consider the ordinary Schwarzschild geometry: \begin{equation} ds^2=-\left( 1-\frac{2M}{r} \right)\,dt^2 + \left( 1-\frac{2M}{r} \right)^{-1}\,dr^2 + r^2 d\Omega^2 \,, \end{equation} followed by a coordinate change $u^2=r-2M$, so that the line element is represented by the ER form: \begin{equation} ds^2=-\frac{u^2}{u^2+2M}\,dt^2 + 4\left(u^2+2M \right)\,du^2 + \left(u^2+2M \right)^2\, d\Omega^2 \,, \end{equation} with $u\in (-\infty, + \infty)$. Note that this coordinate change discards the region containing the curvature singularity $r \in [0,2M)$. The region near $u=0$ is interpreted as a ``bridge'' connecting the asymptotically flat region near $u= + \infty$ with the asymptotically flat region near $u=-\infty$.\cite{Einstein-Rosen} To justify the ``bridge'' appellation, consider a spherical surface, with constant $u$, so that the area of the surface is given by $A(u) = 4\pi(2M+u^2 )^2$. The latter possesses a minimum at $u=0$, with $A(0)= 4\pi(2M)^2$, which in modern terminology is defined as the ``throat'', while the nearby region is denoted the bridge, or the ``wormhole''. Thus, the neutral ``Einstein-Rosen'' bridge, or the ``Schwarzschild wormhole'', is identical to a part of the maximally extended Schwarzschild geometry. 
However, this wormhole is non-traversable, as the throat will pinch off before an observer may traverse the throat.\cite{Visser} Relative to the quasi-charged Einstein-Rosen bridge, one starts off with the Reissner-Nordstrom metric: \begin{equation} ds^2=-\left( 1-\frac{2M}{r} +\frac{Q^2}{r^2} \right)\,dt^2 + \left( 1-\frac{2M}{r} +\frac{Q^2}{r^2} \right)^{-1}\,dr^2 + r^2 d\Omega^2 \,. \end{equation} In order to obtain the bridge construction, ER reversed the sign of the electromagnetic stress-energy tensor, which implies a negative energy-density. \cite{Einstein-Rosen} Considering $M=0$, with a coordinate change $u^2= r^2-\varepsilon^2/2$, results in: \begin{equation} ds^2=-\frac{u^2}{u^2+\varepsilon^2/2}\,dt^2 + 4\left(u^2+\varepsilon^2/2 \right)^2\,du^2 + \left(u^2+\varepsilon^2/2 \right)^2\, d\Omega^2 \,, \end{equation} which is indeed a very peculiar geometry, as it represents a massless, quasicharged object, with a negative energy density, possessing an horizon at $r= \varepsilon$ or $u=0$. In fact, this was the object that ER wished to interpret as an ``electron''. Thus, the key ingredient of the bridge construction is the existence of an event horizon. The ER bridge is a coordinate artifact arising from choosing a coordinate patch, which is defined to double-cover the asymptotically flat region exterior to the black hole event horizon. One may easily consider generalizations of these constructions (see Ref. \refcite{Visser} for more details). \subsection{Geons and spacetime foam} After the pioneering work by Einstein and Rosen, in 1935, the field lay dormant for approximately two decades. In 1955, John Wheeler was beginning to be interested in topological issues in GR.\cite{geons} It is interesting to note that Wheeler considered a multiply-connected spacetime, where two widely separated regions were connected by a tunnel, which was denoted by a ``geon''. 
The ``geon'' concept denoted a ``gravitational-electromagnetic entity'', which are hypothesized solutions to the coupled Einstein-Maxwell field equations. In modern language the geon may be considered as a hypothetical ``unstable gravitational-electromagnetic quasisoliton''.\cite{Visser} Furthermore, Wheeler's concept can be used as the basis for building a model of nonzero charge with an everywhere ``zero charge density'', where one of the tunnel mouths will ``send forth lines of force into the space, and appear to have a charge''. While the other mouth ``must manifest an equal and opposite charge''.\cite{geons} Essentially, two routes of research were then available. First, intensive research was dedicated to the classical dynamics of these tunnel configurations, assuming their possible existence. Second, the investigation of the quantum gravitational processes that might give rise to such configurations were explored. This led Wheeler to propose the concept of ``spacetime foam''.\cite{Visser,wheeler1} In 1957, Misner and Wheeler presented an extensive analysis, where Riemannian geometry of manifolds of nontrivial topology was investigated with an ambitious view to explaining all of physics.\cite{Misner:1957mt} Indeed, this work was one of the first uses of abstract topology, homology, cohomology, and differential geometry in physics,\cite{Visser} and their point of view is best summarised by their phrase: ``Physics is geometry''. This is also the first paper\cite{Misner:1957mt} that introduces the term ``wormhole''. 
It is interesting to note that Misner and Wheeler considered that the existing well-established ``already unified classical theory'' allows one to describe in terms of empty curved space,\cite{Misner:1957mt} the following concepts: {\it gravitation without gravitation}; {\it electromagnetism without electromagnetism}; {\it charge without charge}; and {\it mass without mass} (where around the mouth of the ``wormhole'' lies a concentration of electromagnetic energy that gives mass to this region of space). The objective of Misner and Wheeler was essentially to use the source-free Maxwell equations, coupled to Einstein gravity, in the context of nontrivial topology, to build models for classical electrical charges and all other particle-like entities in classical physics.\cite{Visser} It is now known that these geon solutions are unstable, so that classically the tunnels will collapse to form black holes, which inevitably hides the interesting multiply-connected topology behind event horizons. Wheeler also noted the overwhelming importance of the Planck scale in gravitational physics, where at distances below the Planck length quantum fluctuations are considered to be extremely large, so that linearized theory breaks down and the quantum physics of full nonlinear Einstein gravity must be faced.\cite{Visser} Once the metric fluctuations become nonlinear and strongly interacting, one may expect that spacetime is flooded with a ``foamlike'' structure. In this context, the phrase ``spacetime foam'' often refers to Wheeler's suggestion that the geometry and topology of space might be constantly fluctuating. An outstanding question is whether these fluctuations may induce a change in topology, in order to form microscopic wormholes. 
\subsection{Modern renaissance of wormhole physics} After the geon solutions devised by Wheeler and Misner, there is a thirty year gap between their original work and the 1988 Morris-Thorne renaissance of wormhole physics.\cite{Morris} In fact, despite the fact that considerable effort was invested in attempting to understand the ``spacetime foam'' picture and the ``geon'' concept, during this period the geonlike-wormhole structures seem to have been considered a mere curiosity and were inevitably relegated to a backstage. However, isolated pieces of work did appear, such as the Homer Ellis' drainhole\cite{homerellis,homerellis2} concept and Bronnikov's tunnel-like solutions,\cite{bronikovWH} in the 1970s. It is only in 1988 that a full-fledged renaissance of wormhole physics took place, through the seminal paper by Morris and Thorne.\cite{Morris} As effective technology, traversable wormholes are greatly lacking, and the fundamental approach was to ask the question: ``What do the laws of physics permit?'' In finding wormhole solutions, one adopts the reverse philosophy in solving the Einstein field equation, namely, one first considers an interesting and exotic spacetime metric, then finds the matter source responsible for the respective geometry. In this manner, it was found that some of these solutions possess a peculiar property, namely ``exotic matter'', involving a stress-energy tensor that violates the null energy condition (NEC). These geometries also allow closed timelike curves, with the respective causality violations.\cite{Visser,Morris:1988tu,Lobo:2010sz} Another interesting feature of these spacetimes is that they allow ``effective'' superluminal travel, although, locally, the speed of light is not surpassed. 
These solutions are primarily useful as ``gedanken-experiments'' and as a theoretician's probe of the foundations of GR, and will be extensively reviewed throughout this review paper.\cite{Visser,Lobo:2007zb} \section{Wormhole physics} \subsection{Static geometry: Spacetime metric and field equations} Consider the following static and spherically symmetric wormhole solution \begin{equation} ds^2=-e ^{2\Phi(r)} \,dt^2+\frac{dr^2}{1- b(r)/r}+r^2 \,(d\theta ^2+\sin ^2{\theta} \, d\phi ^2) \,, \label{metricwormhole} \end{equation} where $\Phi(r)$ and $b(r)$ are arbitrary functions of the radial coordinate $r$. $\Phi(r)$ is denoted the redshift function, for it is related to the gravitational redshift, and $b(r)$ is denoted the shape function, as can be shown by embedding diagrams, it determines the shape of the wormhole.\cite{Morris} For the wormhole to be traversable it must have no horizons, which implies that $g_{tt}=-e^{2\Phi(r)}\neq 0$, so that $\Phi(r)$ must be finite everywhere. The coordinate $r$ is non-monotonic in that it decreases from $+\infty$ to a minimum value $r_0$, representing the location of the throat of the wormhole, where $b(r_0)=r_0$, and then it increases from $r_0$ to $+\infty$. Although the metric coefficient $g_{rr}$ becomes divergent at the throat, which is signalled by the coordinate singularity, the proper radial distance $ l(r)=\pm\,\int_{r_0}^r{[1-b(r)/r]^{-1/2}}\,dr$ is required to be finite everywhere. The proper distance decreases from $l=+\infty$, in the upper universe, to $l=0$ at the throat, and then from zero to $-\infty$ in the lower universe. 
Using the Einstein field equation, $G_{\mu\nu}=8\pi \,T_{\mu\nu}$ (with $c=G=1$), we obtain the following stress-energy scenario \begin{eqnarray} \rho(r)&=&\frac{1}{8\pi} \;\frac{b'}{r^2} \label{rhoWH}\,,\\ p_r(r)&=&\frac{1}{8\pi} \left[2 \left(1-\frac{b}{r} \right) \frac{\Phi'}{r} -\frac{b}{r^3}\right] \label{prWH}\,,\\ p_t(r)&=&\frac{1}{8\pi} \left(1-\frac{b}{r}\right)\left[\Phi ''+ (\Phi')^2- \frac{b'r-b}{2r(r-b)}\Phi' -\frac{b'r-b}{2r^2(r-b)}+\frac{\Phi'}{r} \right] \label{ptWH}\,, \end{eqnarray} where $\rho(r)$ is the energy density, $p_r(r)$ is the radial pressure, and $p_t(r)$ is the lateral pressure measured in the orthogonal direction to the radial direction. Using the conservation of the stress-energy tensor, $T^{\mu\nu}{}_{;\nu}=0$, we obtain the following equation \begin{equation} p_r'=\frac{2}{r}\,(p_t-p_r)-(\rho +p_r)\,\Phi ' \label{prderivative} \,, \end{equation} which can be interpreted as the relativistic Euler equation, or the hydrostatic equation of equilibrium for the material threading the wormhole. Note that one now has three equations with five unknown functions of the radial coordinate. Several strategies to solve these equations are available, for instance, one can impose an equation of state,\cite{Sushkov:2005kj,Lobo:2005us,Lobo:2005yv,Lobo:2005vc,Lobo:2006ue} and consider a specific choice of the shape function or of the redshift function. To be a solution of a wormhole, one needs to impose that the throat flares out. Using embedding diagrams,\cite{Morris,Misner} this flaring-out condition entails the following condition \begin{equation} \frac{d^2r}{dz^2}=\frac{b-b'r}{2b^2}>0 \label{flareout}\,. \end{equation} At the throat, we verify that the shape function satisfies the condition $b'(r_0)<1$. Note that the above treatment has the drawback of being coordinate dependent. For a covariant treatment, we refer to the analysis outlined in Refs. [\refcite{hochvisserPRL98,Hochberg1}]. 
We will see below that this condition plays a fundamental role in the analysis of the violation of the energy conditions. \subsection{The violations of the energy conditions} \subsubsection{Classical pointwise energy conditions} The NEC asserts that for {\it any} null vector $k^{\mu}$, we have $T_{\mu\nu}k^{\mu}k^{\nu}\geq 0$. For a diagonal stress-energy tensor, i.e., $T_{\nu }^{\mu }=\mathrm{diag} \left[-\rho(r),\; p_{r}( r), \; p_{t}(r),\; p_{t}(r)\right]$, this implies $\rho+p_{r}\geq 0$ and $\rho+p_{t}\geq 0$. Using the Einstein field equations (\ref{rhoWH}) and (\ref{prWH}), evaluated at the throat $r_0$, and taking into account the finite character of the redshift function so that $(1-b/r)\Phi'|_{r_0} \rightarrow 0$, the flaring-out condition (\ref{flareout}) imposes the condition $(\rho+p_r)|_{r_0}<0$. This violates the NEC. In fact, it implies the violation of all the pointwise energy conditions.\cite{Visser} Although classical forms of matter are believed to obey the energy conditions, it is a well-known fact that they are violated by certain quantum fields, amongst which we may refer to the Casimir effect. Thus, the flaring-out condition (\ref{flareout}) entails the violation of the NEC, at the throat. Note that negative energy densities are not essential, but negative pressures at the throat, $p_r(r_0)=-1/(8\pi r_0^2)$, are necessary to sustain the wormhole throat. As the violation of the energy conditions is a problematic issue, it is useful to minimize this violation. 
Several approaches have been explored extensively in the literature, namely, rotating solutions,\cite{teo} evolving wormhole spacetimes,\cite{kar1,kar2,Arellano:2006ex} thin-shell wormholes using the cut-and-paste procedure,\cite{Poisson:1995sv,Lobo:2003xd,Eiroa:2003wp,Lobo:2004rp,Garcia:2011aa} and modified theories of gravity.\cite{Harko:2013yb,Garcia:2010xb,MontelongoGarcia:2010xd,Capozziello:2012hr} It is interesting to note that the violations of the pointwise energy conditions led to the averaging of the energy conditions over timelike or null geodesics.\cite{Tipler} The averaged energy conditions permit localized violations of the energy conditions, as long as on average the energy conditions hold when integrated along timelike or null geodesics. Now, as the averaged energy conditions involve averaging over a line integral, with dimensions (mass)/(area), not a volume integral, they do not provide useful information regarding the ``total amount'' of energy-condition violating matter. In order to overcome this shortcoming, the ``volume integral quantifier'' was proposed.\cite{VKarDad} Thus, the amount of energy condition violations is then the extent that these integrals become negative. \subsubsection{Quantum Inequalities} A new set of energy constraints was introduced by Ford and Roman in 1995, \cite{Ford1} denoted by the Quantum Inequalities (QI). Contrary to the averaged energy conditions, one does not average over the entire wordline of the observer, but weights the integral with a sampling function of characteristic width. Essentially, the inequality limits the magnitude of the negative energy violations and the time for which they are allowed to exist. 
Ford and Roman applied the analysis to static and spherically symmetric Morris-Thorne wormholes and concluded that either the wormhole possesses a throat size which is only slightly larger than the Planck length, or there are large discrepancies in the length scales which characterize the geometry of the wormhole.\cite{Ford2} Due to these results, Ford and Roman argued that the existence of macroscopic traversable wormholes is very improbable. But, there are a series of considerations that can be applied to the QI. Firstly, the QI is only of interest if one is relying on quantum field theory to provide the exotic matter to support the wormhole throat. But there are classical systems (non-minimally coupled scalar fields) that violate the null and the weak energy conditions,\cite{barcelovisser1} while presenting plausible results when applying the QI. Secondly, even if one relies on quantum field theory to provide exotic matter, the QI does not rule out the existence of wormholes, although they do place serious constraints on the geometry. \subsubsection{Semi-classical and nonlinear energy conditions} Building on the Ford-Roman QIs, Martin-Moruno and Visser proposed classical and quantum versions of a ``flux energy condition'' (FEC and QFEC),\cite{Martin-Moruno:2013sfa} based on the notion of constraining the possible fluxes measured by timelike observers. The naive classical FEC were shown to be satisfied in certain situations, and even for some quantum vacuum states, while its quantum analogue (the QFEC) was satisfied under a rather wide range of conditions. Furthermore, Martin-Moruno and Visser presented and developed several nonlinear energy conditions suitable for usage in the semiclassical regime. 
More specifically, they considered the FEC, and novel concepts such as the ``trace-of-square'' (TOSEC) and ``determinant'' (DETEC) energy conditions, and showed that these nonlinear energy conditions behave much better than the classical linear energy conditions in the presence of semiclassical quantum effects.\cite{Martin-Moruno:2013wfa} Moreover, whereas the quantum extensions of these nonlinear energy conditions seem to be quite widely satisfied as one enters the quantum realm, analogous quantum extensions are generally not useful for the linear classical energy conditions.\cite{Martin-Moruno:2015ena} \subsubsection{Buchert averaging and energy conditions} An interesting application of the energy conditions is to the Buchert averaging.\cite{Visser:2015mur} Note that a key feature of Buchert averaging is the realization that, either with spatial averaging or averaging over a suitably defined ensemble of spacetimes, the average of the Einstein tensor is typically not equal to the Einstein tensor of the average spacetime. The discrepancy can be viewed as an ``effective'' stress-energy, one that often violates the classical energy conditions. A particularly attractive example of this phenomenon arises when one considers spatial averages in a conformal-FLRW (CFLRW) cosmology or the ensemble average over conformal deformations of a specific FLRW geometry. These CFLRW-based models are particularly tractable, and attractive for observational reasons, as the CMB is not distorted. Furthermore, it is possible to prove some rigorous theorems regarding the interplay between Buchert averaging and the classical energy conditions. We refer the reader to Ref. \refcite{Visser:2015mur} for more details. \subsubsection{Two roads to the null energy condition} The null energy condition has sweeping consequences in GR. 
However, it has been argued that it has been misunderstood as a property of matter,\cite{Parikh:2015wae} when in fact it is better viewed as a constraint on spacetime geometry. In fact, the geometric formulation of the NEC was derived from worldsheet string theory, where it is simply the Virasoro condition for a closed string moving in a curved background. Furthermore, it was shown that there is an entirely different thermodynamic origin of the NEC, if gravity emerges from some holographic theory. Thus, rather than being an incidental property of matter, it was argued in Ref. \refcite{Parikh:2015wae} that the validity of the NEC appears to hint at the origins of gravity. \subsubsection{Probing faster than light travel and chronology protection with superluminal warp drives} It is interesting that wormholes are not the only geometries that violate the energy conditions, and it was shown that superluminal spacetimes, such as the {\it warp drive}, also violate the weak energy condition (WEC).\cite{Alcubierre:1994tu,Lobo:2004wq} Indeed, while GR ranks undoubtedly among the best theories of physics ever developed, it is also among those with the most striking implications. In particular, GR admits solutions which allow faster than light motion, and consequently effective time travel. It was shown that a ``pre-emptive'' chronology protection mechanism destabilises superluminal warp drives via quantum matter back-reaction, and therefore forbids even the conceptual possibility to use these solutions for building a time machine.\cite{Finazzi:2009jb,Liberati:2016brg} This result was considered both in standard quantum field theory in curved spacetime as well as in the case of a quantum field theory with Lorentz invariance breakdown at high energies. 
\subsection{Evolving wormholes and flashes of WEC violations} It is interesting to note that evolving wormhole spacetimes may involve ``flashes'' of WEC violation, where the matter threading the wormhole violates the energy conditions for small intervals of time. One can consider specific cases, in which the intervals of WEC violation can be chosen to be very small. For instance, consider the following line element of a wormhole in a cosmological background given by \begin{equation}\label{evolvingWHmetric} ds^{2} = \Omega ^{2}(t) \left[- e ^{2\Phi(r)}\, dt^{2} + \frac{dr^{2}}{1-kr^2- \frac{b(r)}{r}} + r^2 \,\left(d\theta ^2+\sin ^2{\theta} \, d\phi ^2 \right) \right]\,, \end{equation} where $\Omega ^{2}(t)$ is the conformal factor, which is finite and positive definite throughout the domain of $t$. It is also possible to write the metric (\ref{evolvingWHmetric}) using ``physical time'' instead of ``conformal time'', by replacing $t$ by $\tau = \int \Omega (t)dt$ and therefore $\Omega (t)$ by $R(\tau)$, where the latter is the functional form of the metric in the $\tau$ coordinate.\cite{kar1,kar2} When the shape function and the redshift function vanish, $b(r)\rightarrow 0$ and $\Phi(r)\rightarrow 0$, respectively, the metric (\ref{evolvingWHmetric}) becomes the FRW metric. As $\Omega(t)\rightarrow {\rm const}$ and $k\rightarrow 0$, it approaches the static wormhole metric (\ref{metricwormhole}). 
One can easily use the Raychaudhuri equation to verify the non-violation of the WEC for dynamic wormholes.\cite{kar2} Recall that the Raychaudhuri equation for a congruence of null rays is given by \begin{equation} {\frac{d\hat{\theta}}{d\lambda}}=-\frac{1}{2}{\hat{\theta}}^{2} -R_{\mu\nu}\,{k}^{\mu}\, {k}^{\nu} -2{\hat{\sigma}}^{2} +2\hat{\omega}^2 \,, \label{nullRaychaud} \end{equation} where $\hat{\theta}$ is the expansion of the congruence of null rays; $\hat{\sigma}$ and $\hat{\omega}$ are the shear and vorticity of the geodesic bundle, respectively, which are zero for this case. From the Einstein field equation we have $R_{\mu\nu}\, k^{\mu}\,k^{\nu}=T_{\mu\nu}\,k^{\mu}\,k^{\nu}$ for all null $k^{\mu}$. So if $T_{\mu\nu}\,k^{\mu}\,k^{\nu} \ge 0$, we have $\hat{\theta}^{-1}\ge {\hat{\theta}}_{0}^{-1}+\frac{\lambda}{2}$ by (\ref{nullRaychaud}). Thus if $\hat{\theta}$ is negative anywhere it tends to $-\infty$ at a finite value of the affine parameter, $\lambda$, i.e., the bundle must necessarily come to a focus. For the case of a static wormhole, the expansion $\hat{\theta}$ is given by \begin{equation} \hat{\theta} = \frac{2\beta}{r} \frac{dr}{dl} \,, \end{equation} where $\beta$ is a positive quantity. For $l<0$, $dr/dl$ is negative then so is $\hat{\theta}$ negative. However, $\hat{\theta} \rightarrow -\infty$ only if $r \rightarrow 0$, since $dr/dl$ is always finite. Therefore, either the wormhole has a vanishing throat radius, which renders it as non-traversable, or the WEC is violated. For the evolving case, the expansion is given by\cite{kar2} \begin{equation} \hat{\theta} = \frac{2\beta}{R(\tau)}\left (\frac{dR(\tau)}{d\tau}+\frac{1}{r}\,\frac{dr}{dl} \right) \,. \end{equation} Real time $\tau$ has now been used, and as long as $dR/d\tau >(1/r)|dr/dl|$, i.e., the wormhole is opening out fast enough, so that $\hat{\theta}$ is never negative. Thus, the fact that the bundle does not focus no longer implies that the WEC is satisfied. 
We refer the reader to \refcite{kar2} for specific solutions. \subsection{Rotating wormhole solutions} \subsubsection{The Teo solution} Consider the stationary and axially symmetric $(3+1)-$dimensional spacetime. The latter possesses a time-like Killing vector field, which generates invariant time translations, and a spacelike Killing vector field, which generates invariant rotations with respect to the angular coordinate $\phi$. Consider the following metric\cite{teo} \begin{equation}\label{3rwh} ds^2=-N^2dt^2+e^{\mu}\,dr^2+r^2K^2\left[d\theta^2 +\sin^2\theta(d\phi-\omega\,dt)^2\right]\,, \end{equation} where $N$, $K$, $\omega$ and $\mu$ are functions of $r$ and $\theta$. $\omega(r,\theta)$ may be interpreted as the angular velocity $ d\phi/ dt$ of a particle that falls freely from infinity to the point $(r,\theta)$. Consider the definition $e^{-\mu(r,\theta)}=1-b(r,\theta)/r$, which can be used to describe a traversable wormhole. Assume that $K(r,\theta)$ is a positive, nondecreasing function of $r$ that determines the proper radial distance $R$, i.e., $R\equiv rK$ and $R_r>0$. We shall adopt the notation that the subscripts $_r$ and $_{\theta}$ denote the derivatives in order of $r$ and ${\theta}$, respectively.\cite{teo} Note that an event horizon appears whenever $N=0$.\cite{teo} The regularity of the functions $N$, $b$ and $K$ are imposed, which implies that their $\theta$ derivatives vanish on the rotation axis, $\theta=0,\,\pi$, to ensure a non-singular behavior of the metric on the rotation axis. The metric (\ref{3rwh}) reduces to the Morris-Thorne spacetime metric (\ref{metricwormhole}) in the limit of zero rotation and spherical symmetry, $N(r,\theta)\rightarrow{\rm e}^{\Phi(r)}$, $b(r,\theta)\rightarrow b(r)$, $K(r,\theta)\rightarrow 1$ and $\omega(r,\theta)\rightarrow 0$. In analogy with the Morris-Thorne case, $b(r_0)=r_0$ is identified as the wormhole throat, and the factors $N$, $K$ and $\omega$ are assumed to be well-behaved at the throat. 
Thus, one may conclude that the metric (\ref{3rwh}) describes a rotating wormhole geometry, with an angular velocity $\omega$. The factor $K$ determines the proper radial distance. $N$ is the analog of the redshift function in the Morris-Thorne wormhole and is finite and nonzero to ensure that there are no event horizons or curvature singularities. $b$ is the shape function which satisfies $b\leq r$; it is independent of $\theta$ at the throat, i.e., $b_\theta=0$; and obeys the flaring out condition $b_r<1$. The NEC at the throat is given by \begin{eqnarray}\label{NEC} 8\pi\,T_{\hat{\mu} \hat{\nu}}k^{\hat{\mu}} k^{\hat{\nu}}={\rm e}^{-\mu}\mu_r\frac{(rK)_r}{rK} -\frac{\omega_\theta{}^2\sin^2\theta}{2N^2} +\frac{(N_\theta\sin\theta)_\theta}{(rK)^2N\sin\theta}\,. \end{eqnarray} Rather than reproduce the analysis here, we refer the reader to Ref. \refcite{teo}, where it was shown that the NEC is violated in certain regions, and is satisfied in others. Thus, it is possible for an infalling observer to move around the throat, and avoid the exotic matter supporting the wormhole. However, it is important to emphasize that one cannot avoid the use of exotic matter altogether. \subsubsection{Properties of rotating wormholes supported by phantom scalar fields and stability} Recently, rotating wormhole solutions in GR, supported by phantom scalar fields, were further presented.\cite{Kleihaus:2014dla} It was shown that in four space--time dimensions this family of solutions evolves from the static Ellis wormhole, when a finite angular velocity is imposed at the throat. At a maximal value of the rotational velocity, the family of wormhole solutions ends in an extremal Kerr solution. The properties of these rotating wormhole solutions including their mass, angular momentum, quadrupole moment, and ergosphere were extensively discussed. In five space--time dimensions rotating wormholes with equal magnitude angular momenta were also presented. 
Applying stability analysis to the latter, it was shown that the unstable mode of the Ellis solutions disappears, when the angular momentum of the wormhole is sufficiently high.\cite{Kleihaus:2014dla} It is also interesting to note that for the static and spherically symmetric case, wormholes were also considered with two scalar fields.\cite{Dzhunushaliev:2015sla} These provide the possibility to obtain topologically trivial solutions in contrast with wormholes created by one scalar field. Wormholes with one scalar field are topologically non-trivial because the scalar field possesses a kink-like behaviour. The solutions with different potentials were considered, and furthermore, the profile of mass vs parameters of scalar fields was obtained.\cite{Dzhunushaliev:2015sla} \subsubsection{Novel cosmic censorship from the Kerr-like wormhole} The Kerr-like wormhole with phantom matter as a source, was also analyzed.\cite{Miranda:2013gqa} It was shown to possess three parameters, namely, mass, angular momentum and the scalar field charge. The wormhole presented was shown to have a naked ring singularity, otherwise it is regular everywhere. The main feature of the solution is that the throat lies on a sphere of the same radius as the ring singularity, and prevents an observer from seeing or reaching the singularity, so that it essentially behaves as an anti-horizon. The geodesics of the wormhole were also analyzed, and it was found that an observer can traverse without consequences, however, the equator presents an infinite potential barrier which prevents the observer from reaching the throat. Furthermore, this wormhole contains a ring singularity only on the south hemisphere, without a horizon, but as in the Kerr solution, there is no way to see the singularity. 
Thus, it was argued that this solution is a new kind of cosmic censorship.\cite{Miranda:2013gqa} \subsection{Thin-shell wormholes} Consider two distinct spacetime manifolds, ${\cal M_+}$ and ${\cal M_-}$, with metrics given by $g_{\mu \nu}^+(x^{\mu}_+)$ and $g_{\mu \nu}^-(x^{\mu}_-)$, in terms of independently defined coordinate systems $x^{\mu}_+$ and $x^{\mu}_-$, respectively. The manifolds are bounded by hypersurfaces $\Sigma_+$ and $\Sigma_-$, respectively, with induced metrics $g_{ij}^+$ and $g_{ij}^-$. A single manifold ${\cal M}$ is obtained by gluing together ${\cal M_+}$ and ${\cal M_-}$ at their boundaries, i.e., ${\cal M}={\cal M_+}\cup {\cal M_-}$, with the natural identification of the boundaries $\Sigma=\Sigma_+=\Sigma_-$. Now, taking into account that the interior wormhole spacetime is given by metric (\ref{metricwormhole}) and the exterior geometry is the Schwarzschild solution, the surface stresses are given by the following quantities \begin{eqnarray} \sigma&=&-\frac{1}{4\pi a} \left(\sqrt{1-\frac{2M}{a}+\dot{a}^2}- \sqrt{1-\frac{b(a)}{a}+\dot{a}^2} \, \right) \label{surfenergy} ,\\ {\cal P}&=&\frac{1}{8\pi a} \Bigg[\frac{1-\frac{M}{a} +\dot{a}^2+a\ddot{a}}{\sqrt{1-\frac{2M}{a}+\dot{a}^2}} -\frac{(1+a\Phi') \left(1-\frac{b}{a}+\dot{a}^2 \right)+a\ddot{a}-\frac{\dot{a}^2(b-b'a)}{2(a-b)}}{\sqrt{1-\frac{b(a)}{a}+\dot{a}^2}} \, \Bigg] \,, \label{surfpressure} \end{eqnarray} where $\sigma$ and ${\cal P}$ are the surface energy density and the tangential surface pressure, respectively. The conservation equation provides us with \begin{equation} \sigma'=-\frac{2}{a}\,(\sigma+{\cal P})+\Xi \,,\label{consequation2} \end{equation} where $\Xi$, defined for notational convenience, is given by \begin{eqnarray} \Xi=-\frac{1}{4\pi a^2} \left[\frac{b'a-b}{2a\left(1-\frac{b}{a} \right)}+a\Phi' \right] \sqrt{1-\frac{b}{a}+\dot{a}^2} \,. 
\label{H(a)} \end{eqnarray} The construction of dynamic shells in wormholes has been extensively analyzed in the literature \cite{Poisson:1995sv,Lobo:2003xd,Eiroa:2003wp,Lobo:2004rp,Garcia:2011aa}, where the stability of generic spherically symmetric thin shells to linearized perturbations around static solutions was considered, and applying the analysis to traversable wormhole geometries, by considering specific choices for the shape function, the stability regions were deduced. \subsubsection{Negative tension branes as stable thin shell wormholes} Negative tension branes as stable thin-shell wormholes in Reissner-Nordstroem-(anti) de Sitter spacetimes in $d$ dimensional Einstein gravity, were also investigated.\cite{Kokubu:2014vwa} Imposing $Z_2$ symmetry, traversable static thin-shell wormholes were constructed and classified in spherical, planar and hyperbolic symmetries. In spherical geometry, it was found that the higher-dimensional counterpart of Barcelo and Visser's wormholes\cite{Barcelo:2000ta} is stable against spherically symmetric perturbations. Classes of thin-shell wormholes in planar and hyperbolic symmetries with a negative cosmological constant were also found, which are stable against perturbations. In most cases, stable wormholes were found with the combination of an electric charge and a negative cosmological constant. However, as special cases, stable wormholes were found with a vanishing cosmological constant in spherical symmetry and with vanishing electric charge in hyperbolic symmetry. 
\subsubsection{Thin shell collapse in CMC/maximal slicing and shape dynamics} An interesting application of thin-shell collapse was also investigated.\cite{Gomes:2015ila} More specifically, the gravitational collapse of (massive or null) thin shells of dust in the ADM Hamiltonian formalism was studied, in a particular foliation, namely, for a constant-mean-extrinsic-curvature (CMC) in the case of a spatially compact universe, and its analogue in the case of an asymptotically flat space. Exact solutions to Einstein's equations at the nonlinear level were obtained, which take into account the backreaction of matter on geometry. The result is interesting because, in addition to providing an exact solution of GR, it also represents a solution of the newly discovered theory of Shape Dynamics.\cite{Gomes:2015ila} This theory is classically equivalent to GR, but it highlights a different (dual) symmetry to refoliation invariance, namely, spatial Weyl invariance. For this reason Shape Dynamics is expected to differ at the quantum level from the standard covariant quantization schemes of GR, and suggests that the fundamental degrees of freedom of GR are spatial conformal and diffeomorphism invariant. \section{Wormholes in modified theories of gravity} \label{2} Wormholes have also been extensively studied in modified theories of gravity. 
Without a significant loss of generality, consider the generalized gravitational field equations for a large class of modified theories of gravity, given by the following field equation\cite{Harko:2013yb} \begin{equation} g_1(\Psi^i)(G_{\mu\nu}+H_{\mu\nu})-g_2(\Psi^j)\,T_{\mu\nu}=\kappa^2\,T_{\mu% \nu}\,, \label{generalfieldeq} \end{equation} where $g_i(\Psi^j)$ ($i=1,2$) are multiplicative factors that modify the geometrical sector of the field equations, and $\Psi^j$ denote generically curvature invariants or gravitational fields such as scalar fields; the term $g_2(\Psi^i)$ covers the coupling of the curvature invariants or the scalar fields with the matter stress-energy tensor, $T_{\mu\nu}$. The additional geometric term $H_{\mu\nu}$ includes the geometrical modifications inherent in the modified gravitational theory under consideration. In order to analyse a generalized form of the energy condition, it is rather useful to rewrite this field equation as an effective Einstein field equation, $G_{\mu\nu}=\kappa^2\, T_{\mu\nu}^{\mathrm{eff}}$, where the effective stress-energy tensor is given by \begin{equation} T_{\mu\nu}^{\mathrm{eff}} \equiv \frac{1+\bar{g}_2(\Psi^j)}{g_1(\Psi^i)} \,T_{\mu\nu} -\bar{H}_{\mu\nu}\,, \end{equation} where $\bar{g}_2(\Psi^j)=g_2(\Psi^j)/\kappa^2$ and $\bar{H} _{\mu\nu}=H_{\mu\nu}/\kappa^2$ are defined for notational convenience. For this case, the violation of the generalized NEC, $T^{\mathrm{eff}}_{\mu\nu} k^\mu k^\nu < 0$, implies the following restriction \begin{equation} \frac{1+\bar{g}_2(\Psi^j)}{g_1(\Psi^i)}\,T_{\mu\nu} k^\mu k^\nu < \bar{H}% _{\mu\nu}k^\mu k^\nu \,. \end{equation} Note that imposing $g_1(\Psi^j)=1$ , $g_2(\Psi^j)=0$, and $H_{\mu\nu}=0$, we recover GR and the standard violation of the NEC for the matter threading the wormhole, i.e., $T_{\mu\nu} k^\mu k^\nu < 0$. 
If one imposes an additional condition given by $[1+\bar{g}_2(\Psi^j)]/g_1(\Psi^i)>0$, one obtains a general bound for the normal matter threading the wormhole, given by \begin{equation} 0 \leq T_{\mu\nu} k^\mu k^\nu < \frac{g_1(\Psi^i)}{1+\bar{g}_2(\Psi^j)}\, \bar{H}_{\mu\nu}k^\mu k^\nu \,. \end{equation} One may demand that the latter condition is fulfilled even if the matter stress-energy tensor satisfies the usual NEC.\cite{Harko:2013yb} One may also impose the WEC, i.e., $T_{\mu\nu}u^\mu u^\nu\geq 0$, where $u^\mu$ is the four-velocity of an observer. In order for normal matter to satisfy the WEC, in order to have normal matter threading the wormhole, one also needs to impose the following condition \begin{equation} T_{\mu\nu}u^\mu u^\nu = \frac{g_1(\Psi^i) }{\kappa^2+g_2(\Psi^j)} \left(G_{\mu\nu} + H_{\mu\nu} \right) u^\mu u^\nu \geq 0 \,. \end{equation} Thus, imposing $T_{\mu\nu}u^\mu u^\nu\geq 0$ entails a restriction on the geometry arising from the modified gravity under consideration. Considering that normal matter is given by a diagonal stress-energy tensor, one can physically interpret $T_{\mu\nu}u^\mu u^\nu$ as the energy density measured by any timelike observer with four-velocity $u^\mu$. This definition is useful as using local Lorentz transformations it is possible to show that $T_{\mu\nu}u^\mu u^\nu \geq 0$ implies that the energy density is positive in all local frames of reference. \subsection{Exact wormhole solutions with a nonminimal kinetic coupling} In a scalar-tensor theory of gravity with a scalar field possessing the nonminimal kinetic coupling to the curvature, static and spherically symmetric solutions were considered.\cite{Sushkov:2011jh} The lagrangian of the theory contains the term $(\varepsilon g^{\mu\nu}+\eta G^{\mu\nu})\phi_{,\mu}\phi_{,\nu}$ and represents a particular case of the general Horndeski lagrangian, which leads to second-order equations of motion. 
The Rinaldi approach was used to construct analytical solutions describing wormholes with nonminimal kinetic coupling. It was shown that wormholes exist only if $\varepsilon=-1$ (phantom case) and $\eta>0$. Furthermore, the wormhole throat connects two anti-de Sitter spacetimes, and the metric possesses a coordinate singularity at the throat. However, since all curvature invariants are regular, there is no curvature singularity there.\cite{Sushkov:2011jh} \subsection{Geons as wormholes of modified gravity} As shown above, it is possible that wormholes arise as solutions of extensions of GR without violations of the energy conditions. Working in a metric-affine framework, which is experimentally supported by the physics of crystalline structures with defects,\cite{Lobo:2014nwa} explicit models supporting such solutions in four and higher dimensions were found.\cite{Olmo:2016ags,Lobo:2013prg,Lobo:2014zla,Lobo:2014nwa} It was shown that they actually represent explicit realizations of the concept of the geon introduced by Wheeler, which were interpreted as topologically non-trivial self-consistent bodies generated by an electromagnetic field without sources. Several of their properties were discussed, and we refer the reader to Refs. \refcite{Olmo:2016ags,Lobo:2013prg,Lobo:2014zla,Lobo:2014nwa} for more details. \subsection{Wormholes as a cure for black hole singularities} Furthermore, using exactly solvable models, it was shown that black hole singularities in different electrically charged configurations can be cured.\cite{Olmo:2016hey,Bambi:2015zch} The solutions obtained describe black hole space-times with a wormhole giving structure to the otherwise point-like singularity. Furthermore, it was shown that geodesic completeness was satisfied despite the existence of curvature divergences at the wormhole throat. In some cases, physical observers can go through the wormhole and in other cases the throat lies at an infinite affine distance. 
The removal of singularities occurs in a non-perturbative way.\cite{Olmo:2016hey,Bambi:2015zch} \subsection{Gravity's Rainbow and traversable wormholes} In the context of Gravity's Rainbow, the graviton one-loop contribution to a classical energy was computed in a traversable wormhole background.\cite{Garattini:2013pha,Garattini:2015pmo} The form of the shape function considered was obtained by a linear equation of state $p=w \rho$. The approach was evaluated by means of a variational approach with Gaussian trial wave functionals. Instead of using a regularization/renormalization process, and to handle the divergences, the distortion induced by Gravity's Rainbow was used. The energy density of the graviton one-loop contribution, or equivalently the background spacetime, was allowed to evolve, and consequently the classical energy was determined. More specifically, the background metric was fixed to be Minkowskian in the equation governing the quantum fluctuations, which behaves essentially as a backreaction equation, and the quantum fluctuations were allowed to evolve. The classical energy, which depends on the evolved metric functions, was then evaluated. Analysing this procedure, a natural ultraviolet (UV) cutoff was obtained, which forbids the presence of an interior spacetime region, and it was argued that this may result in a multiply-connected spacetime. Thus, in the context of Gravity's Rainbow, this process may be interpreted as a change in topology, and in principle results in the presence of a Planckian wormhole. \subsection{On wormhole creation by quantum tunnelling} The process of quantum tunneling was studied in a self-interacting scalar field theory with non-minimal coupling to gravity.\cite{Battarra:2014naa,Battarra:2016plm} It was demonstrated that in these theories gravitational instantons can develop a neck, which is a feature prohibited in theories with minimal coupling. 
Furthermore, it was shown that such instantons with necks lead to the materialization of bubble geometries containing a wormhole region. The relationship of neck geometries to violations of the NEC was also explored, and the bound on the size of the neck relative to that of the instanton was derived. \subsection{Off-diagonal wormhole and black hole deformations in modified gravity theories} General parameterizations for generic off-diagonal spacetime metrics and matter sources in GR and modified gravity were found,\cite{Vacaru:2014cwa} when the field equations decouple with respect to nonholonomic frames of reference. This allows one to construct various classes of exact solutions when the coefficients of the fundamental geometric/physical objects depend on all spacetime coordinates via corresponding classes of generating and integration functions and/or constants. Such (modified) spacetimes display Killing and non-Killing symmetries, describe nonlinear vacuum configurations and effective polarizations of cosmological and interaction constants. Certain examples of exact locally anisotropic wormholes and generic off-diagonal cosmological solutions were analysed in modified gravity, such as in $f(R,T)$ gravity.\cite{Harko:2011kv} It was concluded that considering generic off-diagonal nonlinear parametric interactions in GR it is possible to mimic various effects in massive and/or modified gravity, or to distinguish certain classes of ``generic'' modified gravity solutions which cannot be encoded in GR. \section{Summary and conclusion} In this paper, we have considered a brief review of wormhole physics. It is important to emphasize that these solutions are primarily useful as ``gedanken-experiments'' and as a theoretician's probe of the foundations of general relativity. 
They have been extremely important in stimulating research in the issues of the energy condition violations, closed timelike curves and the associated causality violations and ``effective'' superluminal travel. We have outlined a review dating from the ``(Flamm)-Einstein-Rosen'' bridge, the revival of the topic by Wheeler with the introduction of the ``geon'' concept in the 1960s, the full renaissance of the subject by Thorne and collaborators in the late 1980s, culminating in the monograph by Visser, and detailed the issues that branched therefrom to the present date. More specifically, we have presented a mathematical overview of the Morris-Thorne wormhole, paying close attention to the pointwise and averaged energy condition violations, the Quantum Inequality and modern generalizations of semi-classical nonlinear energy condition. We then, treated rotating wormholes and evolving wormholes, focussing on the energy condition violations. Indeed, a fundamental ingredient in wormhole physics is the flaring-out condition at the throat which, in classical general relativity, entails the violation of the null energy condition. We have also presented the most general conditions in the context of modified gravity, in which the matter threading the wormhole throat satisfies all of the energy conditions, and it is the higher order curvature terms, which may be interpreted as a gravitational fluid, that support these nonstandard wormhole geometries. Thus, we explicitly show that wormhole geometries can be theoretically constructed without the presence of exotic matter, but are sustained in the context of modified gravity. Specific models were also briefly outlined. \section*{Acknowledgments} FSNL was supported by the Funda\c{c}\~{a}o para a Ci\^{e}ncia e Tecnologia (FCT) through the grants EXPL/FIS-AST/1608/2013, UID/FIS/04434/2013 and by a FCT Research contract, with reference IF/00859/2012.
1,108,101,565,900
arxiv
\section{Introduction} Probability answer set programming \cite{Saad_NHPP,Saad_EHPP,Saad_DHPP} is a declarative programming framework which aims to solve hard search problems in probability environments, and shown effective for probability knowledge representation and probability reasoning applications. It has been shown that many interesting probability reasoning problems are represented and solved by probability answer set programming, where probability answer sets describe the set of possible solutions to the problem. These probability reasoning problems include, but not limited to, reasoning about actions with probability effects and probability planning \cite{SaadPlan}, reinforcement learning in MDP environments \cite{Saad_MDP}, reinforcement learning in POMDP environments \cite{Saad_Learn_Sense}, contingent probability planning \cite{Saad_Sensing}, and Bayesian reasoning \cite{SaadSSAT}. However, the unavailability of probability optimization aggregates, e.g., minimum and maximum in the language of probability answer set programming \cite{Saad_NHPP,Saad_EHPP,Saad_DHPP} disallows the natural and concise representation of many interesting stochastic optimization problems that are based on minimization and maximization of some desired criteria imposed by the problem. The following stochastic optimization with recourse problem illuminates the need for these aggregates. \begin{example} Assume that a company produces some product, $G$, and need to make a decision on the amount of units of $G$ to produce based on the market demand. The company made a decision on the amounts of units of product $G$ to produce at cost of \$$2$ per unit of $G$ (first stage). However, market demand is stochastic with a discrete probability distribution and the market demand must be met in any scenario. The company can produce extra units of product $G$ to meet the market observed demands but with the cost of \$$3$ per unit (second stage). 
This means a recourse to extra production to meet the excess in demand. Assume that the probability distribution, $p_i$, over market demand, $D_i$, is given as follows where two scenarios are available, $D_1 = 500$ with $p_1 = 0.6$ and $D_2 = 700$ with $p_2 = 0.4$. Formally, let $x$ be the number of units of product $G$ the company produces at the first stage and let $y_i$, called recourse variable, be the number of units the company produces at the second stage to meet the market stochastic demand at scenario $i$. The objective is to {\em minimize the total expected cost}. This two stages stochastic optimization problem is formalized as: \[ minimize \; 2x + \sum_{i=1}^I p_i (3y_i) \] subject to \[ \begin{array}{lcl} x + y_i \geq D_i && i = 1, \ldots, I \\ x \geq 0 && \\ y_i \geq 0 && i = 1, \ldots, I \end{array} \] where the constraint $x + y_i \geq D_i$ guarantee that demand is always met in any scenario and $I = 2$. The optimal solution to this two stages stochastic optimization with recourse problem is $x = 500$, $y_1 = 0$, $y_2 = 200$, and with minimum total expected cost equal to $\$1240$. \label{ex:finance} \end{example} To represent this stochastic optimization problem in probability answer set programming and to provide correct solution to the problem, the probability answer set programming representation of the problem has to be able to represent the probability distributions of the problem domain and any probability distribution that may arise to the problem constraints along with the preference relation that minimizes or maximizes the objective function including the expected values that always appear in the objective functions of these types of stochastic optimization problems, and to be able to compare for the minimum or the maximum of the objective value across the generated probability answer sets. 
However, the current syntax and semantics of probability answer set programming do not define probability preference relations or rank probability answer sets based on minimization or maximization of some desired criterion specified by the user. Therefore, in this paper we extend probability answer set programming with probability aggregate preferences to allow the ability to represent and reason and intuitively solve stochastic optimization problems. The proposed probability aggregates probability answer set optimization framework presented in this paper modifies and generalizes the classical aggregates classical answer set optimization presented in \cite{Saad_ASOG} as well as the classical answer set optimization introduced in \cite{ASO}. We show the application of probability aggregates probability answer set optimization to a two stages stochastic optimization with recourse problem described in Example (\ref{ex:finance}), where a probability answer set program \cite{Saad_DHPP} (disjunctive hybrid probability logic program with probability answer set semantics) is used as probability answer sets generator rules. \section{Probability Aggregates Probability Answer Set Optimization} Probability answer set optimization programs are probability logic programs under the probability answer set semantics whose probability answer sets are ranked according to probability preference rules represented in the programs. A probability answer set optimization program, $\Pi$, is a pair of the form \\ $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$, where $R_{gen} \cup R_{pref}$ is a union of two sets of probability logic rules and $\tau$ is a mapping, $\tau: {\cal B_L} \rightarrow S_{disj}$, associated to the set of probability logic rules $R_{gen}$. 
The first set of probability logic rules, $R_{gen}$, is called the generator rules that generate the probability answer sets that satisfy every probability logic rule in $R_{gen}$ and the mapping $\tau$ associates to each atom, $a$, appearing in $R_{gen}$, a disjunctive p-strategy that is used to combine the probability intervals obtained from different probability logic rules in $R_{gen}$ with an atom $a$ appearing in their heads. $R_{gen}$ is any set of probability logic rules with well-defined probability answer set semantics including normal, extended, and disjunctive hybrid probability logic rules \cite{Saad_NHPP,Saad_EHPP,Saad_DHPP}, as well as hybrid probability logic rules with probability aggregates (all are forms of {\em probability answer set programming}). The second set of probability logic rules, $R_{pref}$, is called the {\em probability preference rules}, which are probability logic rules that represent the user's {\em probability quantitative} and {\em qualitative preferences} over the probability answer sets generated by $R_{gen}$. The probability preference rules in $R_{pref}$ are used to rank the generated probability answer sets from $R_{gen}$ from the top preferred probability answer set to the least preferred probability answer set. Similar to \cite{ASO}, an advantage of probability answer set optimization programs is that $R_{gen}$ and $R_{pref}$ are independent. This makes probability preference elicitation easier and the whole approach is more intuitive and easy to use in practice. 
In our introduction of probability answer set optimization programs, we focus on the syntax and semantics of the {\em probability preference rules}, $R_{pref}$, of the probability answer set optimization programs, since the syntax and semantics of the probability answer sets generator rules, $R_{gen}$, are the same as syntax and semantics of any set of probability logic rules with well-defined probability answer set semantics as described in \cite{Saad_NHPP,Saad_EHPP,Saad_DHPP}. \subsection{Basic Language} Let ${\cal L}$ be a first-order language with finitely many predicate symbols, function symbols, constants, and infinitely many variables. A literal is either an atom $a$ in ${\cal B_L}$ or the negation of an atom $a$ ($\neg a$), where ${\cal B_L}$ is the Herbrand base of ${\cal L}$ and $\neg$ is the classical negation. Non-monotonic negation or the negation as failure is denoted by $not$. The Herbrand universe of $\cal L$ is denoted by $U_{\cal L}$. Let $Lit$ be the set of all literals in ${\cal L}$, where $Lit = \{a \:| \: a \in {\cal B_L} \} \cup \{\neg a \: | \: a \in {\cal B_L}\}$. A \emph{probability annotation} is a probability interval of the form $[\alpha_1, \alpha_2]$, where $\alpha_1, \alpha_2$ are called probability annotation items. A \emph{probability annotation item} is either a constant in $[0, 1]$ (called {\em probability annotation constant}), a variable ranging over $[0, 1]$ (called \emph{probability annotation variable}), or $f(\alpha_1,\ldots,\alpha_n)$ (called \emph{probability annotation function}) where $f$ is a representation of a monotone, antimonotone, or nonmonotone total or partial function $f: ([0, 1])^n \rightarrow [0, 1]$ and $\alpha_1,\ldots,\alpha_n$ are probability annotation items. Let $S = S_{conj} {\cup} S_{disj}$ be an arbitrary set of p-strategies, where $S_{conj}$ ($S_{disj}$) is the set of all conjunctive (disjunctive) p-strategies in $S$. 
A \emph{hybrid literal} is an expression of the form $l_1 \wedge_\rho \ldots \wedge_\rho l_n$ or $l_1 \vee_{\rho'} \ldots \vee_{\rho'} l_n$, where $l_1, \ldots, l_n$ are literals and $\rho$ and $\rho'$ are p-strategies from $S$. $bf_S(Lit)$ is the set of all ground hybrid literals formed using distinct literals from $Lit$ and p-strategies from $S$. If $L$ is a hybrid literal and $\mu$ is a probability annotation then $L:\mu$ is called a probability annotated hybrid literal. A symbolic probability set is an expression of the form $\{ X : [P_1, P_2] \; | \; C \}$, where $X$ is a variable or a function term and $P_1$, $P_2$ are probability annotation variables or probability annotation functions, and $C$ is a conjunction of probability annotated hybrid basic formulae. A ground probability set is a set of pairs of the form $\langle x : [p_1, p_2] \; | \; C^g \rangle$ such that $x$ is a constant term and $p_1, p_2$ are probability annotation constants, and $C^g$ is a ground conjunction of probability annotated hybrid basic formulae. A symbolic probability set or ground probability set is called a probability set term. Let $f$ be a probability aggregate function symbol and $S$ be a probability set term, then $f(S)$ is said to be a probability aggregate, where $f \in \{$ $val_E$, $sum_E$, $times_E$, $min_E$, $max_E$, $count_E$, $sum_P$, $times_P$, $min_P$, $max_P$, $count_P$ $\}$. If $f(S)$ is a probability aggregate and $T$ is an interval $[\theta_1, \theta_2]$, called {\em guard}, where $\theta_1, \theta_2$ are constants, variables or function terms, then we say $f(S) \prec T$ is a probability aggregate atom, where $\prec \in \{=, \neq, <, >, \leq, \geq \}$. A {\em probability optimization aggregate} is an expression of the form $max_\mu (f(S))$, $min_\mu (f(S))$, $max_x (f(S))$, $min_x (f(S))$, $max_{x \mu} (f(S))$, and $min_{x \mu} (f(S))$, where $f$ is a probability aggregate function symbol and $S$ is a probability set term. 
\subsection{Probability Preference Rules Syntax} Let ${\cal A}$ be a set of probability annotated hybrid literals, probability annotated probability aggregate atoms and probability optimization aggregates. A boolean combination over ${\cal A}$ is a boolean formula over probability annotated hybrid literals, probability annotated probability aggregate atoms, and probability optimization aggregates in ${\cal A}$ constructed by conjunction, disjunction, and non-monotonic negation ($not$), where non-monotonic negation is combined only with probability annotated hybrid literals and probability annotated probability aggregate atoms \begin{definition} A probability preference rule, $r$, over a set of probability annotated hybrid literals, probability annotated probability aggregate atoms and probability optimization aggregates, ${\cal A}$, is an expression of the form \begin{eqnarray} C_1 \succ C_2 \succ \ldots \succ C_k \leftarrow L_{k+1}:\mu_{k+1},\ldots, L_m:\mu_m, \notag \\ not\; L_{m+1}:\mu_{m+1},\ldots, not\;L_{n}:\mu_{n} \label{rule:pref} \end{eqnarray} where $L_{k+1}:\mu_{k+1}, \ldots, L_{n}:\mu_{n}$ are probability annotated hybrid literals and probability annotated probability aggregate atoms and $C_1, C_2, \ldots, C_k$ are boolean combinations over ${\cal A}$. \end{definition} Let $body(r) = L_{k+1}:\mu_{k+1},\ldots, L_m:\mu_m, not\; L_{m+1}:\mu_{m+1},\ldots, not\;L_{n}:\mu_{n}$ and $head(r) = C_1 \succ C_2 \succ \ldots \succ C_k$, where $r$ is a probability preference rule of the form (\ref{rule:pref}). 
Intuitively, a probability preference rule, $r$, of the form (\ref{rule:pref}) means that any probability answer set that satisfies $body(r)$ and $C_1$ is preferred over the probability answer sets that satisfy $body(r)$, some $C_i$ $(2 \leq i \leq k)$, but not $C_1$, and any probability answer set that satisfies $body(r)$ and $C_2$ is preferred over probability answer sets that satisfy $body(r)$, some $C_i$ $(3 \leq i \leq k)$, but neither $C_1$ nor $C_2$, etc. \begin{definition} A probability answer set optimization program, $\Pi$, is a pair of the form $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$, where $R_{gen}$ is a set of probability logic rules with well-defined probability answer set semantics, the {\em generator} rules, $R_{pref}$ is a set of probability preference rules, and $\tau$ is the mapping $\tau: {\cal L}it \rightarrow S_{disj}$ that associates to each literal, $l$, appearing in $R_{gen}$ a disjunctive p-strategy. \end{definition} Let $f(S)$ be a probability aggregate. A variable, $X$, is a local variable to $f(S)$ if and only if $X$ appears in $S$ and $X$ does not appear in the probability preference rule that contains $f(S)$. A global variable is a variable that is not a local variable. Therefore, the {\em ground instantiation} of a symbolic probability set $$S = \{ X:[P_1,P_2] \; | \; C \}$$ is the set of all ground pairs of the form $\langle \theta\; (X) :[\theta\; (P_1), \theta\; (P_2)]\; | \; \theta \; (C) \rangle$, where $\theta$ is a substitution of every local variable appearing in $S$ to a constant from $U_{\cal L}$. A ground instantiation of a probability preference rule, $r$, is the replacement of each global variable appearing in $r$ to a constant from $U_{\cal L}$, then followed by the ground instantiation of every symbolic probability set, $S$, appearing in $r$. 
The ground instantiation of a probability aggregates probability answer set optimization program, $\Pi$, is the set of all possible ground instantiations of every probability rule in $\Pi$. \begin{example} The two stages stochastic optimization with recourse problem presented in Example (\ref{ex:finance}) can be represented as a probability aggregates probability answer set optimization program $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$, where $\tau$ is any assignment of disjunctive p-strategies and $R_{gen}$ is a set of disjunctive hybrid probability logic rules with probability answer set semantics \cite{Saad_DHPP} of the form: \[ \begin{array}{r} domX(500) \vee domX(550) \vee domX(600) \vee domX(650) \vee \\ domX(700). \\ domY_1(0):p_1 \vee domY_1(50):p_1 \vee domY_1(100):p_1 \vee \\ domY_1(150):p_1 \vee domY_1(200):p_1. \\ domY_2(0):p_2 \vee domY_2(50):p_2 \vee domY_2(100):p_2 \vee \\ domY_2(150):p_2 \vee domY_2(200):p_2. \end{array} \] \[ \begin{array}{r} objective(X,Y_1,Y_2, 2 * X + 3 * p_1 * Y_1 + 3 * p_2 * Y_2) \\ \leftarrow domX(X), domY_1(Y_1):p_1, \\ domY_2(Y_2):p_2. \end{array} \] \[ \begin{array}{lcl} & \leftarrow & domX(X), domY_1(Y_1):p_1, X + Y_1 < 500.\\ & \leftarrow & domX(X), domY_2(Y_2):p_2, X + Y_2 < 700. \end{array} \] where $p_1 = 0.6$ and $p_2 = 0.4$, $domX(X)$, $domY_1(Y_1)$, $domY_2(Y_2)$ are predicates that represent the domains of possible values for the variables $X$, $Y_1$, $Y_2$ that represent the units of product $G$ corresponding to the variables $x, y_1, y_2$ described in Example (\ref{ex:finance}), $objective(X, Y_1, Y_2, Cost)$ is a predicate that represents the objective value, $Cost$, for the assignments of units of a product $G$ to the variables $X$, $Y_1$, $Y_2$ where $Cost$ is the expected cost for this assignment of variables. 
The set of probability preference rules, $R_{pref}$, of $\Pi$ consists of the probability preference rule \[ \begin{array}{r} min_x \{Cost : 1 \; | \; objective(X,Y_1,Y_2, Cost) \} \leftarrow \end{array} \] \label{ex:finance-code} \end{example} \section{Probability Aggregates Probability Answer Set Optimization Semantics} Let $\mathbb{X}$ denote a set of objects. Then, we use $2^\mathbb{X}$ to denote the set of all {\em multisets} over elements in $\mathbb{X}$. Let ${\cal C}[0, 1]$ denote the set of all closed intervals in $[0, 1]$, $\mathbb{R}$ denote the set of all real numbers, $\mathbb{N}$ denote the set of all natural numbers, and $U_{\cal L}$ denote the Herbrand universe. Let $\bot$ be a symbol that does not occur in ${\cal L}$. Therefore, the semantics of the probability aggregates are defined by the mappings: \begin{itemize} \item $val_E : 2^{\mathbb{R} \times {\cal C}[0, 1]} \rightarrow [\mathbb{R}, \mathbb{R}]$. \item $sum_E : 2^{\mathbb{R} \times {\cal C}[0, 1] } \rightarrow [\mathbb{R}, \mathbb{R}]$. \item $times_E: 2^{\mathbb{R} \times {\cal C}[0, 1] } \rightarrow [\mathbb{R}, \mathbb{R}]$. \item $min_E, max_E: (2^{\mathbb{R} \times {\cal C}[0, 1]} - \emptyset) \rightarrow [\mathbb{R}, \mathbb{R}]$. \item $count_E : 2^{U_{\cal L} \times {\cal C}[0, 1]} \rightarrow [\mathbb{R}, \mathbb{R}]$. \item $sum_P : 2^{\mathbb{R} \times {\cal C}[0, 1] } \rightarrow \mathbb{R} \times {\cal C}[0, 1]$. \item $times_P: 2^{\mathbb{R} \times {\cal C}[0, 1] } \rightarrow \mathbb{R} \times {\cal C}[0, 1]$. \item $min_P, max_P: (2^{\mathbb{R} \times {\cal C}[0, 1] } - \emptyset) \rightarrow \mathbb{R} \times {\cal C}[0, 1]$. \item $count_P : 2^{U_{\cal L} \times {\cal C}[0, 1]} \rightarrow \mathbb{N} \times {\cal C}[0, 1]$. \end{itemize} The application of $sum_E$ and $times_E$ on the empty multiset returns $[0,0]$ and $[1,1]$ respectively. The application of $val_E$ and $count_E$ on the empty multiset returns $[0,0]$. 
The application of $sum_P$ and $times_P$ on the empty multiset returns $(0,[1,1])$ and $(1,[1,1])$ respectively. The application of $count_P$ on the empty multiset returns $(0,[1,1])$. However, the application of $max_E$, $min_E$, $max_P$, $min_P$ on the empty multiset is undefined. The semantics of probability aggregates and probability optimization aggregates in probability aggregates probability answer set optimization is defined with respect to a probability answer set, which is, in general, a total or partial mapping, $h$, from $bf_S({\cal L}it)$ to ${\cal C}[0,1]$. In addition, the semantics of probability optimization aggregates $max_\mu (f(S))$, $min_\mu (f(S))$, $max_x (f(S))$, $min_x (f(S))$, $max_{x \mu} (f(S))$, and $min_{x \mu} (f(S))$ are based on the semantics of the probability aggregates $f(S)$. We say, a probability annotated hybrid literal, $L:\mu$, is true (satisfied) with respect to a probability answer set, $h$, if and only if $\mu \leq h(L)$. The negation of a probability hybrid literal, $not \; L:\mu$, is true (satisfied) with respect to $h$ if and only if $\mu \nleq h(L)$ or $L$ is undefined in $h$. The evaluation of probability aggregates and the truth valuation of probability aggregate atoms with respect to probability answer sets are given as follows. Let $f(S)$ be a ground probability aggregate and $h$ be a probability answer set. In addition, let $S_h$ be the multiset constructed from elements in $S$, where $S_h = \{\!\!\{ x : [p_1,p_2] \; | \; \langle x : [p_1,p_2] \; | \; C^g \rangle \in S \wedge$ $C^g$ is true w.r.t. $h \}\!\!\}$. 
Then, the evaluation of $f(S)$ with respect to $h$ is, $f(S_h)$, the result of the application of $f$ to $S_h$, where $f(S_h) = \bot$ if $S_h$ is not in the domain of $f$ and \begin{itemize} \item $val_E(S_h) = \sum_{x : [p_1, p_2] \in S_h} \;( x \times [p_1, p_2])$ \item $sum_E(S_h) = (\sum_{x : [p_1, p_2] \in S_h} \; x) \; \times \; (\prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \item $times_E(S_h) = (\prod_{x : [p_1, p_2] \in S_h} \; x ) \; \times \; (\prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \item $min_E (S_h)= (\min_{x : [p_1, p_2] \in S_h} \; x ) \; \times \; (\prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \item $max_E (S_h)= (\max_{x : [p_1, p_2] \in S_h} \; x ) \; \times \; (\prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \item $count_E (S_h)= (count_{x : [p_1, p_2] \in S_h} \; x ) \; \times \; (\prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \\ \item $sum_P (S_h) = (\sum_{x : [p_1, p_2] \in S_h} \; x \;, \; \prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \item $times_P (S_h) = (\prod_{x : [p_1, p_2] \in S_h} \; x \;, \; \prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \item $min_P (S_h) = (\min_{x : [p_1, p_2] \in S_h} \; x \; , \; \prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \item $max_P (S_h) = (\max_{x : [p_1, p_2] \in S_h} \; x \; , \; \prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \item $count_P (S_h) = (count_{x : [p_1, p_2] \in S_h} \; x \; , \; \prod_{x : [p_1, p_2] \in S_h} \; [p_1, p_2])$ \end{itemize} \subsection{Probability Preference Rules Semantics} In this section, we define the notion of satisfaction of probability preference rules with respect to probability answer sets. We consider that probability annotated probability aggregate atoms that involve probability aggregates from $\{$$val_E$, $sum_E$, $times_E$, $min_E$, $max_E$, $count_E$$\}$ are associated to the probability annotation $[1,1]$. 
Let $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$ be a ground probability aggregates probability answer set optimization program, $h,h'$ be probability answer sets for $R_{gen}$ (possibly partial), $f \in \{$$val_E$, $sum_E$, $times_E$, $min_E$, $max_E$, $count_E$$\}$ and $g \in \{$$sum_P$, $times_P$, $min_P$, $max_P$, $count_P$$\}$, and $r$ be a probability preference rule in $R_{pref}$. Then the satisfaction of a boolean combination, $C$, appearing in $head(r)$, by $h$ is defined inductively as follows: \begin{itemize} \item $h$ satisfies $L:\mu$ iff $\mu \leq h(L)$. \item $h$ satisfies $not\;L:\mu$ iff $\mu \nleq h(L)$ or $L$ is undefined in $h$. \\ \item $h$ satisfies $f(S) \prec T : [1,1]$ iff $f(S_h) \neq \bot$ and $f(S_h) \prec T$. \item $h$ satisfies $not \; f(S) \prec T :[1,1] $ iff $f(S_h) = \bot$ or $f(S_h) \neq \bot$ and $f(S_h) \nprec T$. \item $h$ satisfies $g(S) \prec T : \mu$ iff $g(S_h) = (x,\nu) \neq \bot$ and $x \prec T$ and $\mu \leq_t \nu$. \item $h$ satisfies $not \; g(S) \prec T :\mu $ iff $g(S_h) = \bot$ or $g(S_h) = (x, \nu ) \neq \bot$ and $x \nprec T$ or $\mu \nleq_t \nu$. \\ \item $h$ satisfies $max (f(S))$ iff $f(S_h) = x \neq \bot$ and for any $h'$, $f(S_{h'}) = x' \neq \bot$ and $x' \leq x$ or $f(S_h) \neq \bot$ and $f(S_{h'}) = \bot$. \item $h$ satisfies $min (f(S))$ iff $f(S_h) = x \neq \bot$ and for any $h'$, $f(S_{h'}) = x' \neq \bot$ and $x \leq x'$ or $f(S_h) \neq \bot$ and $f(S_{h'}) = \bot$. \\ \item $h$ satisfies $max_\mu (g(S))$ iff $g(S_h) = (x, \nu) \neq \bot$ and for any $h'$, $g(S_{h'}) = (x', \nu') \neq \bot$ and $\nu' \leq \nu$ or $g(S_h) \neq \bot$ and $g(S_{h'}) = \bot$. \item $h$ satisfies $min_\mu (g(S))$ iff $g(S_h) = (x, \nu) \neq \bot$ and for any $h'$, $g(S_{h'}) = (x', \nu') \neq \bot$ and $\nu \leq \nu'$ or $g(S_h) \neq \bot$ and $g(S_{h'}) = \bot$. 
\item $h$ satisfies $max_x (g(S))$ iff $g(S_h) = (x, \nu) \neq \bot$ and for any $h'$, $g(S_{h'}) = (x', \nu') \neq \bot$ and $x' \leq x$ or $g(S_h) \neq \bot$ and $g(S_{h'}) = \bot$. \item $h$ satisfies $min_x (g(S))$ iff $g(S_h) = (x, \nu) \neq \bot$ and for any $h'$, $g(S_{h'}) = (x', \nu') \neq \bot$ and $x \leq x'$ or $g(S_h) \neq \bot$ and $g(S_{h'}) = \bot$. \item $h$ satisfies $max_{x \mu} (g(S))$ iff $g(S_h) = (x, \nu) \neq \bot$ and for any $h'$, $g(S_{h'}) = (x', \nu') \neq \bot$ and $x' \leq x$ and $\nu' \leq \nu$ or $g(S_h) \neq \bot$ and $g(S_{h'}) = \bot$. \item $h$ satisfies $min_{x \mu} (g(S))$ iff $g(S_h) = (x, \nu) \neq \bot$ and for any $h'$, $g(S_{h'}) = (x', \nu') \neq \bot$ and $x \leq x'$ and $\nu \leq \nu'$ or $g(S_h) \neq \bot$ and $g(S_{h'}) = \bot$. \\ \item $h \models C_1 \wedge C_2$ iff $h \models C_1$ and $h \models C_2$. \item $h \models C_1 \vee C_2$ iff $h \models C_1$ or $h \models C_2$. \end{itemize} \label{def:satisfaction} The satisfaction of $body(r)$ by $h$ is defined inductively as follows: \begin{itemize} \item $h$ satisfies $L:\mu$ iff $\mu \leq h(L)$. \item $h$ satisfies $not\;L:\mu$ iff $\mu \nleq h(L)$ or $L$ is undefined in $h$. \item $h$ satisfies $f(S) \prec T : [1,1]$ iff $f(S_h) \neq \bot$ and $f(S_h) \prec T$. \item $h$ satisfies $not \; f(S) \prec T :[1,1] $ iff $f(S_h) = \bot$ or $f(S_h) \neq \bot$ and $f(S_h) \nprec T$. \item $h$ satisfies $g(S) \prec T : \mu$ iff $g(S_h) = (x,\nu) \neq \bot$ and $x \prec T$ and $\mu \leq_t \nu$. \item $h$ satisfies $not \; g(S) \prec T :\mu $ iff $g(S_h) = \bot$ or $g(S_h) = (x, \nu ) \neq \bot$ and $x \nprec T$ or $\mu \nleq_t \nu$. \item $h$ satisfies $body(r)$ iff $\forall(k+1 \leq i \leq m)$, $h$ satisfies $L_i : \mu_i$ and $\forall(m+1 \leq j \leq n)$, $h$ satisfies $not\; L_j : \mu_j$. \end{itemize} The application of the probability aggregates, $f \in \{val_E, sum_E, times_E, min_E, max_E \}$, on a singleton $\{x:\mu \}$, returns $x . 
\mu$ ($x$ multiplied by $\mu$), i.e., $f(\{x:\mu\}) = x . \mu$. Therefore, we use $max(S)$ and $min(S)$ as abbreviations for the probability optimization aggregates $max(f(S))$ and $min(f(S))$ respectively, whenever $S$ is a singleton and $f \in \{val_E, sum_E, times_E, min_E, max_E \}$. Similarly, the application of the probability aggregates, $g \in \{sum_P, times_P, min_P, max_P \}$, on a singleton $\{x:\mu \}$, returns $(x, \mu)$, i.e., $g(\{x:\mu\}) = (x, \mu)$. Therefore, we use $max_\mu(S)$, $min_\mu(S)$, $max_x(S)$, $min_x(S)$, $max_{x \mu}(S)$, and $min_{x \mu}(S)$ as abbreviations for the probability optimization aggregates $max_\mu(g(S))$, $min_\mu(g(S))$, $max_x(g(S))$, $min_x(g(S))$, $max_{x \mu}(g(S))$, and $min_{x \mu}(g(S))$ respectively, whenever $S$ is a singleton and $g \in \{sum_P, times_P, min_P, max_P \}$. \begin{definition} Let $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$ be a ground probability aggregates probability answer set optimization program, $h$ be a probability answer set for $R_{gen}$, and $r$ be a probability preference rule in $R_{pref}$, and $C_i$ be a boolean combination in $head(r)$. Then, we define the following notions of satisfaction of $r$ by $h$: \begin{itemize} \item $h \models_{i} r$ iff $h \models body(r)$ and $h \models C_i$. \item $h \models_{irr} r$ iff $h \models body(r)$ and $h$ does not satisfy any $C_i$ in $head(r)$. \item $h \models_{irr} r$ iff $h$ does not satisfy $body(r)$. \end{itemize} \end{definition} $h \models_{i} r$ means that the body of $r$ and the boolean combination $C_i$ appearing in the head of $r$ are satisfied by $h$. However, $h \models_{irr} r$ means that $r$ is irrelevant (denoted by $irr$) to $h$, or, in other words, the probability preference rule $r$ is not satisfied by $h$, for one of two reasons. Either because the body of $r$ is satisfied by $h$ but none of the boolean combinations appearing in the head of $r$ are satisfied by $h$. Or because the body of $r$ is not satisfied by $h$. 
\subsection{Probability Answer Sets Ranking} In this section we define the ranking of the probability answer sets with respect to a boolean combination, a probability preference rule, and with respect to a set of probability preference rules. \begin{definition} Let $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$ be a ground probability aggregates probability answer set optimization program, $h_1, h_2$ be two probability answer sets for $R_{gen}$, $r$ be a probability preference rule in $R_{pref}$, $C_i$ be boolean combination appearing in $head(r)$, and $f \in \{$$val_E$, $sum_E$, $times_E$, $min_E$, $max_E$, $count_E$$\}$ and $g \in \{$$sum_P$, $times_P$, $min_P$, $max_P$, $count_P$$\}$. Then, $h_1$ is strictly preferred over $h_2$ w.r.t. $C_i$, denoted by $h_1 \succ_i h_2$, iff $h_1 \models C_i$ and $h_2 \nvDash C_i$ or $h_1 \models C_i$ and $h_2 \models C_i$ (except $C_i$ is a probability optimization aggregate) and one of the following holds: \begin{itemize} \item $C_i = L:\mu$ implies $h_1 \succ_i h_2$ iff $h_1(L) > h_2(L)$. \item $C_i = not \; L:\mu$ implies $h_1 \succ_i h_2$ iff $h_1(L) < h_2(L)$ or $L$ is undefined in $h_1$ but defined $h_2$. \\ \item $C_i = f(S) \prec T : [1,1]$ implies $h_1 \succ_i h_2$ iff $f(S_{h_1}) = x \neq \bot$, $f(S_{h_2}) = x' \neq \bot$, and $x' < x$. \item $C_i = not \; f(S) \prec T :[1,1] $ implies $h_1 \succ_i h_2$ iff \begin{itemize} \item $f(S_{h_1}) = \bot$ and $f(S_{h_2}) \neq \bot$ or \item $f(S_{h_1}) = x \neq \bot$, $f(S_{h_2}) = x' \neq \bot$, and $x < x'$ \\ \end{itemize} \item $C_i = g(S) \prec T : \mu$ implies $h_1 \succ_i h_2$ iff $g(S_{h_1}) = (x, \nu) \neq \bot$, $g(S_{h_2}) = (x', \nu') \neq \bot$, and $\nu' < \nu$. 
\item $C_i = not \; g(S) \prec T :\mu $ implies $h_1 \succ_i h_2$ iff \begin{itemize} \item $g(S_{h_1}) = \bot$ and $g(S_{h_2}) \neq \bot$ or \item $g(S_{h_1}) = (x, \nu) \neq \bot$, $g(S_{h_2}) = (x', \nu') \neq \bot$, and $\nu < \nu'$ \\ \end{itemize} \item $C_i \in \{ max (f(S)), \; min (f(S)), max_\mu (g(S)), \; min_\mu (g(S)), \\ max_x (g(S)), min_x (g(S)), max_{x \mu} (g(S)), \; min_{x \mu} (g(S)) \}$ implies $h_1 \succ_i h_2$ iff $h_1 \models C_i$ and $h_2 \nvDash C_i$. \\ \item $C_i = C_{i_1} \wedge C_{i_2}$ implies $h_1 \succ_i h_2$ iff there exists $t \in \{{i_1}, {i_2}\}$ such that $h_1 \succ_t h_2$ and for all other $t' \in \{{i_1}, {i_2}\}$, we have $h_1 \succeq_{t'} h_2$. \item $C_i = C_{i_1} \vee C_{i_2}$ implies $h_1 \succ_i h_2$ iff there exists $t \in \{{i_1}, {i_2}\}$ such that $h_1 \succ_t h_2$ and for all other $t' \in \{{i_1}, {i_2}\}$, we have $h_1 \succeq_{t'} h_2$. \end{itemize} We say, $h_1$ and $h_2$ are equally preferred w.r.t. $C_i$, denoted by $h_1 =_{i} h_2$, iff $h_1 \nvDash C_i$ and $h_2 \nvDash C_i$ or $h_1 \models C_i$ and $h_2 \models C_i$ and one of the following holds: \begin{itemize} \item $C_i = L:\mu$ implies $h_1 =_{i} h_2$ iff $h_1(L) = h_2(L)$. \item $C_i = not \; L:\mu$ implies $h_1 =_{i} h_2$ iff $h_1(L) = h_2(L)$ or $L$ is undefined in both $h_1$ and $h_2$. \\ \item $C_i = f(S) \prec T : [1,1]$ implies $h_1 =_i h_2$ iff $f(S_{h_1}) = x \neq \bot$, $f(S_{h_2}) = x' \neq \bot$, and $x' = x$. \item $C_i = not \; f(S) \prec T :[1,1] $ implies $h_1 =_i h_2$ iff \begin{itemize} \item $f(S_{h_1}) = \bot$ and $f(S_{h_2}) = \bot$ or \item $f(S_{h_1}) = f(S_{h_2}) \neq \bot$ \\ \end{itemize} \item $C_i = g(S) \prec T : \mu$ implies $h_1 =_i h_2$ iff $g(S_{h_1}) = (x, \nu) \neq \bot$, $g(S_{h_2}) = (x', \nu') \neq \bot$, and $\nu' = \nu$. 
\item $C_i = not \; g(S) \prec T :\mu $ implies $h_1 =_i h_2$ iff \begin{itemize} \item $g(S_{h_1}) = \bot$ and $g(S_{h_2}) = \bot$ or \item $g(S_{h_1}) = (x, \nu) \neq \bot$, $g(S_{h_2}) = (x', \nu') \neq \bot$, and $\nu = \nu'$ \\ \end{itemize} \item $C_i \in \{ max (f(S)), \; min (f(S)), max_\mu (g(S)), \\ min_\mu (g(S)), max_x (g(S)), min_x (g(S)), max_{x \mu} (g(S)), \\ min_{x \mu} (g(S)) \}$ implies $h_1 =_i h_2$ iff $h_1 \models C_i$ and $h_2 \models C_i$. \\ \item $C_i = C_{i_1} \wedge C_{i_2}$ implies $h_1 =_{i} h_2$ iff \[ \forall \: t \in \{{i_1}, {i_2}\}, \; h_1 =_{t} h_2 \] \item $C_i = C_{i_1} \vee C_{i_2}$ implies $h_1 =_{i} h_2$ iff \[ |\{h_1 \succeq_{t} h_2 \: | \: \forall \: t \in \{{i_1}, {i_2}\} \}| = | \{ h_2 \succeq_{t} h_1 \: | \: \forall \: t \in \{{i_1}, {i_2}\} \}|. \] \end{itemize} We say, $h_1$ is at least as preferred as $h_2$ w.r.t. $C_i$, denoted by $h_1 \succeq_i h_2$, iff $h_1 \succ_i h_2$ or $h_1 =_i h_2$. \label{def:compination} \end{definition} \begin{definition} Let $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$ be a ground probability aggregates probability answer set optimization program, $h_1, h_2$ be two probability answer sets for $R_{gen}$, $r$ be a probability preference rule in $R_{pref}$, and $C_l$ be a boolean combination appearing in $head(r)$. Then, $h_1$ is strictly preferred over $h_2$ w.r.t. $r$, denoted by $h_1 \succ_r h_2$, iff one of the following holds: \begin{itemize} \item $h_1 \models_{i} r$ and $h_2 \models_{j} r$ and $i < j$, \\ where $i = \min \{l \; | \; h_1 \models_l r \}$ and $j = \min \{l \; | \; h_2 \models_l r \}$. \item $h_1 \models_{i} r$ and $h_2 \models_{i} r$ and $h_1 \succ_i h_2$, \\ where $i = \min \{l \; | \; h_1 \models_l r \} = \min \{l \; | \; h_2 \models_l r \}$. \item $h_1 \models_{i} r$ and $h_2 \models_{irr} r$. \end{itemize} We say, $h_1$ and $h_2$ are equally preferred w.r.t. 
$r$, denoted by $h_1 =_{r} h_2$, iff one of the following holds: \begin{itemize} \item $h_1 \models_{i} r$ and $h_2 \models_{i} r$ and $h_1 =_i h_2$, \\ where $i = \min \{l \; | \; h_1 \models_l r \} = \min \{l \; | \; h_2 \models_l r \}$. \item $h_1 \models_{irr} r$ and $h_2 \models_{irr} r$. \end{itemize} We say, $h_1$ is at least as preferred as $h_2$ w.r.t. $r$, denoted by $h_1 \succeq_{r} h_2$, iff $h_1 \succ_{r} h_2$ or $h_1 =_{r} h_2$. \label{def:pref-rule} \end{definition} The previous two definitions characterize how probability answer sets are ranked with respect to a boolean combination and with respect to a probability preference rule. Definition (\ref{def:compination}) presents the ranking of probability answer sets with respect to a boolean combination. But, Definition (\ref{def:pref-rule}) presents the ranking of probability answer sets with respect to a probability preference rule. The following definitions specify the ranking of probability answer sets according to a set of probability preference rules. \begin{definition} [Pareto Preference] Let $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$ be a probability aggregates answer set optimization program and $h_1, h_2$ be probability answer sets of $R_{gen}$. Then, $h_1$ is (Pareto) preferred over $h_2$ w.r.t. $R_{pref}$, denoted by $h_1 \succ_{R_{pref}} h_2$, iff there exists at least one probability preference rule $r \in R_{pref}$ such that $h_1 \succ_{r} h_2$ and for every other rule $r' \in R_{pref}$, $h_1 \succeq_{r'} h_2$. We say, $h_1$ and $h_2$ are equally (Pareto) preferred w.r.t. $R_{pref}$, denoted by $h_1 =_{R_{pref}} h_2$, iff for all $r \in R_{pref}$, $h_1 =_{r} h_2$. \end{definition} \begin{definition} [Maximal Preference] Let $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$ be a probability aggregates probability answer set optimization program and $h_1, h_2$ be probability answer sets of $R_{gen}$. Then, $h_1$ is (Maximal) preferred over $h_2$ w.r.t. 
$R_{pref}$, denoted by $h_1 \succ_{R_{pref}} h_2$, iff \[ |\{r \in R_{pref} | h_1 \succeq_{r} h_2\}| > |\{r \in R_{pref} | h_2 \succeq_{r} h_1\}|. \] We say, $h_1$ and $h_2$ are equally (Maximal) preferred w.r.t. $R_{pref}$, denoted by $h_1 =_{R_{pref}} h_2$, iff \[ |\{r \in R_{pref} | h_1 \succeq_{r} h_2\}| = | \{r \in R_{pref} | h_2 \succeq_{r} h_1\}|. \] \end{definition} It is worth noting that the Maximal preference definition is more {\em general} than the Pareto preference definition, since the Maximal preference relation {\em subsumes} the Pareto preference relation. \begin{example} The generator rules, $R_{gen}$, of the probability aggregates probability answer set program, $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$, that represents the two stages stochastic optimization with recourse problem presented in Example (\ref{ex:finance-code}), has $75$ probability answer sets, where the probability answer sets with the least total expected cost are: \[ \begin{array}{lcl} I_1 = \{ objective(500,50,200,1330), \ldots \} \\ I_2 = \{ objective(650,0,50,1360), \ldots \} \\ I_3 = \{ objective(550,0,150,1280), \ldots \} \\ I_4 = \{ objective(550,0,200,1340), \ldots \} \\ I_5 = \{ objective(600,0,100,1320), \ldots \} \\ I_6 = \{ objective(600,0,150,1380), \ldots \} \\ I_7 = \{ objective(500,0,200,1240), \ldots \} \end{array} \] The ground instantiation of the probability preference rule in $R_{pref}$ consists of one ground probability preference rule, denoted by $r$, which is \[ \begin{array}{l} min_x \{ \\ \qquad \langle 1330 : 1 \; | \; objective(500,50,200,1330) \; \rangle, \\ \qquad \langle 1360 : 1 \; | \; objective(650,0,50,1360) \; \rangle, \\ \qquad \langle 1280 : 1 \; | \; objective(550,0,150,1280) \; \rangle, \\ \qquad \langle 1240 : 1 \; | \; objective(500,0,200,1240) \; \rangle, \\ \qquad \langle 1340 : 1 \; | \; objective(550,0,200,1340) \; \rangle, \\ \qquad \langle 1320 : 1 \; | \; objective(600,0,100,1320) \; \rangle, \\ \qquad \langle 1380 : 1 \; 
| \; objective(600,0,150,1380) \; \rangle, \\ \ldots \} \end{array} \] Therefore, it can be easily verified that $I_7 \models_1 r$ and \[ \begin{array}{l} I_1 \models_{irr} r, I_2 \models_{irr} r, I_3 \models_{irr} r, I_4 \models_{irr} r, I_5 \models_{irr} r, I_6 \models_{irr} r \end{array} \] This implies that $I_7$ is the top (Pareto and Maximal) preferred probability answer set and represents the optimal solution for the two stages stochastic optimization with recourse problem described in Example (\ref{ex:finance}). The probability answer set $I_7$ assigns $500$ to $x$, $0$ to $y_1$, and $200$ to $y_2$ with total expected cost $\$1240$, which coincides with the optimal solution of the problem as described in Example (\ref{ex:finance}). \label{ex:finance-sol} \end{example} \section{Properties} In this section, we show that the probability aggregates probability answer set optimization programs syntax and semantics naturally subsume and generalize the syntax and semantics of classical aggregates classical answer set optimization programs \cite{Saad_ASOG} as well as naturally subsume and generalize the syntax and semantics of classical answer set optimization programs \cite{ASO} under the Pareto preference relation, since no notion of a Maximal preference relation has been defined for the classical answer set optimization programs. A classical aggregates classical answer set optimization program, $\Pi^c$, consists of two separate classical logic programs: a classical answer set program, $R^c_{gen}$, and a classical preference program, $R^c_{pref}$ \cite{Saad_ASOG}. The first classical logic program, $R^c_{gen}$, is used to generate the classical answer sets. The second classical logic program, $R^c_{pref}$, defines classical context-dependent preferences that are used to form a preference ordering among the classical answer sets of $R^c_{gen}$. 
\\ \\ Any classical aggregates classical answer set optimization program, $\Pi^c = R^c_{gen} \cup R^c_{pref}$, can be represented as a probability aggregates probability answer set optimization program, $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$, where all probability annotations appearing in every probability logic rule in $R_{gen}$ and all probability annotations appearing in every probability preference rule in $R_{pref}$ is equal to $[1,1]$, which means the truth value {\em true}, and $\tau$ is any arbitrary mapping $\tau: {\cal B_L} \rightarrow S_{disj}$. For example, for a classical aggregates classical answer set optimization program, $\Pi^c = R^c_{gen} \cup R^c_{pref}$, that is represented by the probability aggregates probability answer set optimization program, $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$, the classical logic rule \begin{eqnarray*} a_1 \; \vee \ldots \vee \; a_k \leftarrow a_{k+1}, \ldots, a_m, not\; a_{m+1}, \ldots, not\;a_{n} \end{eqnarray*} is in $R^c_{gen}$, where $\forall (1 \leq i \leq n)$, $a_i$ is an atom, iff \[ \begin{array}{r} a_1:[1,1] \; \vee \ldots \vee \; a_k:[1,1] \leftarrow a_{k+1}:[1,1], \ldots, a_m:[1,1], \\ not\; a_{m+1}:[1,1], \ldots, not\;a_{n}:[1,1] \end{array} \] is in $R_{gen}$. It is worth noting that the syntax and semantics of this class of probability answer set programs are the same as the syntax and semantics of the classical answer set programs \cite{Saad_DHPP,Saad_EHPP}. 
In addition, the classical preference rule \begin{eqnarray} C_1 \succ C_2 \succ \ldots \succ C_k \leftarrow l_{k+1},\ldots, l_m, not\; l_{m+1},\ldots, not\;l_{n} \label{rule:classical-pref} \end{eqnarray} is in $R^c_{pref}$, where $l_{k+1}, \ldots, l_{n}$ are literals and classical aggregate atoms and $C_1, C_2, \ldots, C_k$ are boolean combinations over a set of literals, classical aggregate atoms, and classical optimization aggregates iff \begin{eqnarray} C_1 \succ C_2 \succ \ldots \succ C_k \leftarrow l_{k+1}:[1,1],\ldots, l_m:[1,1], \notag \\ not\; l_{m+1}:[1,1],\ldots, not\;l_{n}:[1,1] \label{rule:classical-fuzzy-pref} \end{eqnarray} is in $R_{pref}$, where $C_1, C_2, \ldots, C_k$ and $l_{k+1},\ldots, l_n$ in (\ref{rule:classical-fuzzy-pref}) are exactly the same as $C_1, C_2, \ldots, C_k$ and $l_{k+1},\ldots, l_n$ in (\ref{rule:classical-pref}) except that each classical aggregate appearing within a classical aggregate atom or a classical optimization aggregate in (\ref{rule:classical-fuzzy-pref}) involves a conjunction of literals each of which is associated with the probability annotation $[1,1]$, where $[1,1]$ represents the truth value \emph{true}. In addition, any classical answer set optimization program is represented as a probability aggregates probability answer set optimization program by the same way as for classical aggregates classical answer set optimization programs except that classical answer set optimization programs disallows classical aggregate atoms and classical optimization aggregates. The following theorem shows that the syntax and semantics of probability aggregates probability answer set optimization programs subsume the syntax and semantics of the classical aggregates classical answer set optimization programs \cite{Saad_ASOG}. 
\begin{theorem} Let $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$ be the probability aggregates probability answer set optimization program equivalent to a classical aggregates classical answer set optimization program, $\Pi^c = R^c_{gen} \cup R^c_{pref}$. Then, the preference ordering of the probability answer sets of $R_{gen}$ w.r.t. $R_{pref}$ coincides with the preference ordering of the classical answer sets of $R^c_{gen}$ w.r.t. $R^c_{pref}$ under both Maximal and Pareto preference relations. \label{thm:theorem} \end{theorem} Assuming that \cite{ASO} assigns the lowest rank to the classical answer sets that do not satisfy either the body of a classical preference rule or the body of a classical preference and any of the boolean combinations appearing in the head of the classical preference rule, the following theorems show that the syntax and semantics of the probability aggregates probability answer set optimization programs subsume the syntax and semantics of the classical answer set optimization programs \cite{ASO}. \begin{theorem} Let $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$ be the probability aggregates probability answer set optimization program equivalent to a classical answer set optimization program, $\Pi^c = R^c_{gen} \cup R^c_{pref}$. Then, the preference ordering of the probability answer sets of $R_{gen}$ w.r.t. $R_{pref}$ coincides with the preference ordering of the classical answer sets of $R^c_{gen}$ w.r.t. $R^c_{pref}$. \label{thm:1} \end{theorem} \begin{theorem} Let $\Pi = \langle R_{gen} \cup R_{pref}, \tau \rangle$ be a probability aggregates probability answer set optimization program equivalent to a classical answer set optimization program, $\Pi^c = R^c_{gen} \cup R^c_{pref}$. A probability answer set $h$ of $R_{gen}$ is Pareto preferred probability answer set w.r.t. $R_{pref}$ iff a classical answer set $I$ of $R^c_{gen}$, equivalent to $h$, is Pareto preferred classical answer set w.r.t. $R^c_{pref}$. 
\label{thm:2} \end{theorem} Theorem (\ref{thm:theorem}) shows that, in general, probability aggregates probability answer set optimization programs can in addition be used solely for representing and reasoning about multi-objective classical optimization problems by the classical answer set programming framework under both the Maximal and Pareto preference relations, by simply replacing any probability annotation appearing in a probability aggregates probability answer set optimization program by the constant probability annotation $[1,1]$. Furthermore, Theorem (\ref{thm:1}) shows that, in general, probability aggregates probability answer set optimization programs can in addition be used solely for representing and reasoning about qualitative preferences under the classical answer set programming framework, under both Maximal and Pareto preference relations, by simply replacing any probability annotation appearing in a probability aggregates probability answer set optimization program by the constant probability annotation $[1,1]$. Theorem (\ref{thm:2}) shows the subsumption result of the classical answer set optimization programs. \section{Conclusions and Related Work} We developed syntax and semantics of probability aggregates probability answer set optimization programs to represent probability preference relations and rank probability answer sets based on minimization or maximization of some specified criteria, allowing the representation of, reasoning about, and solving of probability optimization problems. The probability aggregates probability answer set optimization framework modifies and subsumes the classical aggregates classical answer set optimization presented in \cite{Saad_ASOG} as well as the classical answer set optimization introduced in \cite{ASO}. We have shown the application of probability aggregates probability answer set optimization to the two stages stochastic optimization with recourse problem. 
To the best of our knowledge, this development is the first to consider a logical framework for representing and reasoning about optimal preferences in general in a quantitative and/or qualitative preferences in answer set programming frameworks. However, qualitative preferences were introduced in classical answer set programming in various forms. In \cite{Schaub-Comp}, preferences are defined among the rules of the logic program, whereas preferences among the literals described by the logic programs are introduced in \cite{Sakama}. Answer set optimization (ASO) \cite{ASO} and logic programs with ordered disjunctions (LPOD) \cite{LPOD} are two answer set programming based preference handling approaches, where context-dependant preferences are defined among the literals specified by the logic programs. Application-dependant preference handling approaches for planning were presented in \cite{Son-Pref,Schaub-Pref07}, where preferences among actions, states, and trajectories are defined, which are based on temporal logic. The major difference between \cite{Son-Pref,Schaub-Pref07} and \cite{ASO,LPOD} is that the former are specifically developed for planning, but the latter are application-independent. Contrary to the existing approaches for reasoning about preferences in answer set programming, where preference relations are specified among rules and literals in one program, an ASO program consists of two separate programs; an answer set program, $P_{gen}$, and a preference program, $P_{pref}$ \cite{ASO}. The first program, $P_{gen}$, is used to generate the answer sets, the range of possible solutions. The second program, $P_{pref}$, defines context-dependant preferences that are used to form a preference order among the answer sets of $P_{gen}$. 
Probability aggregates probability answer set optimization programs distinguish between probability answer set generation, by $R_{gen}$, and probability preference based probability answer set evaluation, by $R_{pref}$, which has several advantages. In particular, $R_{pref}$ can be specified independently from the type of $R_{gen}$, which makes preference elicitation easier and the whole approach more intuitive and easy to use in practice. In addition, more expressive forms of probability preferences can be represented in probability aggregates probability answer set optimization programs, since they allow several forms of boolean combinations in the heads of preference rules. In \cite{Saad_ASOG}, classical answer set optimization programs have been extended to allow aggregate preferences. This is to allow answer set optimization programs capable of encoding general optimization problems and intuitive encoding of Nash equilibrium strategic games. The classical answer set optimization programs with aggregate preference are built on top of classical answer set optimization \cite{ASO} and aggregates in classical answer set programming \cite{Recur-aggr}. It has been shown in \cite{Saad_ASOG} that the classical answer set optimization programs with aggregate preferences subsumes the classical answer set optimization programs described in \cite{ASO}. \bibliographystyle{named}
1,108,101,565,901
arxiv
\section{Introduction} The main challenge in Speech Emotion Recognition (SER) is to learn utterance-level representations from frame-level features, which starts with learning expressive frame-level representations. Previous works \citep{zadeh2017tensor, zadeh2018memory, zhao2019attention} mainly used LSTM \citep{hochreiter1997long} or GRU \citep{chung2014empirical}, a variant of RNN, to handle this challenge. However, RNN-based models can only consider left-to-right or right-to-left information. Even with bidirectional RNN that is learned for each direction and concatenated, both directions' information cannot be considered simultaneously. RNN architecture also propagates irrelevant information continuously, even if there is noise. Therefore, surrounding frame information cannot be learned properly, and frame-level representations are perturbed easily. To overcome these issues, we propose using a GNN architecture, as it can relate information in various parts simultaneously. Furthermore, GNN can also optimize the model size due to parameter sharing, which extends SER’s applications that require limited memory space, such as on-device speech recognition. There are many multimodal \citep{aguilar2019multimodal, priyasad2020attention} or multi-task \citep{li2019improved, latif2020multi} approaches in SER, but we focus on a unimodal and single-task setting in this study to demonstrate the effectiveness of GNN for SER. The utterance can be represented as a noisy graph structure in which voice frame and vacuum (with no voice) frame coexist. Therefore, it is important to ensure that useful (with-voice) frames are not perturbed by irrelevant (no-voice) frames. An ideal graph structure can help with it. If the input graphs are ideal, irrelevant information can be filtered out through the message passing process. To construct an ideal graph structure, we propose to use a cosine similarity metric.
The superiority of the cosine similarity metric in graph construction has already been demonstrated in other applications \citep{chen2020iterative}. We adopt it in SER as we believe that graph structures constructed through the cosine similarity metric are more robust to perturbation by pruning out neighbors that have irrelevant information. Furthermore, we design our GNN architecture using a message passing framework of the Graph Convolutional Network (GCN) \citep{kipf2016semi} with the cosine similarity-based graph structure. We also construct additional modules in our architecture, such as an acoustic pre-processing layer and a skip connection. The proposed GNN architecture better captures neighbors' information and enables expressive frame-level representation learning. \begin{figure*}[th] \centering \includegraphics[width=1\linewidth]{fig2} \caption{Top: Temporal graph structure and cosine similarity-based graph structure for the utterance sample. Bottom: t-SNE \citep{van2008visualizing} visualization of the representations for each method.} \end{figure*} Figure 1 gives an overview of SER representations with graphs. Figures 1 (b) and 1 (c) show a temporal graph structure and a cosine similarity-based graph structure for the utterance sample in Figure 1 (a). The red node is a vacuum (no-voice) node, the green node is a partial-voice (a part of the frame is voice) node, and the blue node is a full-voice (the entire frame contains voice) node, using 50ms frame window size and 25ms frame intervals. We compare the representations learned with Bidirectional GRU (BiGRU) and the GCN's representations learned from each graph structure. In Figure 1 (d), nodes 3 and 4, which should be used in prediction, are far from each other. This means that the representations for the prediction are not properly learned. In Figure 1 (e), nodes 3 and 4 are mapped to a nearby latent space, but nodes 1, 2, and 3 are also mapped closely due to the perturbation. 
In Figure 1 (f), the nodes with similar characteristics are mapped closely in the latent space, which means that our proposed method can be robust to perturbation. Our contributions are summarized as follows: \begin{itemize} \item We propose a cosine similarity-based graph structure as an ideal graph structure for SER. \item We present a Cosine similarity-based Graph Convolutional Network (CoGCN), as a GCN variant for SER. \item Experimental results show that our method outperforms state-of-the-art methods or provides competitive results with a significant model size reduction with only 1/30 parameters. \end{itemize} \section{Related Work} GNN learns node representations from neighbors through message passing and aggregation. \citep{kipf2016semi} proposed Graph Convolutional Network (GCN), inspired by the first-order graph Laplacian methods. \citep{hamilton2017inductive} proposed GraphSAGE (SAmple and aggreGatE) sampling a fixed number of neighbors to keep the computational complexity consistent. \citep{velivckovic2017graph} proposed Graph Attention Network (GAT) to allocate different weights to neighbors. \citep{xu2018powerful} developed Graph Isomorphism Network (GIN) that is provably the most expressive among GNN variants. Generally, GIN's message passing method learns expressive representation, but it does not work well for SER because the sum aggregation over the multiset including noises can disturb the representation learning. We design our GNN architecture using GCN's message passing method since we believe that it aggregates abundant distribution information in SER that values statistical information. Furthermore, we construct additional modules along the design space guidelines for well-performing GNN of \citep{you2020design}. To learn the utterance-level representations from the frame-level features, \citep{latif2019direct, peng2020speech} used the RNN-based model.
CLDNN \citep{latif2019direct} used a combination of CNN and LSTM to complement each architecture's shortcomings. ASRNN \citep{peng2020speech} used the BiLSTM and attention mechanism to strengthen the time step importance. Recently, CA-GRU \citep{su2020improving} introduced the GNN architecture in SER, where frame-level representations encoded in BiGRU are used as node features of GNN architecture. However, frame-level representations trained in this architecture can be easily perturbed and require many parameters. To make SER more robust to perturbation, we use a Fully Connected (FC) layer as a pre-processing layer instead of GRU. We also use a cosine similarity metric that does not require many additional parameters. \section{Approach} We first describe notations used for describing our method. Then we present the graph structure constructed through the cosine similarity metric. Finally, we present Cosine similarity-based Graph Convolutional Network (CoGCN), a GCN variant. \subsection{Notations} We begin by summarizing the notations used in the GNN architecture. Let $G=(V, E, X)$ denote a graph, where $V$ is a vertex set, $E$ is an edge set, $X \in \mathbb{R}^{n \times d}$ is a feature matrix, and $n$ and $d$ are the number of vertices and the dimension of the feature vector, respectively. $A \in \mathbb{R}^{n \times n}$ is an adjacency matrix and $\mathcal{N}(i)$ is a set of neighbors of node $i$. Therefore, given a set of graphs $\{G_1, ..., G_N\}$ and their labels $\{y_1, ..., y_N\}$, we aim to learn a graph representation vector $h_G$ to predict the label of the entire graph. \subsection{Graph Structure} Since the utterance is sequential data, it has a basic temporal graph structure where the center node has edges on both sides. The temporal graph structure is not ideal because it is difficult to capture long-term dependencies and is easily perturbed by irrelevant neighbors. 
In this paper, we propose a graph structure constructed with the cosine similarity metric as an ideal graph structure for SER. Cosine similarity-based graph structure can capture long-term dependencies and prevent perturbation from irrelevant neighbors. The process of generating the cosine similarity-based graph is as follows: $$ s_{ij}=\frac{x_i^Tx_j}{||x_i||\times||x_j||}, \eqno{(1)} $$ $$ a_{ij}=\begin{cases}1, & \mathrm{if} \; s_{ij} \geq \gamma \\ 0, & \mathrm{otherwise,} \end{cases} \eqno{(2)} $$ where $x_i \in \mathbb{R}^{d}$ is the $i$-th row of the feature matrix $X$, $\gamma$ is threshold hyperparameter, $s_{ij}$ is the cosine similarity between node $i$ and node $j$, and $a_{ij}$ is the corresponding element of the adjacency matrix $A$. \subsection{Model Architecture} \begin{figure}[t] \centering \includegraphics[width=1\linewidth]{fig1} \caption{Model architecture.} \end{figure} There are numerous GNN variants based on the message passing method used. GIN's message passing method learns expressive representation in many tasks but does not work well for SER because the sum aggregation over the multiset including noises can disturb the representation learning. Instead, we select the GCN's message passing method that best matches our task since we believe that it aggregates abundant distribution information in SER that values statistical information. \citep{you2020design} provided comprehensive guidelines about design spaces \{Batch Normalization, Dropout, Activation, Aggregation, Layer connectivity, Pre-processing Layers, Message passing Layers, Post-processing Layers\} for designing a well-performing GNN. We performed experiments with possible design spaces along the guidelines and found that one FC pre-processing layer and skip connection helped the most in improving the performance. 
Therefore, the node representation is calculated as follows: $$ h_i^{(0)}=\mathrm{ReLU}(W_px_i+b_p), \eqno{(3)} $$ $$ h_i^{\prime^{(k+1)}}=\mathrm{ReLU}(W_e^{(k)}\sum_{j}\frac{1}{\sqrt{\hat{d_i}\hat{d_j}}}h_j^{(k)}), \eqno{(4)} $$ $$ h_i^{(k+1)}=h_i^{\prime^{(k+1)}}+h_i^{(k)}, \eqno{(5)} $$ where $\hat{d_i}=1+\sum_{j \in \mathcal{N}(i)}a_{ij}$, $W_p \in \mathbb{R}^{z \times d}$ and $W_e^{(k)} \in \mathbb{R}^{z \times z}$ are the learnable weight matrices, and $b_p \in \mathbb{\mathbb{R}}^{z}$ is the learnable bias vector. $h_i^{(k)}$ is the node representation of the $k$-th layer of node $i$ for $k=1,...,K$, and $z$ is the number of hidden units. Node representation cannot be used directly for the graph classification task. Therefore, given the final iteration node representations, we use the readout function to produce a graph representation. Finally, the graph label is predicted through the FC layer followed by softmax activation that takes the graph representation as the input. $$ h_G=\mathrm{READOUT}({h_v^{(K)}\mid v \in G}), \eqno{(6)} $$ $$ \hat{y}_G=\mathrm{softmax}(W_oh_G+b_o), \eqno{(7)} $$ where $W_o \in \mathbb{R}^{C \times z}$ is the learnable weight matrix, $b_o \in \mathbb{R}^{C}$ is the learnable bias vector, and $C$ is the number of classes. $\mathrm{READOUT}$ can be a simple permutation-invariant function, such as summation or mean, or max-pooling. We believe that the graph's statistical and distributional information is important in this study, so we use the mean-pooling as the readout function, following \citep{xu2018powerful}'s suggestion. The overall architecture is illustrated in Figure 2. \section{Experiments} \subsection{Acoustic Features} Frame-level features are extracted from raw waveforms using the openSMILE \citep{eyben2010opensmile} speech toolkit with 25ms frame window size and 10ms frame intervals. 
We use the extended Geneva Minimalistic Acoustic Parameter Set (eGeMAPS) introduced by \citep{eyben2015geneva} to extract frame-level features with a total 88-dimension. \subsection{Dataset} An IEMOCAP \citep{busso2008iemocap} consists of 5 sessions, and each session includes 2 actors (1 male and 1 female). It consists of 9 emotions, but in this paper, following the previous studies, data with only 4 emotions \{neutrality, happiness (including excited), sadness, and anger\} is used, which is 5531 utterances. \subsection{Experiments Setup} \begin{table}[t] \centering \begin{tabular}{cccc} \textbf{Method} & \textbf{\# parameter} & \textbf{WA} & \textbf{UA}\\ \hline BiGRU & 464K & 59.76 & 59.71 \\ \hline TGCN & 56K & 61.52 & 62.43 \\ w/o skip & 56K & 60.97 & 61.77 \\ w/o skip, pre & 45K & 60.61 & 61.50 \\ \hline CoGCN & 56K & \textbf{62.64} & \textbf{63.67} \\ w/o skip & 56K & 61.35 & 62.56 \\ w/o skip, pre & 45K & 61.14 & 62.34 \\ \hline \end{tabular} \caption{\label{citation-guide} Comparison of different combinations. } \end{table} Each method has $K \in$ \{2, 3, 4\} layers with 128 hidden units, and dropout is applied with p = 0.1 after the readout function. $K$ is selected through the validation set results. We train all the models for a maximum of 50 epochs with a batch size of 32 using the Adam optimizer \citep{kingma2014adam} with a learning rate of 1e-3. We search the threshold parameter $\gamma$ in \{0.5, 0.55, 0.6\}. Finally, following the state-of-the-art methods' experimental settings being compared, we perform leave-one-person-out cross-validation and report the average of Weighted Accuracy (WA) and Unweighted Accuracy (UA). 
\subsection{Result Analysis} \begin{figure}[t] \centering \includegraphics[width=1\linewidth]{fig3} \caption{Training set and validation set learning curve of BiGRU, TGCN, and CoGCN.} \end{figure} \begin{table}[t] \small \centering \begin{tabular}{cccc} \textbf{Method} & \textbf{\# parameter} & \textbf{WA} & \textbf{UA}\\ \hline CLDNN \citep{latif2019direct} & 250K & - & 60.23 \\ ASRNN \citep{peng2020speech} & 6M & - & 62.60 \\ CA-GRU \citep{su2020improving} & 1.6M & 62.27 & \textbf{63.80} \\ \hline CoGCN (Ours) & \textbf{56K} & \textbf{62.64} & 63.67 \\ \hline \end{tabular} \caption{\label{citation-guide} Summary of results in terms of WA and UA. } \end{table} To demonstrate the effect of our proposed approach, we compare CoGCN with BiGRU and Temporal graph based GCN (TGCN) learned under the same conditions. Table 1 shows the study results with BiGRU, TGCN, and CoGCN. We can see the performance improvement with GNN over RNN (BiGRU). We can also see the benefit of cosine similarity-based graph structure when compared with temporal graph structure. When skip connection is removed from the GNN architectures, the performance drops in general. Besides, when the pre-processing layer is removed from GNN architectures without skip connection, we see an additional performance drop. In Figure 3, each method's learning curve with the training set and the validation set supports the findings above. Intuitively, BiGRU is relatively underfitting, and CoGCN learns more expressive node representations than TGCN. Table 2 shows a comparison with the state-of-the-art methods. The dash symbol denotes that reported results do not exist. Our method outperforms CLDNN, ASRNN and achieves a competitive performance when compared to CA-GRU. It is noteworthy that our approach significantly reduces the model size and the number of parameters is only 1/30 of CA-GRU. Such results can help applications that require limited memory space, such as on-device speech recognition. 
\section{Conclusions} In this paper, we propose a cosine similarity-based graph structure as an ideal graph structure for SER, and present the CoGCN, as a GCN variant for SER. Finally, we show that our method outperforms state-of-the-art methods or provides competitive results with a significant model size reduction with only 1/30 parameters. \section{Acknowledgement} This research was supported by the MSIT(Ministry of Science and ICT), Korea, under the ITRC(Information Technology Research Center) support program(IITP-2021-2020-0-01789) supervised by the IITP(Institute for Information \& Communications Technology Planning \& Evaluation).
1,108,101,565,902
arxiv
\subsubsection*{\bibname}} \usepackage[dvipsnames]{xcolor} \usepackage{tikz} \usetikzlibrary{positioning,fit,calc, decorations, arrows, shapes, shapes.geometric} \usetikzlibrary{cd} \tikzset{AmpRep/.style={ampersand replacement=\&}} \tikzset{center base/.style={baseline={([yshift=-.8ex]current bounding box.center)}}} \tikzset{paperfig/.style={center base,scale=0.9, every node/.style={transform shape}}} \tikzset{dpadded/.style={rounded corners=2, inner sep=0.7em, draw, outer sep=0.3em, fill={black!50}, fill opacity=0.08, text opacity=1}} \tikzset{dpad0/.style={outer sep=0.05em, inner sep=0.3em, draw=gray!75, rounded corners=4, fill=black!08, fill opacity=1, align=center}} \tikzset{dpadinline/.style={outer sep=0.05em, inner sep=2.5pt, rounded corners=2.5pt, draw=gray!75, fill=black!08, fill opacity=1, align=center, font=\small}} \tikzset{dpad/.style args={#1}{every matrix/.append style={nodes={dpadded, #1}}}} \tikzset{light pad/.style={outer sep=0.2em, inner sep=0.5em, draw=gray!50}} \tikzset{arr/.style={draw, ->, thick, shorten <=3pt, shorten >=3pt}} \tikzset{arr0/.style={draw, ->, thick, shorten <=0pt, shorten >=0pt}} \tikzset{arr1/.style={draw, ->, thick, shorten <=1pt, shorten >=1pt}} \tikzset{arr2/.style={draw, ->, thick, shorten <=2pt, shorten >=2pt}} \newcommand\cmergearr[5][]{ \draw[arr, #1, -] (#2) -- (#5) -- (#3); \draw[arr, #1, shorten <=0] (#5) -- (#4); } \newcommand\mergearr[4][]{ \coordinate (center-#2#3#4) at (barycentric cs:#2=1,#3=1,#4=1.2); \cmergearr[#1]{#2}{#3}{#4}{center-#2#3#4} } \newcommand\cunmergearr[5][]{ \draw[arr, #1, -, shorten >=0] (#2) -- (#5); \draw[arr, #1, shorten <=0] (#5) -- (#3); \draw[arr, #1, shorten <=0] (#5) -- (#4); } \newcommand\unmergearr[4][]{ \coordinate (center-#2#3#4) at (barycentric cs:#2=1.2,#3=1,#4=1); \cunmergearr[#1]{#2}{#3}{#4}{center-#2#3#4} } \newcommand\lab[1]{(#1)(lab-#1)} \tikzset{alternative/.style args={#1|#2|#3}{name=#1, circle, fill, inner sep=1pt,label={[name={lab-#1},gray!30!black, inner 
sep=1pt]#3:\scriptsize #2}} } \tikzset{tpt/.style args={#1|#2}{alternative={#1|#2|below}} } \tikzset{Dom/.style args={#1[#2] (#3) around #4}{dpadded, name=#3, label={[name={lab-#3},align=center,label distance=-1.9em, shading = axis, top color=white, bottom color=black!04, #2]120:#1}, fit={ #4 }, inner sep=0.5em}} \relax \usepackage{mathtools} \usepackage{amssymb} \DeclareMathSymbol{\shortminus}{\mathbin}{AMSa}{"39} \usepackage{bbm} \usepackage{lmodern} \usepackage{faktor} \usepackage{graphicx} \usepackage{scalerel} \usepackage{enumitem} \usepackage{nicefrac}\let\nf\nicefrac \usepackage{color} \usepackage{hyperref} \hypersetup{colorlinks=true, linkcolor=blue!75!black, urlcolor=magenta, citecolor=green!50!black} \usepackage{amsthm,thmtools} \usepackage[noabbrev,nameinlink,capitalize]{cleveref} \theoremstyle{plain} \newtheorem{theorem}{Theorem} \declaretheorem[name=Corollary,parent=theorem]{coro} \declaretheorem[name=Proposition,sibling=theorem,postheadhook={% }]{prop} \declaretheorem[name=Lemma,sibling=theorem]{lemma} % \newtheorem{claim}{Claim} \newtheorem{remark}{Remark} \theoremstyle{definition} \declaretheorem[name=Definition, qed=$\square$]{defn} \crefname{defn}{Definition}{Definitions} \crefname{prop}{Proposition}{Propositions} \relax \let\Horig\H \let\H\relax \DeclareMathOperator{\H}{\mathrm{H}} \DeclareMathOperator{\I}{\mathrm{I}} \DeclareMathOperator*{\Ex}{\mathbb{E}} \DeclareMathOperator*{\EX}{\scalebox{1.5}{$\mathbb{E}$}} \newcommand{\mat}[1]{\mathbf{#1}} \DeclarePairedDelimiterX{\infdivx}[2]{(}{)}{% #1\;\delimsize\|\;#2% } \newcommand{I\mkern-8muD}{I\mkern-8muD} \newcommand{\thickD\infdivx}{I\mkern-8muD\infdivx} \newcommand{\rightarrow\mathrel{\mspace{-15mu}}\rightarrow}{\rightarrow\mathrel{\mspace{-15mu}}\rightarrow} \newcommand{\datadist}[1]{\Pr\nolimits_{#1}} \makeatletter \newcommand{\subalign}[1]{% \vcenter{% \Let@ \restore@math@cr \default@tag \baselineskip\fontdimen10 \scriptfont\tw@ \advance\baselineskip\fontdimen12 \scriptfont\tw@ 
\lineskip\thr@@\fontdimen8 \scriptfont\thr@@ \lineskiplimit\lineskip \ialign{\hfil$\m@th\scriptstyle##$&$\m@th\scriptstyle{}##$\hfil\crcr #1\crcr }% }% } \makeatother \newcommand\numberthis{\addtocounter{equation}{1}\tag{\theequation}} \relax \newcommand{\ssub}[1]{_{\!_{#1}\!}} \newcommand{\bp}[1][L]{\mat{p}\ssub{#1}} \newcommand{\bP}[1][L]{\mat{P}\ssub{#1}} \newcommand{\mathcal V}{\mathcal V} \newcommand{\mathcal N}{\mathcal N} \newcommand{\mathcal E}{\mathcal E} \newcommand{\boldsymbol\alpha}{\boldsymbol\alpha} \newcommand{\boldsymbol\beta}{\boldsymbol\beta} \DeclareMathAlphabet{\mathdcal}{U}{dutchcal}{m}{n} \DeclareMathAlphabet{\mathbdcal}{U}{dutchcal}{b}{n} \newcommand{\dg}[1]{\mathbdcal{#1}} \newcommand{\PDGof}[1]{{\dg M}_{#1}} \newcommand{\UPDGof}[1]{{\dg N}_{#1}} \newcommand\VFE{\mathit{V\mkern-4mu F\mkern-4.5mu E}} \newcommand\Inc{\mathit{Inc}} \newcommand{\IDef}[1]{\mathit{IDef}_{\!#1}} \newcommand{\ed}[3]{#2% \overset{\smash{\mskip-5mu\raisebox{-1pt}{$\scriptscriptstyle #1$}}}{\rightarrow} #3} \newcommand{\nhphantom}[2]{\sbox0{\kern-2% \nulldelimiterspace$\left.\delimsize#1\vphantom{#2}\right.$}\hspace{-.97\wd0}} \makeatletter \newsavebox{\abcmycontentbox} \newcommand\DeclareDoubleDelim[5]{ \DeclarePairedDelimiterXPP{#1}[1]% \sbox{\abcmycontentbox}{\ensuremath{##1}}% }{#2}{#5}{}% {% \nhphantom{#3}{\usebox\abcmycontentbox}% \hspace{1.2pt} \delimsize#3% \mathopen{}\usebox{\abcmycontentbox}\mathclose{}% \delimsize#4\hspace{1.2pt}% \nhphantom{#4}{\usebox\abcmycontentbox}% }% } \makeatother \DeclareDoubleDelim \SD\{\{\}\} \DeclareDoubleDelim \bbr[[]] \makeatletter \newsavebox{\aar@content} \newcommand\aar{\@ifstar\aar@one@star\aar@plain} \newcommand\aar@one@star{\@ifstar\aar@resize{\aar@plain*}} \newcommand\aar@resize[1]{\sbox{\aar@content}{#1}\scaleleftright[3.8ex] {\Biggl\langle\!\!\!\!\Biggl\langle}{\usebox{\aar@content}} {\Biggr\rangle\!\!\!\!\Biggr\rangle}} \DeclareDoubleDelim \aar@plain\langle\langle\rangle\rangle \makeatother \relax \usepackage{xpatch} 
\makeatletter \xpatchcmd{\thmt@restatable} {\csname #2\@xa\endcsname\ifx\@nx#1\@nx\else[{#1}]\fi} {\ifthmt@thisistheone% \csname #2\@xa\endcsname\ifx\@nx#1\@nx\else[{#1}]\fi \else\fi% } {}{\typeout{FIRST PATCH TO THM RESTATE FAILED}} \xpatchcmd{\thmt@restatable} {\csname end#2\endcsname} {\ifthmt@thisistheone\csname end#2\endcsname\else\fi} {}{\typeout{FAILED SECOND THMT RESTATE PATCH}} \newcommand{\recall}[1]{\medskip\par\noindent{\bf \Cref{thmt@@#1}.} \begingroup\em \noindent \expandafter\csname#1\endcsname* \endgroup\par\smallskip} \setlength\marginparwidth{1.55cm} % \let\oldmarginpar\marginpar \renewcommand{\marginpar}[1]{% \leavevmode% \oldmarginpar{#1}% \ignorespacesafterend\ignorespaces} \newsavebox\marginprooflinkbox \newenvironment{linked}[3][]{% \def\linkedproof{#3}% \def\linkedtype{#2}% \ifmarginprooflinks% \sbox\marginprooflinkbox{% \centering% \hyperref[proof:\linkedproof]{% \color{blue!30!white}% \scaleleftright{$\Big[$}{\,\mbox{\footnotesize\centering\tt\begin{tabular}{@{}c@{}} link to\\[-0.15em] proof \end{tabular}}\,}{$\Big]$}}~} \fi \restatable[#1]{#2}{#2:#3}\label{#2:#3}% \ifmarginprooflinks\marginpar{\vspace{-1ex}\usebox\marginprooflinkbox}\fi% }% { \sbox\marginprooflinkbox{} \endrestatable% } \makeatother \newcounter{proofcntr} \newenvironment{lproof}{\begin{proof}\refstepcounter{proofcntr}}{\end{proof}} \usepackage{cancel} \newcommand{\Cancel}[2][black]{{\color{#1}\cancel{\color{black}#2}}} \usepackage{tcolorbox} \tcbuselibrary{most} \tcolorboxenvironment{lproof}{ enhanced, parbox=false, boxrule=0pt, frame hidden, borderline west={4pt}{0pt}{blue!20!black!40!white}, colback={blue!20!black!05!white}, sharp corners, breakable, } \newcommand{\begthm}[3][]{\begin{#2}[{name=#1},restate=#3,label=#3]} \relax \newcommand{\TODO}[1][INCOMPLETE]{{\centering\Large\color{red}$\langle$~\texttt{#1}~$\rangle$\par}} \newcommand{\dfootnote}[1]{% \let\oldthefootnote=\thefootnote% \setcounter{footnote}{999} \renewcommand{\thefootnote}{\textdagger}% \footnote{#1}% 
\let\thefootnote=\oldthefootnote% } \newcommand{\dfootnotemark}{ \footnotemark[999] } \begin{document} \runningtitle{Loss as the Inconsistency of a PDG: Choose Your Model, Not Your Loss} \twocolumn[ % \aistatstitle{ Loss as the Inconsistency of a Probabilistic Dependency Graph: \\ Choose Your Model, Not Your Loss Function} \aistatsauthor{ Oliver E. Richardson } \aistatsaddress{ Cornell University } ] \begin{abstract} In a world blessed with a great diversity of loss functions, we argue that that choice between them is not a matter of taste or pragmatics, but of model. Probabilistic dependency graphs (PDGs) are probabilistic models that come equipped with a measure of ``inconsistency''. We prove that many standard loss functions arise as the inconsistency of % a natural PDG describing the appropriate scenario% , and use the same approach to justify a well-known connection between regularizers and priors. % We also show that the PDG inconsistency captures a large class of statistical divergences, and detail benefits of thinking of them in this way, including an intuitive visual language for deriving inequalities between them. In variational inference, we find that the ELBO, a somewhat opaque objective for latent variable models, and variants of it arise for free out of uncontroversial modeling assumptions% ---as do simple graphical proofs of their corresponding bounds. Finally, we observe that inconsistency becomes the log partition function (free energy) in the setting where PDGs are factor graphs. \end{abstract} \section{INTRODUCTION} Many tasks in artificial intelligence have been fruitfully cast as optimization problems, but often the choice of objective is not unique. For instance, a key component of a machine learning system is a loss function which the system must minimize, and a wide variety of losses are used in practice.
Each implicitly represents different values and results in different behavior, so the choice between them can be quite important \parencite{wang2020comprehensive,jadon2020survey}. Yet, because it's unclear how to choose a ``good'' loss function, the choice is usually made by empirics, tradition, and an instinctive calculus acquired through the practice---not by explicitly laying out beliefs. Furthermore, there is something to be gained by fiddling with these loss functions: one can add regularization terms, to (dis)incentivize (un)desirable behavior. But the process of tinkering with the objective until it works is often unsatisfying. It can be a tedious game without clear rules or meaning, while results so obtained are arguably overfitted and difficult to motivate. By contrast, a choice of \emph{model} admits more principled discussion, in part because models are testable; it makes sense to ask if a model is accurate. This observation motivates our proposal: instead of specifying a loss function directly, one articulates a situation that gives rise to it, in the (more interpretable) language of probabilistic beliefs and certainties. Concretely, we use the machinery of Probabilistic Dependency Graphs (PDGs), a particularly expressive class of graphical models that can incorporate arbitrary (even inconsistent) probabilistic information in a natural way, and comes equipped with a well-motivated measure of inconsistency \parencite{richardson2020probabilistic}. A primary goal of this paper is to show that PDGs and their associated inconsistency measure can provide a ``universal'' model-based loss function. Towards this end, we show that many standard objective functions---cross entropy, square error, many statistical distances, the ELBO, regularizers, and the log partition function---% arise naturally by measuring the inconsistency of the appropriate underlying PDG.
This is somewhat surprising, since PDGs were not designed with the goal of capturing loss functions at all. Specifying a loss function indirectly like this is in some ways more restrictive, but it is also more intuitive (it requires no technical familiarity with losses, for instance), and admits more grounded defense and criticism. For a particularly powerful demonstration, consider the variational autoencoder (VAE), an enormously successful class of generative model that has enabled breakthroughs in image generation, semantic interpolation, and unsupervised feature learning \parencite{kingma2013autoencoding}. Structurally, a VAE for a space $X$ consists of a (smaller) latent space $Z$, a prior distribution $p(Z)$, a decoder $d(X | Z)$, and an encoder $e(Z| X)$. A VAE is not considered a ``graphical model'' for two reasons. The first is that the encoder $e(Z|X)$ has the same target variable as $p(Z)$, so something like a Bayesian Network cannot simultaneously incorporate them both (besides, they could be inconsistent with one another). The second reason: it is not a VAE's structure, but rather its \emph{loss function} that makes it tick. A VAE is typically trained by maximizing the ``ELBO'', a somewhat difficult-to-motivate function of a sample $x$, originating in variational calculus. We show that $-\mathrm{ELBO}(x)$ is also precisely the inconsistency of a PDG containing $x$ and the probabilistic information of the autoencoder ($p, d$, and $e$). We can form such a PDG precisely because PDGs allow for inconsistency. Thus, PDG semantics simultaneously legitimize the strange structure of the VAE, and also justify its loss function, which can be thought of as a property of the model itself (its inconsistency), rather than some mysterious construction borrowed from physics.
Representing objectives as model inconsistencies, in addition to providing a principled way of selecting an objective, also has beneficial pedagogical side effects, because of the \emph{structural} relationships between the underlying models. For instance, these relationships will allow us to derive simple and intuitive visual proofs of technical results, such as the variational inequalities that traditionally motivate the ELBO, and the monotonicity of R\'enyi divergence. In the coming sections, we show in more detail how this concept of inconsistency, beyond simply providing a permissive and intuitive modeling framework, reduces exactly to many standard objectives used in machine learning and to measures of statistical distance. We demonstrate that this framework clarifies the relationships between them, by providing clear derivations of otherwise opaque inequalities. \section{PRELIMINARIES} We generally use capital letters for variables, and lower case letters for their values. For variables $X$ and $Y$, a conditional probability distribution (cpd) $p$ on $Y$ given $X$, written $p(Y|X)$, consists of a probability distribution on $Y$ (denoted $p(Y| X\!=\!x)$ or $p(Y|\,x)$ for short), for each possible value $x$ of $X$. If $\mu$ is a probability on outcomes that determine $X$ and $Y$, then $\mu(X)$ denotes the marginal of $\mu$ on $X$, and $\mu(Y|X)$ denotes the conditional marginal of $\mu$ on $Y$ given $X$. Depending on which we find clearer in context, we write either $\Ex_\mu f$ or $\Ex_{\omega \sim \mu} f(\omega)$ for expectation of $f : \Omega \to \mathbb R$ over a distribution $\mu$ with outcomes $\Omega$.
We write $\thickD\infdivx\mu\nu = \Ex_\mu \log \frac{\mu}{\nu}$ for the relative entropy (KL Divergence) of $\nu$ with respect to $\mu$, we write $\H(\mu) := \Ex_\mu \log\frac1\mu$ for the entropy of $\mu$, $\H_\mu(X):= \H(\mu(X))$ for the marginal entropy on a variable $X$, and $\H_\mu(Y \mid X):= \Ex_\mu \log \nicefrac1{\mu(Y|X)}$ for the conditional entropy of $Y$ given $X$. A \emph{probabilistic dependency graph} (PDG) \parencite{richardson2020probabilistic}, like a Bayesian Network (BN), is a directed graph with cpds attached to it. While this data is attached to the \emph{nodes} of a BN, it is attached to the \emph{edges} of a PDG. For instance, a BN of shape $ X \!\to\! Y \!\leftarrow\! Z $ contains a single cpd $\Pr(Y | X,Z)$ on $Y$ given joint values of $X$ and $Z$, while a PDG of the same shape contains two cpds $p(Y | X)$ and $q(Y | Z)$. The second approach is strictly more expressive, and can encode joint dependence with an extra variable. All information in a PDG can be expressed with variable confidence. We now restate the formal definition. \begin{defn} \label{defn:pdg} A Probabilistic Dependency Graph (PDG) is a tuple $\dg M = (\mathcal N,\mathcal E,\mathcal V,\mat p, \boldsymbol\alpha, \boldsymbol\beta)$, where \vspace{-1em} \begin{itemize}[leftmargin=1.5em, itemsep=0pt] \item $\mathcal N$ is a set of nodes, corresponding to variables; \item $\mathcal V$ associates each node $X \in \mathcal N$ with a set $\mathcal V(X)$ of possible values that the variable $X$ can take; \item $\mathcal E$ is a set of labeled edges $\{ \ed LXY \}$, each with a source $X$ and target $Y$ from $\mathcal N$; \item $\mat p$ associates a cpd $\bp(\mskip-1muY\mskip-1mu|\mskip-1muX\mskip-2mu)$ to each edge $\ed L{X\!\!}{\!Y} \!\in\! 
\mathcal E$; \item $\boldsymbol\alpha$ associates to each edge $\ed L{X}{Y}$ a non-negative number $\alpha_L$ representing the modeler's confidence in the functional dependence of $Y$ on $X$; \item $\boldsymbol\beta$ associates to each edge $L$ a number $\beta_L$, the modeler's confidence in the reliability of the cpd $\bp$.%
\qedhere \end{itemize}%
\end{defn} How should one choose parameters $\boldsymbol\beta$ and $\boldsymbol\alpha$? A choice of $\beta\ssub L=0$ means that the cpd $p\ssub L$ is effectively ignored, in the sense that such a PDG is equivalent to one in which the edge is attached to a different cpd $q \ne p\ssub L$. On the other hand, a large value of $\beta\ssub L$ (or $\infty$) indicates high (or absolute) confidence in the cpd. By default, we suppose $\beta\!=\! 1$, which is just a convenient choice of units---what's important are the magnitudes of $\beta$ relative to one another. The parameter $\boldsymbol\alpha$, typically in $[0,1]$, represents certainty in the causal structure of the graph, and plays only a minor role in this paper. Like other graphical models, PDGs have semantics in terms of joint distributions $\mu$ over all variables. Most directly, a PDG $\dg M$ determines two scoring functions on joint distributions $\mu$. For the purposes of this paper, the more important of the two is the \emph{incompatibility} of $\mu$ with respect to $\dg M$, which measures the quantitative discrepancy between $\mu$ and $\dg M$'s cpds, and is given by \begin{equation} \Inc_{\dg M}(\mu)\! := \!\!\!\sum_{\ed LXY}\!\! \beta\ssub L \Ex_{{x \sim \mu(\mskip-2.6muX\mskip-2mu)}} \thickD\infdivx[\Big]{\mu(Y|\, x)}{\bp(Y |\, x)}. \label{eq:inc} \end{equation} \vskip-2ex Relative entropy $I\mkern-8muD(\mu\Vert p)$ measures divergence between $\mu$ and $p$, and can be viewed as the overhead (in extra bits per sample) of using codes optimized for $p$, when in fact samples are distributed according to $\mu$ \parencite{mackay2003information}.
But if one uses edges in proportion to the confidence one has in them, then inefficiencies of high-confidence cpds are compounded, and hence more costly. So $\Inc_{\dg M}(\mu)$ measures the total excess cost of using $\dg M$'s cpds in proportion to their confidences $\boldsymbol\beta$, when worlds are distributed according to $\mu$. The \emph{inconsistency} of $\dg M$, denoted $\aar{\dg M}$, is the smallest possible incompatibility of $\dg M$ with any distribution: $\aar{\dg M} := \inf_\mu \Inc_{\dg M}(\mu)$. This quantity, which does not depend on $\boldsymbol\alpha$, is the primary focus of this paper. The second scoring function defined by a PDG $\dg M$, called the \emph{I}nformation \emph{Def}iciency, measures the {qualitative} discrepancy between $\dg M$ and $\mu$, and is given by \vskip-3ex \[ \IDef{\dg M}(\mu) := -\H(\mu) + \sum_{\ed LXY} \alpha\ssub L \H_\mu(Y\mid X). \] \vskip-2ex $\IDef{\dg M}(\mu)$ can be thought of as the information needed to separately describe the target of each edge $L$ given the value of its source (weighted by $\alpha\ssub L$) beyond the information needed to fully describe a sample from $\mu$. As shown by \textcite{richardson2020probabilistic}, it is via these two scoring functions that PDGs capture other graphical models. The distribution specified by a BN $\dg B$ is the unique one that minimizes both $\Inc_{\dg B}$ and $\IDef{\dg B}$ (and hence every positive linear combination of the two), while the distribution specified by a factor graph $\Phi$ uniquely minimizes the sum $\Inc_{\Phi} + \IDef{\Phi}$. In general, for any $\gamma > 0$, one can consider a weighted combination \( \bbr{\dg M}_\gamma(\mu) \!:= \Inc_{\dg M}(\mu) + \gamma\; \IDef{\dg M}(\mu), \) for which there is a corresponding $\gamma$-inconsistency $\aar{\dg M}_\gamma := \inf_\mu \bbr{\dg M}_\gamma(\mu)$. In the limit as $\gamma \to\! 0$, there is always a unique best distribution whose score is $\aar{\dg M}$.
We now present some shorthand to clarify the presentation. We typically conflate a cpd's symbol with its edge label, thus drawing the PDG with a single edge attached to $f(Y|X)$ as \begin{tikzpicture}[center base] \node[dpadinline] (X) at (0,0) {$X$}; \node[dpadinline] (Y) at (1.2,0){\small$Y$}; \draw[arr1,->] (X) -- node[fill=white, fill opacity=1, pos=0.35, inner sep=-1pt]{$f$} (Y); \end{tikzpicture}\,. \Cref{defn:pdg} is equivalent to one in which edge sources and targets are both \emph{sets} of variables. This allows us to indicate joint dependence with multi-tailed arrows, joint distributions with multi-headed arrows, and unconditional distributions with nothing at the tail. For instance, we draw \\[-0.5ex] \phantom{aa} $p(Y|X,Z)$ as \begin{tikzpicture}[center base] \node[dpadinline] (Y) at (0.9,0) {$Y$}; \node[dpadinline] (X) at (0,-0.4) {$X$}; \node[dpadinline] (Z) at (0, 0.4) {$Z$}; \mergearr[arr2] XZY \node[above right=-1pt and -3pt of center-XZY] {$p$}; \end{tikzpicture}\,, and $q(A,B)$ as \begin{tikzpicture}[center base] \node[dpadinline] (A) at (0,0) {$A$}; \node[dpadinline] (B) at (1,0) {$B$}; \coordinate (above) at (0.5,0.8); \coordinate (center) at (0.5,0.4); \cunmergearr[arr1] {above}{A}{B}{center} \node[above left=0pt and 0pt of center,inner sep=2pt]{$q$}; \end{tikzpicture}\,. \\ To emphasize that a cpd $f(Y|X)$ is degenerate (a function $f:X\to Y$), we will draw it with two heads, as in: \begin{tikzpicture}[center base] \node[dpad0, inner sep=2.5pt, rounded corners=2.5pt] (X) at (0,0) {\small$X$}; \node[dpad0, inner sep=2.5pt, rounded corners=2.5pt] (Y) at (1.2,0){\small$Y$}; \draw[arr1,->>] (X) -- node[fill=white, fill opacity=1, pos=0.35, inner sep=-1pt]{$f$} (Y); \end{tikzpicture}\,. 
We identify an event $X\!\!=\!x$ with the degenerate unconditional distribution $\delta_x(X)$ that places all mass on $x$; hence it may be associated to an edge and drawn simply as \begin{tikzpicture}[center base] \node[dpadinline] (X) {\small$X$}; \draw[arr1,<<-] (X) -- node[above, pos=0.60, inner sep=1pt]{$x$} +(-1,0); \end{tikzpicture}\,. To specify a confidence $\beta \ne 1$, we place the value near the edge, lightly colored and parenthesized, as in: \!\!\!\! \begin{tikzpicture}[center base] \node[dpad0, inner sep=2.5pt, rounded corners=2.5pt](X){\small $X$}; \draw[arr, <-] (X) -- node[above,pos=0.62, inner sep=1pt, outer sep=-2pt, fill=white]{$p\!$} node[below,pos=0.55, inner sep=0, outer sep=1.5pt]{${\scriptscriptstyle\color{gray}(\beta)}$} ++(-1,0); \end{tikzpicture}\,, and we write ${\scriptstyle\color{gray}(\infty)}$ for the limit of high confidence ($\beta\!\to\! \infty$). Intuitively, believing more things can't make you any less inconsistent. \Cref{lemma!} captures this formally: adding cpds or increasing confidences cannot decrease a PDG's inconsistency. \newsavebox\olibox \sbox\olibox{$\aar{\;\cdot\;}$} \begin{linked}[{Monotonicity of \usebox\olibox}] {lemma}{!} \label{lemma!} Suppose PDGs $\dg M$ and $\dg M'$ differ only in their edges (resp. $\mathcal E$ and $\mathcal E'$) and confidences (resp. $\boldsymbol\beta$ and $\boldsymbol\beta'$). If $\mathcal E \subseteq \mathcal E'$ and $\beta_L \le \beta'_L$ for all $L \in \mathcal E$, then $\aar{\dg M}_{\gamma} \le \aar{\dg M'}_{\gamma}$ for all $\gamma$.%
\footnote{All proofs can be found in \cref{appendix:proofs}.} \end{linked} \vspace{-1ex} As we will see, this tool is sufficient to derive many interesting relationships between loss functions. \section{STANDARD METRICS AS INCONSISTENCIES} \def\Dd{{\mathcal D}} \def\Dd{{\mathcal D}} Suppose you believe that $X$ is distributed according to $p(X)$, and also that it (certainly) equals some value $x$.
These beliefs are consistent if $p(X\!\!=\!x) =\! 1$ but become less so as $p(X\!\!=\!x)$ decreases. In fact, this inconsistency is equal to the information content $\I_p[X\!\!=\!x] := -\log p(X\!\!=\!x)$, or \emph{surprisal} \parencite{tribus1961information}, of the event $X \!\!=\! x$, according to $p$.%
\footnotemark\ In machine learning, $\I_p$ is usually called ``negative log likelihood'', and is perhaps the most popular objective for training generative models \parencite{deepgennotes,myung2003tutorial}.%
\footnotetext{This construction requires the event $X\!\!=\!x$ to be measurable. One can get similar, but subtler, results for densities, where this is not the case; see \cref{appendix:density}.} \begin{linked}{prop}{pdg-Ix} Consider a distribution $p(X)$. The inconsistency of the PDG comprising $p$ and $X\!\!=\!x$ equals the surprisal $\I_p[X\!\!=\!x]$. That is, \vspace{-1.5ex} \[ \I_p[X\!\!=\!x] = \aar[\Big] { \begin{tikzpicture}[baseline=-0.7ex] \node[dpad0] (X) {$X$}; \coordinate (A) at ($(X) + (-0.9,0)$); \draw[arr1] (A) -- node[above]{$p$} (X); %
\draw[arr2, <<-] (X) -- node[above,pos=0.8]{$x$} ++(0.9, 0); \end{tikzpicture} }. \] \vspace{-1.5ex} (Recall that $\aar{\dg M}$ is the inconsistency of the P\kern-1ptD\kern-1ptG $\dg M$%
.%
) \end{linked} In some ways, this result is entirely unsurprising, given that \eqref{eq:inc} is a flexible formula built out of information theoretic primitives. Even so, note that the inconsistency of believing both a distribution and an event happens to be the standard measure of discrepancy between the two%
---and is even named after ``surprise'', a particular expression of epistemic conflict. Still, we have a ways to go before this amounts to any more than a curiosity. One concern is that this picture is incomplete; we train probabilistic models with more than one sample. What if we replace $x$ with an empirical distribution over many samples?
{%
\def\Dd{{\mathcal D}}%
\begin{linked}{prop}{expected-surprise}%
If $p(X)$ is a probabilistic model of $X$, and ${\mathcal D} = \{ x_i \}_{i=1}^m$ is a dataset with empirical distribution $\datadist{\mathcal D}$, then ~~~$\mathrm{CrossEntropy}(\datadist{\mathcal D}, p) = $ %
\vskip-4ex \[ \frac{1}{m} \sum_{i=1}^m \I_p[X\!\!=\!x_i] = \aar[\Big] { \begin{tikzpicture}[center base] \node[dpad0] (X) {$X$}; \coordinate (A) at ($(X) + (-0.9,0)$); \draw[arr2] (A) -- node[above]{$p$} (X); \draw[arr2, <-] (X) -- node[above,pos=0.6,inner sep=2pt]{${\datadist{\mathcal D}}$} node[below,pos=0.65,inner sep=2pt] {${\color{gray}\scriptscriptstyle(\infty)}$} ++(1.2, 0); \end{tikzpicture} } %
~{+ \H(\datadist{\mathcal D})} . \] \end{linked} \begin{remark} The term $\H(\datadist{\mathcal D})$ is a constant depending only on the data, so is irrelevant for optimizing $p$. \end{remark} } Essentially the only choices we've made in specifying the PDG of \cref{prop:expected-surprise} are the confidences. But $\mathrm{CrossEntropy}(\datadist{\mathcal D},p)$ is the expected code length per sample from $\datadist{\mathcal D}$, when using codes optimized for the (incorrect) distribution $p$. So implicitly, a modeler using cross-entropy has already articulated a belief that the data distribution $\datadist{\mathcal D}$ is the ``true one''. To get the same effect from a PDG, the modeler must make this belief explicit by placing infinite confidence in $\datadist{\mathcal D}$. Now consider an orthogonal generalization of \cref{prop:pdg-Ix}, in which the sample $x$ is only a partial observation of $(x,z)$ from a joint model $p(X,Z)$.
\begin{linked}{prop}{marginal-ll} If $p(X,Z)$ is a joint distribution, then the information content of the partial observation $X=x$ is given by \vskip-4ex \begin{equation} \I_p[X\!\!=\!x] = \aar[\Bigg]{ \begin{tikzpicture}[center base] \node[dpad0] (Z) {$Z$}; \node[dpad0,right=.5 of Z] (X) {$X$}; \coordinate (A) at ($ (X)!.5!(Z) + (0,0.7)$); \draw[arr1] (A) -- node[right]{$ p$} ++(0,-0.25) -- (X); \draw[arr1] (A) -- ++(0,-0.25) -- (Z); %
\draw[arr2, <<-] (X) -- node[above,pos=0.8]{$ x$} ++(0.9, 0); \end{tikzpicture} }. \label{eq:mll} \end{equation} \end{linked} Intuitively, the inconsistency of the PDG on the right side of \eqref{eq:mll} is localized to $X$, where the observation $x$ conflicts with $p(X)$; other variables don't make a difference. The multi-sample partial-observation generalization also holds; see \cref{appendix:more-crossent}. So far we have considered models of an unconditional distribution $p(X)$. Because they are unconditional, such models must describe how to generate a complete sample $X$ without input, and so are called \emph{generative}; the process of training them is called \emph{unsupervised} learning \parencite{elts_stat_learn2009}. In the (more common) \emph{supervised} setting, we train \emph{discriminative} models to predict $Y$ from $X$, via labeled samples $\{(x_i,y_i)\}_i$. There, cross entropy loss is perhaps even more dominant---and it is essentially the inconsistency of a PDG consisting of the predictor $h(Y|X)$ together with high-confidence data. {\def\Dd{{\mathcal D}} \begin{linked}[Cross Entropy, Supervised] {prop}{supervised-cross-entropy} The inconsistency of the PDG comprising a probabilistic predictor $h(Y|X)$, and a high-confidence empirical distribution $\datadist{\mathcal D}$ of a dataset ${\mathcal D} = \{(x_i, y_i)\}_{i=1}^{m}$ equals the cross-entropy loss (minus the empirical uncertainty in $Y$ given $X$, a constant depending only on ${\mathcal D}$).
That is, \[ \aar**{ \begin{tikzpicture}[center base] \node[dpad0] (Y) {$Y$}; \node[dpad0,left=.9 of Y] (X) {$X$}; \coordinate (A) at ($ (X)!.5!(Y) + (0,0.9)$); \draw[arr1] (A) -- node[left,inner sep=3pt]{$\datadist{\mathcal D}$} node[right,inner sep=2pt]{${\color{gray}\scriptscriptstyle(\infty)}$} ++(0,-0.35) -- (X); \draw[arr1] (A) -- ++(0,-0.35) -- (Y); \draw[arr2, ->] (X) -- node[below,pos=0.5]{$h$} (Y); \end{tikzpicture}} \begin{aligned} = \frac1{m}\sum_{i=1}^m \log \frac1{h(y_i\,|\, x_i)}&\\ - \H_{\datadist{\mathcal D}}(Y | X)&. \end{aligned} \] \end{linked} } Simple evaluation metrics, such as the accuracy of a classifier, and the mean squared error of a regressor, also arise naturally as inconsistencies. \begin{linked}[Log Accuracy as Inconsistency] {prop}{accuracy} Consider functions $f,h : X \!\to\! Y$ from inputs to labels, where $h$ is a predictor and $f$ generates the true labels. The inconsistency of believing $f$ and $h$ (with any confidences), and a distribution $D(X)$ with confidence $\beta$, is $\beta$ times the log accuracy of $h$. That is, \vskip-4ex \begin{equation}\label{eq:accuracy-pdg} \aar*{\!\!\!\begin{tikzpicture}[center base] \node[dpad0] (Y) {$Y$}; \node[dpad0,left=0.8 of Y] (X) {$X$}; % \draw[arr2, ->>] (X) to[bend left] node[pos=0.45,above right=4pt,inner sep=1pt] {{\color{gray}$\scriptscriptstyle(r)$}} node[pos=0.35, above]{$h$} (Y); \draw[arr2, ->>] (X) to[bend right] node[pos=0.45,below right=4pt,inner sep=1pt] {{\color{gray}$\scriptscriptstyle(s)$}} node[pos=0.35, below]{$f$} (Y); \draw[arr2, <-] (X) to node[pos=0.55, anchor=south west, above] {$D$} node[pos=0.55, anchor=south west, below] {{\color{gray}$\scriptstyle(\beta)$}} +(-1.1, 0); \end{tikzpicture}\!} \begin{aligned} \!&= - \beta\,\log \Pr_{x \sim D}(f(x)\!=\!h(x)) \\ &\quad= \beta\, \I_D [f = h]. \end{aligned} \end{equation} \end{linked} \vskip-1.5ex One often speaks of the accuracy of a hypothesis $h$, leaving the true labels $f$ and empirical distribution $D$ implicit. 
Yet \Cref{prop:accuracy} suggests that there is a sense in which $D(X)$ plays the primary role: the inconsistency in \eqref{eq:accuracy-pdg} is scaled by the confidence in $D$, and does not depend on the confidences in $h$ or $f$. Why should this be the case? Expressing $(x,y)$ such that $y \ne f(x)$ with codes optimized for $f$ is not just inefficient, but impossible. The same is true for $h$, so we can only consider $\mu$ such that $\mu(f \!=\! h) \!=\! 1$. In other words, the only way to form a joint distribution \emph{at all} compatible with both the predictor $h$ and the labels $f$, is to throw out samples that the predictor gets wrong---and the cost of throwing out samples scales with your confidence in $D$, not in $h$. This illustrates why accuracy gives no gradient information for training $h$. It is worth noting that this is precisely the opposite of what happened in \cref{prop:supervised-cross-entropy}: there we were unwilling to budge on the input distribution, and the inconsistency scaled with the confidence in $h$. Observe how even properties of these simple metrics---%
relationships with one another and features of gradients%
---can be clarified by an underlying model. When $Y \cong \mathbb R^n$, an estimator $h(Y|X)$ is referred to as a regressor instead of a classifier. In this setting, most answers are incorrect, but some more so than others. A common way of measuring incorrectness is with mean squared error (MSE): $\Ex |f(X)-Y|^2$. MSE is also the inconsistency of believing that the labels and predictor have Gaussian noise---%
often a reasonable assumption because of the central limit theorem.
\begin{linked}[MSE as Inconsistency]{prop}{MSE}
\begin{align*} \aar**{\!\!\!\!\begin{tikzpicture}[center base] \node[dpad0] (Y) {$Y$}; \node[dpad0,left=2.2 of Y] (X) {$X$}; \node[dpad0,above right=0.1 and 0.7 of X] (mf) {$\mu_f$}; \node[dpad0,below right=0.1 and 0.7 of X] (mh) {$\mu_h$}; %
\draw[arr2, ->>] (X) to[bend left=0] node[pos=0.5, above left=0] {$f$} (mf); \draw[arr2, ->>] (X) to[bend right=0] node[pos=0.5, below left=0] {$h$} (mh); %
\draw[arr2, <-] (X) to node[pos=0.55, above] {$D$} node[pos=0.55, below] {{\color{gray}$\scriptstyle(\infty)$}} +(-1.1, 0); %
\draw[arr2, ->] (mh) to[bend right=0] node[pos=0.3, below right=0] {$\mathcal N_1$} (Y); \draw[arr2, ->] (mf) to[bend left=0] node[pos=0.3, above right=0]{$\mathcal N_1$} (Y); \end{tikzpicture}\!\!\!} \begin{aligned} = \frac12\Ex\nolimits_D \!\big| f(\mskip-1muX\mskip-1mu) - h(\mskip-1muX\mskip-1mu) \big|^2 \\ =: \mathrm{MSE}_D( f, h )\,,\; \end{aligned} \end{align*} where $\mathcal N_1(Y|\,\mu)$ is a unit Gaussian on $Y$ with mean $\mu$. \end{linked} In the appendix, we treat general univariate Gaussian predictors, with arbitrary variances and confidences. \section{REGULARIZERS AND PRIORS} \label{sec:regularizers} Regularizers are extra terms added to loss functions, which provide a source of inductive bias towards simple model parameters. There is a well-known correspondence between using a regularizer and doing maximum \emph{a posteriori} inference with a prior,%
\footnote{A full account can be found in the appendix.} in which L2 regularization corresponds to a Gaussian prior \parencite{rennie2003l2}, while L1 regularization corresponds to a Laplacian prior \parencite{williams1995bayesian}. Note that the ability to make principled modeling choices about regularizers is a primary benefit of this correspondence. Our approach provides a new justification of it. \begin{linked}{prop}{regularized} Suppose you have a parameterized model $p(Y|\Theta)$, a prior $q(\Theta)$, and a trusted distribution $D(Y)$.
The inconsistency of also believing $\Theta =\theta$ is the cross entropy loss, plus the regularizer: $\log \frac1{q(\theta)}$ times your confidence in $q$. That is, \begin{equation}\label{eq:regularize} \aar*{\!\!\begin{tikzpicture}[center base] \node[dpad0] (theta) at (0,0) {$\Theta$}; \node[dpad0] (Y) at (1.2,0.3) {$Y$}; %
\draw[arr] (theta) -- node[above]{$p$} (Y); \draw[arr2, <-] (theta) -- node[above right=-2pt and -2pt, pos=0.7] {$q$} node[below left=-3pt and -4pt,pos=0.6]{{$\color{gray}\scriptstyle(\beta)$}} ++(-1.0, 0.5); \draw[arr2, <<-] (theta) -- node[below,pos=0.4]{$\theta$} ++(-1.1, -0.3); \draw[arr2, <-] (Y) -- node[left,pos=0.6, inner sep=2pt]{$D$} node[right,pos=0.6, inner sep=1pt] {${\color{gray}\scriptscriptstyle(\infty)}$} ++(0, -0.9); \end{tikzpicture}\!} \begin{aligned} =\! \Ex_{y \sim D} \log \frac{1}{p(y \,|\,\theta)} &+ \beta \log \frac1{q(\theta)} \\[-0.2em] &\!{- \H(D)} \\[-1em] \end{aligned} %
\end{equation} \end{linked} If our prior is $q(\theta) \!=\! \frac{1}{k} \exp(-\frac12 \theta^2)$%
, a (discretized) unit Gaussian%
, then the right hand side of \eqref{eq:regularize} becomes \[ \underbrace{\Ex\nolimits_{D} \log \frac{1}{p(Y \,|\, \theta)} } _{\substack{\text{Cross entropy loss}\\(\text{data-fit cost of $\theta$})}} \; + \!\!\!\!\underbrace{~\frac\beta2 \theta_0^2~}_{\substack{\text{L2 regularizer}\\(\text{complexity cost of $\theta$})}} \!\!\!\!\! {\color{black} + \underbrace{\beta \log k - \H(D)}_{\text{constant in $p$ and $\theta$}}}\,, \] which is the L2 regularized version of \cref{prop:expected-surprise}. Moreover, the regularization strength corresponds exactly to the confidence $\beta$. What about other priors? It is not difficult to see that if we use a (discretized) unit Laplacian prior, $q(\theta) \propto \exp(-|\theta|)$, the second term instead becomes $\beta |\theta_0|$, which is L1 regularization.
More generally, to consider a complexity measure $U(\theta)$, we need only include the Gibbs distribution $\Pr_U(\theta) \propto \exp(-U(\theta))$ into our PDG. We remark that nothing here is specific to cross entropy; any of the objectives we describe can be regularized in this way. \section{STATISTICAL DISTANCES AS INCONSISTENCIES} \label{sec:statdist} \begin{figure*} \centering \def\rad{0.07} \begin{tikzpicture}[xscale=1.8, yscale=1.4]\def\rot{39} \draw[help lines, color=gray!30, dashed] (-1.2,-1.1) grid (5.9,2.7); \draw[->,thick] (-1.3,0)--(6,0) node[right]{$\beta_p$}; \draw[->,thick] (0,-1.2)--(0,2.8) node[above]{$\beta_q$} ; \fill[gray, fill opacity=0.2] (-1.3,1.3) -- (1.2,-1.2) -- (-1.3,-1.2) --cycle; \draw[gray, opacity=0.5, thick] (-1.3,1.3) -- node[below left=0.5em and 1.5em, anchor=north, rotate=-39,font=\footnotesize,fill=gray!20,fill opacity=0.8, inner sep=1pt, outer sep=3pt] {Non-convex region} (1.2,-1.2); \draw[color=gray!80!orange!45, densely dashdotted] (-1, -1) -- (3,3); \draw[color=gray!80!orange!45, thick, <->] (1.8, 1.2) -- node[above right, anchor=south, rotate=-39,font=\footnotesize,fill=white,fill opacity=0.8, inner sep=1pt, outer sep=3pt]{\scalebox{0.8}{Axis of Symmetry}}(1.2,1.8); \draw[blue!40, densely dashed, very thick, opacity=0.8] (0,1) -- node[below, align=center, pos=0.8, font=\footnotesize]{R\'enyi divergences\\for $\alpha \in (0,1)$} (5.6,1) (5.6,-1) -- node[above, align=center, pos=0.2, font=\footnotesize]{(negative) R\'enyi divergences\\ for $\alpha \in (1,\infty)$} (1,-1); \draw[red!90!blue!40, densely dashed, very thick, opacity=0.9, (-), shorten <=6pt, shorten >=6pt] (0,1) -- node[pos=0.5,below left=3pt,anchor=north, rotate=-39, font=\footnotesize, align=center, fill=white,fill opacity=0.8, inner sep=1pt, outer sep=2pt] {\scalebox{0.9}{Chernoff}} node[pos=0.5,below left=1.25em,anchor=north, rotate=-39, font=\footnotesize, align=center, fill=white,fill opacity=0.8, inner sep=1pt, outer sep=2pt]{\scalebox{0.9}{Divergences}} (1,0);
\draw[domain=1.6:5.6, smooth, very thick, variable=\x, blue!50!green!50, opacity=0.8, densely dashed] plot ({\x}, {1/(1-1/\x)}) node[rotate=-7, font=\footnotesize] at (3.9,1.55){$\alpha$-divergences}; \fill (0.5,0.5) circle (0.07) node[above right, align=center, label={[yshift=0ex,xshift=-1ex,align=left,font=\footnotesize\color{gray!50}]right:Bhattacharyya\\distance}] {$I\mkern-8muD_{B}(p,q)$}; \fill (1,3.1) -- +(0:0.07) arc (0:-180:0.07) -- +(0:0.07) node[below]{$\vdots$} node[right=1ex, align=center,label={[yshift=1ex,xshift=0ex]below:\footnotesize\color{gray!50}Reverse KL}](revKL){$\thickD\infdivx qp$}; \fill (6.4,1) -- +(270:0.07) arc (270:90:0.07) -- +(270:0.07) node[above=2pt, align=center, label={[yshift=-1ex,xshift=0ex]\footnotesize\color{gray!50} KL Divergence}] (FwdKL) {$\thickD\infdivx pq$} node[left]{$\cdots$}; \fill (0,1) -- ++(-90:0.07) arc (-90:90:0.07) node[above right, align=center, label={[yshift=-1ex,xshift=1ex]\footnotesize\color{gray!50} Max Entropy} ]{$\I_q(p > 0)$}; \fill (2,-1) circle (0.07) node[above right, align=center, label={[yshift=-1ex,xshift=1ex, align=center,font=\footnotesize\color{gray!50}]above:$-$(Pearson)~$\chi^2$ \\[-0.1em]divergence}] {$-\chi^2_P\infdivx pq$}; \fill (-1,2) circle (0.07) node[above, align=center, label={[yshift=-1ex,xshift=1ex, align=center,font=\footnotesize\color{gray!50}]above:$-$(Neyman)~$\chi^2$ \\[-0.1em]divergence}] {$-\chi^2_N\infdivx pq$}; \fill (1,-1) -- ++(-45:0.07) arc (-45:135:0.07) node[above, align=center, inner sep=2pt, label={[yshift=-1ex,xshift=1ex]\footnotesize\color{gray!50} $-$Min Entropy}] {$- \log \sup \frac pq$}; \end{tikzpicture} \caption{A map of the inconsistency of the PDG comprising $p(X)$ and $q(X)$, as we vary their respective confidences $\beta_p$ and $\beta_q$. Solid circles indicate well-known named measures, semicircles indicate limiting values, and the heavily dashed lines are well-established classes.
} \label{fig:statdistmap} \end{figure*} Suppose you are concerned with a single variable $X$. One friend has told you that it is distributed according to $p(X)$; another has told you that it follows $q(X)$. You adopt both beliefs. Your mental state will be inconsistent if (and only if) $p \ne q$, with more inconsistency the more $p$ and $q$ differ. Thus the inconsistency of a PDG comprising $p$ and $q$ is a measure of divergence. Recall that a PDG also allows us to specify the confidences $\beta_p$ and $\beta_q$ of each cpd, so we can form a PDG divergence $I\mkern-8muD^{\mathrm{P\mskip-2muD\mskip-1.5muG}}_{{\color{gray}(r,s)}}(p\Vert q)$ for every setting $(r,s)$ of $(\beta_p, \beta_q)$. It turns out that a large class of statistical divergences arise in this way. We start with a familiar one. \begin{prop}[KL Divergence as Inconsistency] The inconsistency of believing $p$ with complete certainty, and also $q$ with some finite certainty $\beta$, is $\beta$ times the KL Divergence (or relative entropy) of $q$ with respect to $p$. That is, \vspace{-0.7em} \[ \aar[\Big]{\begin{tikzpicture}[center base] \node[dpad0] (X) {$X$}; \draw[arr, <-] (X) -- node[above,inner sep=2pt, pos=0.65] {$p$} node[below,inner sep=2pt, pos=0.65] {${\color{gray}\scriptscriptstyle(\infty)}$} ++(-1.1,0); \draw[arr, <-] (X) -- node[above,pos=0.6,inner sep=2pt] {$q$} node[below,pos=0.6,inner sep=2pt] {$\scriptstyle{\color{gray}(\beta)}$}++(1.1, 0); \end{tikzpicture}} = \beta\, \thickD\infdivx pq . \] \end{prop} \vskip-1.3ex This result gives us an intuitive interpretation of the asymmetry of relative entropy / KL divergence, and a prescription about when it makes sense to use it. $\thickD\infdivx p q$ is the inconsistency of a mental state containing both $p$ and $q$, when absolutely certain of $p$ (and not willing to budge on it). 
This concords with the standard intuition that $\thickD\infdivx pq$ reflects the amount of information required to change $q$ into $p$, which is why it is usually called the relative entropy ``from $q$ to $p$''. We now consider the general case of a PDG comprising $p(X)$ and $q(X)$ with arbitrary confidences. \begin{linked}{lemma}{pdgdiv} The inconsistency $I\mkern-8muD^{\mathrm{P\mskip-2muD\mskip-1.5muG}}_{{\color{gray}(r,s)}}(p\Vert q)$ of a PDG comprising $p(X)$ with confidence $r$ and $q(X)$ with confidence $s$ is given in closed form by \vspace{-1ex} \[ \aar[\bigg]{\!\!\begin{tikzpicture}[baseline = -0.75ex] \node[dpad0] (X) {$X$}; \draw[arr2, <-] (X) -- node[above, pos=0.6, inner sep=2pt, align=center] {$p$} node[below, pos=0.65, inner sep=2pt, align=center] {$\scriptstyle{\color{gray}(r)}$} ++(-1.1,0); \draw[arr2, <-] (X) -- node[above, pos=0.6, inner sep=2pt, align=center] {$q$} node[below, pos=0.65, inner sep=2pt, align=center] {$\scriptstyle{\color{gray}(s)}$} ++(1.1, 0); \end{tikzpicture}\!\!} = - (r+s) \log \sum_x \left(p(x)^{r}\vphantom{\Big|} q(x)^{s}\right)^{\frac{1}{r+s}}. \] \end{linked} \vskip-1ex Of the many generalizations of KL divergence, R\'enyi divergences, first characterized by Alfr\'ed R\'enyi \citeyear{renyi1961measures} are perhaps the most significant, as few others have found either application or an interpretation in terms of coding theory \parencite{van2014renyi}. The R\'enyi divergence of order $\alpha$ between two distributions $p(X)$ and $q(X)$ is given by \vspace{-1ex} \begin{equation} I\mkern-8muD_\alpha\infdivx p q := \frac{1}{1- \alpha} \log \sum_{x \in \mathcal V(X)} p(x)^\alpha q(x)^{1-\alpha}. \label{eq:renyi} \end{equation} R\'enyi introduced this measure in the same paper as the more general class of $f$-divergences, but directs his attention towards those of the form \eqref{eq:renyi}, because they satisfy a natural weakening of standard postulates for Shannon entropy due to \textcite{fadeev1957begriff}. 
Concretely, every symmetric, continuous measure that additively separates over independent events, and with a certain ``mean-value property'', up to scaling, is of the form \eqref{eq:renyi} for some $\alpha$ \parencite{renyi1961measures}. It follows from \Cref{lemma:pdgdiv} that every R\'enyi divergence is a PDG divergence, and every (non-limiting) PDG divergence is a (scaled) R\'enyi divergence. \newpage \begin{coro}[R\'enyi Divergences] ~\vspace{-1ex} \begin{align*}% \aar[\Big]{\!\begin{tikzpicture}[baseline = -0.75ex] \node[dpad0] (X) {$X$}; \draw[arr2, <-] (X) -- node[above, pos=0.6, inner sep=2pt, align=center] {$p$} node[below, pos=0.65, inner sep=2pt, align=center] {$\scriptstyle{\color{gray}(r)}$} ++(-1.1,0); \draw[arr2, <-] (X) -- node[above, pos=0.6, inner sep=2pt, align=center] {$q$} node[below, pos=0.65, inner sep=2pt, align=center] {$\scriptstyle{\color{gray}(s)}$} ++(1.1, 0); \end{tikzpicture}\!} &= s \cdot I\mkern-8muD_{\frac{r}{r+s}}\infdivx{p}{q} \\[-1ex]\text{and}\qquad I\mkern-8muD_{\alpha}\infdivx{p}{q} &= \aar[\Big]{\!\begin{tikzpicture}[baseline = -0.75ex] \node[dpad0] (X) {$X$}; \draw[arr2, <-] (X) -- node[above, pos=0.6, inner sep=2pt, align=center] {$p$} node[below, pos=0.65, inner sep=2pt, align=center] {$\scriptstyle{\color{gray}(\frac{\alpha}{1-\alpha})}$} ++(-1.3,0); \draw[arr2, <-] (X) -- node[above, pos=0.6, inner sep=2pt, align=center] {$q$} ++(0.9, 0); \end{tikzpicture}\!} \end{align*} \end{coro} \vskip-1.4ex However, the two classes are not identical, because the PDG divergences have extra limit points. One big difference is that the reverse KL divergence $\thickD\infdivx q p$ is not a R\'enyi divergence $I\mkern-8muD_\alpha\infdivx p q$ for any value (or limit) of $\alpha$. This lack of symmetry has led others \parencite[e.g.,][]{cichocki2010families} to work instead with a symmetric variant called $\alpha$-divergence, rescaled by an additional factor of $\frac1\alpha$. 
The relationships between these quantities can be seen in \cref{fig:statdistmap}. The Chernoff divergence measures the tightest possible exponential bound on probability of error \parencite{nielsen2011chernoff} in Bayesian hypothesis testing. It also happens to be the smallest possible inconsistency of simultaneously believing $p$ and $q$, with total confidence 1. \begin{coro} The Chernoff Divergence between $p$ and $q$ equals \\[-1.8em] \[ \inf_{\beta \in (0,1)} \aar[\Big]{\begin{tikzpicture}[center base] \node[dpad0] (X) {$X$}; \draw[arr2, <-] (X) -- node[above, inner sep=2pt,pos=0.6] {$p$} node[below, inner sep=2pt,pos=0.65] {${\color{gray}\scriptscriptstyle(\beta)}$} ++(-1.1,0); \draw[arr2, <-] (X) -- node[above, inner sep=2pt,pos=0.6] {$q$} node[below, inner sep=2pt, pos=0.65] {${\color{gray}\scriptscriptstyle(1-\beta)}$} ++(1.1, 0); \end{tikzpicture}}. \] \end{coro} One significant consequence of representing divergences as inconsistencies is that we can use \cref{lemma!} to derive relationships between them. The following facts follow directly from \cref{fig:statdistmap}, by inspection. \begin{coro} \begin{enumerate}[nosep] \item R\'enyi entropy is monotonic in its parameter $\alpha$. \item $\thickD\infdivx p q \ge 2 I\mkern-8muD_B(p,q) \le \thickD\infdivx q p$. \item If $q(p > 0) < 1$ (i.e., $q \not\ll p$), then $\thickD\infdivx q p = \infty$. \end{enumerate} \end{coro} These divergences correspond to PDGs with only two edges and one variable. What about more complex graphs? For a start, conditional divergences \vspace{-1.5ex} \def\ns{\mskip-1.5mu} \[ I\mkern-8muD^{\mskip-1.5mu\mathrm{P\mskip-2muD\mskip-1.5muG}}_{(r,s)}\mskip-1.5mu\ns\Big(\mskip-1.5mu p(\mskip-1.5mu Y \mskip-1.5mu|\mskip-1.5mu X \mskip-1.5mu) \mskip-1.5mu\,\Big\Vert\,\mskip-1.5mu q(\mskip-1.5mu Y \mskip-1.5mu|\mskip-1.5mu X\mskip-1.5mu) \mskip-1.5mu\,\Big|\,\mskip-1.5mu r(\mskip-1.5mu X\mskip-1.5mu)\!\Big) \!:=\!\! \displaystyle \Ex_{x\sim r} \!! 
\mskip-1.5mu I\mkern-8muD^{\mskip-1.5mu\mathrm{P\mskip-2muD\mskip-1.5muG}}_{(r,s)} \mskip-1.5mu\ns\Big(\mskip-1.5mu p(\mskip-1.5mu Y \mskip-1.5mu|x) \mskip-1.5mu\ns\,\Big\Vert\,\mskip-1.5mu\ns q(\mskip-1.5mu Y \mskip-1.5mu|x)\!\Big) \] \vskip-2ex can be represented straightforwardly as \vskip-3ex \[ I\mkern-8muD^{\mathrm{P\mskip-2muD\mskip-1.5muG}}_{(r,s)}\mskip-1.5mu(p \,\Vert\, q \,|\, r\mskip-1.5mu) = \aar*{ \begin{tikzpicture}[center base] \node[dpad0] (X) at (0,0) {$X$}; \node[dpad0] (Y) at (1.65,0) {$Y$}; \draw[arr, <-] (X) -- node[above, pos=0.55, inner sep=2pt]{$r$} node[below, pos=0.55, inner sep=2pt]{${\color{gray}\scriptscriptstyle(\infty)}$} +(-1.2,0); \draw[arr] (X) to[bend left=25, inner sep=1pt] node[above, inner sep=2pt, pos=0.35] {$p$} node[above, inner sep=2pt, pos=0.68] {${\color{gray}\scriptscriptstyle(r)}$} (Y); \draw[arr] (X) to[bend right=25] node[below, inner sep=2pt,pos=0.35] {$q$} node[below, inner sep=2pt, pos=0.68] {${\color{gray}\scriptscriptstyle(s)}$} (Y); \end{tikzpicture} } \,. \] \vskip-1ex Other structures are useful intermediates. \Cref{lemma!}, plus some structural manipulation, gives visual proofs of many divergence properties; \Cref{fig:dpi-vis-proof} features such a proof of the data-processing inequality. And in general, PDG inconsistency can be viewed as a vast generalization of divergences to arbitrary structured objects. 
\begin{figure*} \tikzset{ci2/.style={inner sep=2pt, align=center}}% % \colorlet{pcolor}{Plum}% \colorlet{qcolor}{MidnightBlue}% \def\amt{45}% \tikzset{pstyle/.style={line width=0.9pt, pcolor!\amt!black}}% \tikzset{qstyle/.style={line width=1.3pt, qcolor!\amt!black}}% \tikzset{pqstyle/.style={line width=1.5pt,pcolor!50!qcolor!\amt!black}}% % \def\lgs{\color{gray!80}\scriptstyle}% % % \scalebox{0.89}{{ $ \aar*{\!\begin{tikzpicture}[center base] \node[dpad0] (X) {$X$}; \draw[arr2, <-,qstyle] (X) -- node[above,pos=0.6,ci2]{$q$} node[below, pos=0.65,ci2] {{$\lgs\color{qcolor!40}({\zeta})$}} ++(1.1, 0); \draw[arr2, <-,pstyle] (X) -- node[above,pos=0.6,ci2]{$p$} node[below, pos=0.65, ci2] {{$\lgs\color{pcolor!40}({\beta})$}} ++(-1.1, 0);% \end{tikzpicture}\!} \!=\!\! \aar**{\!\begin{tikzpicture}[center base] \node[dpad0] (X) {$X$}; \node[dpad0,above=.8 of X,align=center] (Y) {$Y$}; \draw[arr2, <-,qstyle] (X) -- node[above,pos=0.7,ci2]{$q$} node[below, pos=0.65,ci2] {{$\lgs\color{qcolor!40}({\zeta})$}} ++(1.1, 0); \draw[arr2, <-,pstyle] (X) -- node[above,pos=0.7,ci2]{$p$} node[below, pos=0.65,ci2] {{$\lgs\color{pcolor!40}({\beta})$}} ++(-1.1, 0);% \draw[arr2, pqstyle] (X) -- node[left,pos=0.45,inner sep=1pt]{$f$} node[right, pos=0.45, inner sep=1.5pt, align=center] {{$\lgs\color{pcolor!50!qcolor!40}(\beta+\zeta)$}} (Y);% \end{tikzpicture}\!} \!=\! 
\aar**{\!\begin{tikzpicture}[center base] \node[dpad0] (X1) {$X_1$}; \node[dpad0, right=0.6 of X1] (X2) {$X_2$}; \node[dpad0,above=.8 of {$(X1)!.5!(X2)$},align=center] (Y) {$Y$}; \draw[arr2, -, double equal sign distance] (X1) to (X2); \draw[arr2, <-,qstyle] (X2) -- node[above,pos=0.6,ci2]{$q$} node[below, pos=0.65,ci2] {{$\lgs\color{qcolor!40}({\zeta})$}} ++(1.1, 0); \draw[arr2, <-,pstyle] (X1) -- node[above,pos=0.6,ci2]{$p$} node[below, pos=0.65,ci2] {{$\lgs\color{pcolor!40}({\beta})$}} ++(-1.1, 0);% \draw[arr2,pstyle] (X1) to[bend left=40] node[above left, pos=0.35, inner sep=1pt]{$f$} node[below right=0 and 0, pos=0.45, inner sep=0pt, align=center] {{$\lgs\color{pcolor!40}({\beta})$}} (Y);% \draw[arr2,qstyle] (X2) to[bend right=40] node[above right, pos=0.35, inner sep=1pt]{$f$} node[below left=0 and 0, pos=0.45, inner sep=0pt, align=center] {{$\lgs\color{qcolor!40}({\zeta})$}} (Y);% \end{tikzpicture}\!} \!\ge\! \aar**{\!\begin{tikzpicture}[center base] \node[dpad0] (X1) {$X_1$}; \node[dpad0, right=0.65 of X1] (X2) {$X_2$}; \node[dpad0,above=.75 of {$(X1)!.5!(X2)$},align=center] (Y) {$Y$}; \draw[arr2, <-,qstyle] (X2) -- node[above,pos=0.6,ci2]{$q$} node[below, pos=0.65,ci2] {{$\lgs\color{qcolor!40}({\zeta})$}} ++(1.1, 0); \draw[arr2, <-,pstyle] (X1) -- node[above,pos=0.6,pstyle,ci2]{$p$} node[below, pos=0.65,ci2] {{$\lgs\color{pcolor!40}({\beta})$}} ++(-1.1, 0);% \draw[arr2,pstyle] (X1) to[bend left=30] node[above left, pos=0.35, inner sep=1pt]{$f$} node[below right=0 and 0, pos=0.45, inner sep=0pt, align=center] {{$\lgs\color{pcolor!40}({\beta})$}} (Y);% \draw[arr2,qstyle] (X2) to[bend right=30] node[above right, pos=0.35, inner sep=1pt]{$f$} node[below left=0 and 0, pos=0.45, inner sep=0pt, align=center] {{$\lgs\color{qcolor!40}({\zeta})$}} (Y);% \end{tikzpicture}\!} \!\!=\! \aar*{\!\begin{tikzpicture}[center base] \node[dpad0] (X) {$X$}; \draw[arr2, <-,qstyle] (X) -- node[above,pos=0.7,ci2]{$ f\!\circ\! 
q$} node[below, pos=0.65,ci2] {{$\lgs\color{qcolor!40}({\zeta})$}} ++(1.1, 0); \draw[arr2, <-,pstyle] (X) -- node[above,pos=0.6,ci2]{$ f\!\circ\! p$} node[below, pos=0.65,ci2] {{$\lgs\color{pcolor!40}({\beta})$}} ++(-1.1, 0);% \end{tikzpicture}\!} $ }} \caption{A visual proof of the data-processing inequality: $\thickD^{\mathrm{P\mskip-2muD\mskip-1.5muG}}_{\lgs({\color{pcolor!40}\beta},{\color{qcolor!40}\zeta})}\infdivx[\big] pq \ge \thickD^{\mathrm{P\mskip-2muD\mskip-1.5muG}}_{\lgs({\color{pcolor!40}\beta},{\color{qcolor!40}\zeta})}\infdivx[\big] {f\circ p}{f \circ q}$. In words: the cpd $f(Y|X)$ can always be satisfied, so adds no inconsistency. It is then equivalent to split $f$ and the variable $X$ into $X_1$ and $X_2$ with edges enforcing $X_1 = X_2$. But removing such edges can only decrease inconsistency. Finally, compose the remaining cpds to give the result. See the appendix for a full justification. } \label{fig:dpi-vis-proof} \end{figure*} \section{VARIATIONAL OBJECTIVES AND BOUNDS} \label{sec:theory} The fact that the incompatibility of $\dg M$ with a \emph{specific} joint distribution $\mu$ is an upper bound on the inconsistency is not a deep one, but it is of a variational flavor. Here, we focus on the more surprising converse: PDG semantics capture general aspects of variational inference and provide a graphical proof language for it. \subsection{PDGs and Variational Approximations} \label{sec:variational} We begin by recounting the standard development of the `Evidence Lower BOund' (ELBO), a standard objective for training latent variable models \parencite[\S2.2]{blei2017variational}. Suppose we have a model $p(X,Z)$, but only have access to observations of $x$. 
In service of adjusting $p(X,Z)$ to make our observations more likely, we would like to maximize $\log p(X\!\!=\!x)$, the ``evidence'' of $x$ (\Cref{prop:marginal-ll}). Unfortunately, computing $p(X) = \sum_z p(X,Z\!\!=\!z)$ requires summing over all of $Z$, which can be intractable. The variational approach is as follows: fix a family of distributions $\mathcal Q$ that is easy to sample from, choose some $q(Z) \in \mathcal Q$, and define $\mathrm{ELBO}_{p,q}(x) := \Ex_{z \sim q} \log \frac{p(x,z)}{q(z)}$. This is something we can estimate, since we can sample from $q$. By Jensen's inequality, \[ \mathop{\mathrm{ELBO}}\limits_{p,q}(x) =\! \Ex_{q} \log \frac{p(x,Z)}{q(Z)} \le \log \Big[\! \Ex_{q}\! \frac{p(x,Z)}{q(Z)} \Big]\! = \log p(x), \] with equality if $q(Z) = p(Z)$. So to find $p$ maximizing $p(x)$, it suffices to adjust $p$ and $q$ to maximize $\mathrm{ELBO}_{p,q}(x)$,% \footnote{or for many iid samples: $\max_{p,q}\sum_{x \in {\mathcal D}}\mathrm{ELBO}_{p,q}(x)$.} provided $\mathcal Q$ is expressive enough. The formula for the ELBO is somewhat difficult to make sense of.% \footnote{Especially if $p, q$ are densities. See \cref{appendix:density}.} Nevertheless, it arises naturally as the inconsistency of the appropriate PDG. \begin{linked}{prop}{pdg-elbo-x} The negative ELBO of $x$ is the inconsistency of the PDG containing $p$,$q$, and $X\!\!=\!x$, with high confidence in $q$. That is, \vspace{-0.8em} \[ -\mathrm{ELBO}_{p,q}(x) = \aar[\Bigg]{ \begin{tikzpicture}[center base] \node[dpad0] (Z) {$Z$}; \node[dpad0,right=.5 of Z] (X) {$X$}; \coordinate (A) at ($ (X)!.5!(Z) + (0,0.8)$); \draw[arr1] (A) -- node[left, inner sep=3pt]{$p$} ++(0,-0.35) -- (X); \draw[arr1] (A) -- ++(0,-0.35) -- (Z); \draw[arr2, <<-] (X) -- node[above,pos=0.8]{$ x$} ++(0.9, 0); \draw[arr2, <-] (Z) -- node[above,pos=0.65, inner sep=2pt]{$q$} node[below,pos=0.7, inner sep=2pt]{${\color{gray}\scriptscriptstyle(\infty)}$} ++(-0.9, 0);% \end{tikzpicture}} . 
\] \end{linked} \vskip-1ex Owing to its structure, a PDG is often more intuitive and easier to work with than the formula for its inconsistency. To illustrate, we now give a simple and visually intuitive proof of the bound traditionally used to motivate the ELBO, via \cref{lemma!}: \[ \log \! \frac{1}{p(\mskip-1.5mux\mskip-1.5mu)} \!=\!\! \aar*{ \begin{tikzpicture}[center base] \node[dpad0] (Z) {$Z$}; \node[dpad0,right=.5 of Z] (X) {$X$}; \coordinate (A) at ($ (X)!.5!(Z) + (0,0.7)$); \draw[arr1] (A) -- node[left, inner sep=2pt]{$p$} ++(0,-0.25) -- (X); \draw[arr1] (A) -- ++(0,-0.25) -- (Z); \draw[arr2, <<-] (X) -- node[left,pos=0.8]{$x$} ++(0.2, 0.8); \end{tikzpicture} \!\le\! \aar*{\!\! \begin{tikzpicture}[center base] \node[dpad0] (Z) {$Z$}; \node[dpad0,right=.5 of Z] (X) {$X$}; \coordinate (A) at ($ (X)!.5!(Z) + (0,0.7)$); \draw[arr1] (A) -- node[left, inner sep=2pt]{$p$} ++(0,-0.25) -- (X); \draw[arr1] (A) -- ++(0,-0.25) -- (Z); \draw[arr2, <<-] (X) -- node[left,pos=0.8]{$x$} ++(0.2, 0.8); \draw[arr2, <-] (Z) -- node[left, inner sep=2pt,pos=0.5]{$q$} node[above, inner sep=1.5pt, rotate=-70, pos=0.7]{{${\color{gray}\scriptscriptstyle(\infty)}$}} ++(110:0.9); \end{tikzpicture} \!=\! -\!\mathop{\mathrm{E\mskip-0.5muL\mskip-0.5muB\mskip-0.5muO}}\limits_{p,q}(\mskip-1.5mux\mskip-1.5mu). \] The first and last equalities are \Cref{prop:marginal-ll,prop:pdg-elbo-x} respectively. Now to reap some pedagogical benefits. The second PDG has more edges so it is clearly at least as inconsistent. Furthermore, it's easy to see that equality holds when $q(Z) \!=\! p(Z)$: the best distribution for the left PDG has marginal $p(Z)$ anyway, so insisting on it incurs no further cost. \subsection{Variational Auto-Encoders and PDGs} An autoencoder is a probabilistic model intended to compress a variable $X$ (e.g., an image) to a compact latent representation $Z$. Its structure is given by two conditional distributions: an encoder $e(Z | X)$, and a decoder $d(X | Z)$. 
Of course, not all pairs of cpds fill this role equally well. One important consideration is the \emph{reconstruction error} \eqref{eq:rec}: when we decode an encoded image, we would like it to resemble the original. \vspace{-0.5em} \begin{equation} % \mathrm{Rec}(x) := \!\!\!\!\! \Ex_{z \sim e(Z|x)} \smash{\underbrace{\mathrm I_{d(\!X\!|z)}(x)\vphantom{\Big|}}_{\mathclap{\left(\;\substack{\text{additional bits required to}\\\text{decode $x$ from its encoding $z$}}\;\right)}}} \!= \sum_z e(z \,|\, x) \log \frac1{d(x \,|\, z)}\label{eq:rec} \end{equation} \vspace{0.0ex} There are other desiderata as well. Perhaps good latent representations $Z$ have uncorrelated components, and are normally distributed. We encode such wishful thinking as a belief $p(Z)$, known as a variational prior. The data of a Variational Auto-Encoder \parencite{kingma2013autoencoding,rezende2014stochastic}, or VAE, consists of $e(Z|X)$, $d(X|Z)$, and $p(Z)$. The encoder $e(Z|X)$ can be used as a variational approximation of $Z$, differing from $q(Z)$ of \Cref{sec:variational} only in that it can depend on $X$. VAEs are trained with the analogous form of the ELBO: \begin{align*} \mathrm{ELBO}_{p,e,d}(x) :=& \Ex_{z \sim e(Z|x)} \left[\log \frac{p(z) d(x\mid z)}{e(z\mid x)} \right] \\ =& - \mathrm{Rec}(x) - \thickD\infdivx{e(Z|x)}{p}. \end{align*} \vspace{-3ex} This gives us the following analog of \cref{prop:pdg-elbo-x}. \begin{linked}{prop}{pdg-elbo-vae} The VAE loss of a sample $x$ is the inconsistency of the PDG comprising the encoder $e$ (with high confidence, as it defines the encoding), decoder $d$, prior $p$, and $x$. 
That is, \vspace{-3ex} \[ -\mathrm{ELBO}_{p,e,d}(x) = \aar*{ \begin{tikzpicture}[center base] \node[dpad0] (Z) {$Z$}; \node[dpad0,right=.7 of Z] (X) {$X$}; \draw[arr2, ->] (X) to[bend left=50] node[above, inner sep=2pt]{$e$} node[below, inner sep=2pt]{${\color{gray}\scriptscriptstyle(\infty)}$} (Z); \draw[arr2, ->] (Z) to[bend left=50] node[above]{$ d$} (X); \draw[arr2, <<-] (X) -- node[above,pos=0.8]{$ x$} ++(0.9, 0); \draw[arr2, <-] (Z) -- node[above,pos=0.6]{$ p$} ++(-0.9, 0);% \end{tikzpicture}}. \] \vspace{-4ex} \end{linked} We now give a visual proof of the analogous variational bound. Let $\Pr_{p,d}(X,Z) := p(Z)d(X|Z)$ be the distribution that arises from decoding the prior. Then: \begin{align*} \log \frac{1}{\displaystyle\!\mathop{\mathrm{P\mkern-1.5mur}}_{\mathclap{p\mkern-1mu,d}}(\mskip-1.5mux\mskip-1.5mu)\!} \!=\! \aar** {$\mkern-1mu$\begin{tikzpicture} [baseline=1.9ex] \node[dpad0] (Z) {$Z$}; \node[dpad0,right=.4 of Z] (X) {$X$}; \draw[arr2, ->] (Z) to[bend left=50,looseness=1.5] node[above]{$\smash{d}$} (X); \draw[arr2, <<-] (X) -- node[left,pos=0.8,inner sep=2pt]{$x$} ++(0.2, 0.8); \draw[arr2, <-] (Z) -- node[right,pos=0.7,inner sep=2pt]{$p$} ++(-0.2, 0.8);% \end{tikzpicture}$\mkern-1mu$} &\!\le\! \aar** {$\mkern-1mu$\begin{tikzpicture} [baseline=1.5ex] \node[dpad0] (Z) {$Z$}; \node[dpad0,right=.7 of Z] (X) {$X$}; \draw[arr2, ->] (X) to[bend left=0] node[above, inner sep=2pt]{$e$} node[below, inner sep=1pt, pos=0.4] {${\color{gray}\scriptscriptstyle(\!\infty\!)}$} (Z); \draw[arr2, ->] (Z) to[bend left=50,looseness=1.5] node[above, inner sep=2pt]{$\smash{d}$} (X); \draw[arr2, <<-] (X) -- node[left,pos=0.8,inner sep=2pt]{$x$} ++(0.2, 0.8); \draw[arr2, <-] (Z) -- node[right,pos=0.7,inner sep=2pt]{$p$} ++(-0.2, 0.8);% \end{tikzpicture}$\mkern-1mu$} \!=\! \shortminus\!\mathop{\mathrm{E\mskip-0.5muL\mskip-0.6muB\mskip-0.7muO}}\limits_{p,e,d}\mskip-0.2mu(\mskip-1.5mux\mskip-1.5mu). 
\end{align*} The first and last equalities are \Cref{prop:marginal-ll,prop:pdg-elbo-vae}, and the inequality is \cref{lemma!}. See the appendix for multi-sample analogs of the bound and \cref{prop:pdg-elbo-vae}. \subsection{The \texorpdfstring{$\beta$}{beta}-VAE Objective} \label{sec:betavae} The ELBO is not the only objective that has been used to train networks with a VAE structure. In the most common variant, due to \textcite{higgins2016beta}, one weights the reconstruction error \eqref{eq:rec} and the `KL term' differently, resulting in a loss function of the form \vspace{-0.5ex} \[ \beta\text{-}\mathrm{ELBO}_{p,e,d}(x) := - \mathrm{Rec}(x) - \beta \thickD\infdivx{e(Z|x)}{p}, \] \vskip-1.5ex which, when $\beta \!=\! 1$, is the ELBO as before. The authors view $\beta$ as a regularization strength, and argue that it sometimes helps to have a stronger prior. Sure enough: \vspace{-2ex} \begin{linked}{prop}{betaelbo-informal} \!\!$-\beta\text{-ELBO}_{p,e,d}(x)$ is the inconsistency of the same PDG, but with confidence $\beta$ in $p(Z)$.% \end{linked} \section{FREE ENERGY AND INCONSISTENCY} A weighted factor graph $\Psi = (\phi_J, \theta_J)_{J \in \cal J}$, where each $\theta_J$ is a real-valued weight, $J$ is associated with a subset of variables $\mathbf X_J$, and $\phi_J : \mathcal V(\mathbf X_J) \to \mathbb R$, determines a distribution by \vskip-1.4em \[ \Pr\nolimits_\Psi(\mat x) = \frac{1}{Z_\Psi} \prod_{J \in \cal J} \phi_J(\mat x_J)^{\theta_J}. \] \vskip-0.7em \( Z_{\Psi} \) is the constant $ \sum_{\mat x} \prod_{J \in \mathcal J} \phi_J(\mat x_J)^{\theta_J}$ required to normalize the distribution, and is known as the \emph{partition function}. Computing $\log Z_\Psi$ is intimately related to probabilistic inference in factor graphs \parencite{ma2013estimating}. 
Following \textcite{richardson2020probabilistic}, let $\PDGof{\Psi}$ be the PDG with edges $\{ \raisebox{-0.3ex}{$\smash{\stackrel{J}{\rightarrow}}$} \mathbf X_J \}_{\mathcal J}$, cpds $p_J(\mathbf X_J) \propto \phi_J(\mathbf X_J)$, and weights $\alpha_J, \beta_J := \theta_J$% . There, it is shown that $\Pr_\Psi$ is the unique minimizer of $\bbr{\PDGof{\Psi}}_1$. But what about the corresponding inconsistency, $\aar{\PDGof{\Psi}}_1$? If the factors are normalized and all variables are edge targets, then $Z_\Psi \le 1$, so $\log \frac{1}{Z_\Psi} \ge 0$ measures how far the product of factors is from being a probability distribution. So in a sense, it measures $\Psi$'s inconsistency. \begin{linked}{prop}{fg-inconsistency-is-partition-function} For all weighted factor graphs $\Psi$, we have that $\aar{\PDGof{\Psi}}_1 = - \log Z_{\Psi}$. \end{linked} The exponential families generated by weighted factor graphs are a cornerstone of statistical mechanics, where $- \log Z_{\Psi}$ is known as the (Helmholtz) free energy. It is also an especially natural quantity to minimize: the principle of free-energy minimization has been enormously successful in describing not only chemical and biological systems \parencite{chipot2007free}, but also cognitive ones \parencite{friston2009free}. \section{BEYOND STANDARD LOSSES: A CONCRETE EXAMPLE} \label{sec:datsim} In contexts where a loss function is standard, it is usually for good reason---which is why we have focused on recovering standard losses. But most situations are non-standard, and even if they have standard sub-components, those components may interact with one another in more than one way. Correspondingly, there is generally more than one way to cobble standard loss functions together. How should you choose between them? By giving a principled model of the situation. 
Suppose we want to train a predictor network $h(Y|X)$ from two sources of information: partially corrupted data with distribution $d(X,Y)$, and a simulation with distribution $s(X,Y)$. If the simulation is excellent and the data unsalvageable, we would have high confidence in $s$ and low confidence in $d$, in which case we would train with cross entropy with respect to $s$% , $\mathcal L_{\texttt{s\kern-1.1pti\kern-0.8ptm}}\!:=\!\Ex_s [\log \nf1{h(Y|X)}]$. Conversely, if the simulation were bad and the data mostly intact, we would use $\mathcal L_\texttt{d\kern-0.75pta\kern-1ptt}$, the cross entropy with respect to $d$. What if we're not so confident in either? One approach a practitioner might find attractive is to make a dataset from samples of both $s$ and $d$% , or equivalently, train with a convex combination of the two previous losses, $\mathcal L_1 := \lambda_{\texttt{s}}\mathcal L_{\texttt{s\kern-1.1pti\kern-0.8ptm}} + \lambda_{\texttt{d}}\mathcal L_{\texttt{d\kern-0.75pta\kern-1ptt}}$ for some $\lambda_\texttt{s}, \lambda_\texttt{d} > 0$ with $\lambda_\texttt{s} + \lambda_\texttt{d} = 1$. This amounts to training $h$ with cross entropy with respect to the mixture $\lambda_\texttt{s} s + \lambda_\texttt{d} d$. 
Doing so treats $d$ and $s$ as completely unrelated, and so redundancy is not used to correct errors---a fact on display when we present the modeling choices in PDG form, such as \vspace{-1ex} \[ \mathcal L_1 = \aar**{ \begin{tikzpicture}[center base] \node[tpt={z0|\texttt{s\kern-1.1pti\kern-0.8ptm}}] at (-0.5,0.1) {}; \node[tpt={z1|\texttt{d\kern-0.75pta\kern-1ptt}},right=0.35 of z0]{}; \node[Dom={$Z$[label distance=-2.5ex, xshift=1.0em] (Z) around {\lab{z0}\lab{z1}}},yshift=0.2em ] {}; \node[dpad0] (X) at (2.4, 0.6) {$X$}; \node[dpad0] (Y) at (2.4, -0.6) {$Y$}; \coordinate (xyz) at (1.9, 0); \draw[arr1, <-] (Z) to node[above, pos=0.6]{$\lambda$} node[below,inner sep=1pt, pos=0.6]{${\color{gray}\scriptstyle( \infty )}$} +(-1.5, 0); \draw[arr1] (X) to node[right,pos=0.4]{$h$} (Y); \draw[arr,-,shorten >=0pt] (Z) to[bend left=0, shorten >=0pt] node[above, inner sep=1pt, pos=0.55] {$\begin{matrix}\texttt{d\kern-0.75pta\kern-1ptt} \mapsto d \\[-0.6ex] \texttt{s\kern-1.1pti\kern-0.8ptm} \mapsto s \end{matrix}$} node[below,inner sep=1pt]{${\color{gray}\scriptstyle( \infty )}$} (xyz); \draw[arr2, shorten <=0pt] (xyz) to (X); \draw[arr2, shorten <=0pt] (xyz) to (Y); \end{tikzpicture}} , \] in which a switch variable $Z$ with possible values $ \{\texttt{s\kern-1.1pti\kern-0.8ptm},\texttt{d\kern-0.75pta\kern-1ptt}\}$ controls whether samples come from $s$ or $d$, and is distributed according to $\lambda(Z\!=\!\texttt{s\kern-1.1pti\kern-0.8ptm}) = \lambda_\texttt{s}$. Our practitioner now tries a different approach: draw data samples $(x,y) \sim d$ but discount $h$'s surprisal when the simulator finds the point unlikely, via loss $\mathcal L_2 := \Ex_{d} [s(\mkern-2muX\!,\!Y\mkern-2mu) \log \nf1{h(Y|X)}]$. This is the cross entropy with respect to the (unnormalized) product density $ds$, which in many ways is appropriate. However, by this metric, the optimal predictor $h^*(Y|x) \propto d(Y|x) s(Y|x)$ is \emph{uncalibrated} \parencite{dawid1982well}. 
If the data and simulator agree ($d \!=\! s$), then we would want $h(Y|x) \!=\! s(Y|x)$ for all $x$, but instead we get $h^*(Y|x) \propto s(Y|x)^2$. So $h^*$ is overconfident. What went wrong? $\mathcal L_2$ cannot be written as the (ordinary $\gamma\!=\!0$) inconsistency of a PDG containing only $s,h$, and $d$, but for a large fixed $\gamma$, it is essentially the $\gamma$-inconsistency \[ \mathcal L_2 \approx C \aar**{ \begin{tikzpicture}[center base] \node[dpad0] (X) at (0, 0.6) {$X$}; \node[dpad0] (Y) at (0, -0.6) {$Y$}; \draw[arr1] (X) to node[left, pos=0.4, inner sep=1pt]{$h$} (Y); \coordinate (d0) at (1.8, 0); \coordinate (dmid) at (0.9, 0); \coordinate (s0) at (-1.8, 0); \coordinate (smid) at (-0.9, 0); \draw[arr,->,shorten <=0pt] (dmid) to[bend right=25] (X); \draw[arr,->,shorten <=0pt] (dmid) to[bend left=25] (Y); \draw[arr1,-,shorten <=0pt] (dmid) to node[below, inner sep=2pt]{${\color{gray}\scriptstyle \renewcommand{\arraystretch}{.7} \big(\begin{matrix} \scriptstyle\alpha: 1 \\[-0.2ex] \scriptstyle\beta: \gamma \end{matrix} \big)}$} node[above] {$d$} (d0); % \draw[arr,->,shorten <=0pt] (smid) to[bend left=25] (X); \draw[arr,->,shorten <=0pt] (smid) to[bend right=25] (Y); \draw[arr1,-,shorten <=0pt] (smid) to node[below, inner sep=2pt]{${\color{gray}\scriptstyle \renewcommand{\arraystretch}{.7} \big( \begin{matrix} \scriptstyle \alpha: 1 \\[-0.2ex] \scriptstyle \beta: \gamma \end{matrix} \big)}$} node[above]{$s$} (s0); \end{tikzpicture}}\Bigg._{\!\!\!\gamma} + \mathit{const}, \] where $C$ is the constant required to normalize the joint density $sd$, and $\mathit{const}$ does not depend on $h$. However, the values of $\boldsymbol\alpha$ in this PDG indicate an over-determination of $XY$ (it is determined in two different ways), and so $h^*$ is more deterministic than intended. 
By contrast, \vspace{-2ex} \[ \mathcal L_3 := \aar**{ \begin{tikzpicture}[center base] \node[dpad0] (X) at (0, 0.6) {$X$}; \node[dpad0] (Y) at (0, -0.6) {$Y$}; \draw[arr1] (X) to node[left=0pt,pos=0.4, inner sep=1pt]{$h$} (Y); \coordinate (d0) at (1.8, 0); \coordinate (dmid) at (0.9, 0); \coordinate (s0) at (-1.8, 0); \coordinate (smid) at (-0.9, 0); \draw[arr,->,shorten <=0pt] (dmid) to[bend right=25] (X); \draw[arr,->,shorten <=0pt] (dmid) to[bend left=25] (Y); \draw[arr1,-,shorten <=0pt] (dmid) to node[below, inner sep=2pt]{${\color{gray}\scriptstyle(\lambda_{\texttt{d}})}$} node[above] {$d$} (d0); % \draw[arr,->,shorten <=0pt] (smid) to[bend left=25] (X); \draw[arr,->,shorten <=0pt] (smid) to[bend right=25] (Y); \draw[arr1,-,shorten <=0pt] (smid) to node[below, inner sep=2pt]{${\color{gray}\scriptstyle(\lambda_{\texttt{s}})}$} node[above]{$s$} (s0); \end{tikzpicture}}, \] does not have this issue: the optimal predictor $h^*$ according to $\mathcal L_3$ is proportional to the $\lambda$-weighted geometric mean of $s$ and $d$. It seems that our approach, in addition to providing a unified view of standard loss functions, can also suggest more appropriate loss functions in practical situations. \section{REVERSE-ENGINEERING LOSS?} \label{sec:reverse-engineer} Given an \emph{arbitrary} loss function, can we find a PDG that gives rise to it? The answer appears to be yes---although not without making unsavory modeling choices. Without affecting its semantics, one may add the variable ${\tt T}$ that takes values $\{{\tt t}, {\tt f}\}$, and the event ${\tt T} \!\!=\! {\tt t}$, to any PDG. Now, given a cost function $c: \mathcal V(X) \to \mathbb R_{\ge 0}$, define the cpd $\hat c({\tt T} |X)$ by $ \hat c({\tt t} | x) := e^{-c(x)}. $ By threatening to generate the falsehood {\tt f} with probability dependent on the cost of $X$, $\hat c$ ties the value of $X$ to inconsistency. 
\begin{linked}{prop}{expected-cost} \! \( \displaystyle \aar*{\!\begin{tikzpicture}[center base] \node[dpad0] (X) at (0,0) {$X$}; \node[dpad0] (2) at (1.1,0) {${\tt T}$}; \draw[arr2] (X) to node[above, pos=0.4,inner sep=2pt]{$\hat c$} (2); \draw[arr2, <-] (X) to node[above, pos=0.6, inner sep=2pt]{$p$} node[below, pos=0.6, inner sep=2pt] {${\color{gray}\scriptscriptstyle(\mskip-2mu\infty\mskip-2mu)}$} +(-1, 0); \draw[arr2, <<-] (2) to node[above, inner sep=2pt, pos=0.6] {{\tt t}} +(0.9,0); \end{tikzpicture}\!} = \! \Ex_{x\sim p}\! c(x). \) \end{linked} Setting confidence $\beta_p := \infty$ may not be realistic since we're still training the model $p$, but doing so is necessary to recover $\Ex_p c$.% \footnote{If $\beta_p$ were instead equal to $1$, we would have obtained $-\log \Ex_p \exp(-c(\!X\!))$, with optimal distribution $\mu(\!X\!) \!\ne\! p(\!X\!)$.\label{fn:logEexp}} Any mechanism that generates inconsistency based on the value of $X$ (such as this one) also works in reverse: the PDG ``squirms'', contorting the probability of $X$ to disperse the inconsistency. One cannot simply ``emit loss'' without affecting the rest of the model, as one does with utility in an Influence Diagram \parencite{influencediagrams}. Even setting every $\beta := \infty$ may not be enough to prevent the squirming. To illustrate, consider a model $\dg{S}\!\mathrm{Learn}$ of the supervised learning setting (predict $Y$ from $X$), with labeled data $\mathcal D$, model $h$, and a loss function $\ell$ on pairs of output labels. 
Concretely, define: \vspace{-1ex} \[ \dg{S}\!\mathrm{Learn} := \begin{tikzpicture}[baseline=4ex] \begin{scope}[xscale=1.2] \node[dpad0] (X) at (0.3,0) {$X$}; \node[dpad0] (Yt) at (1,1) {$Y$}; \node[dpad0,align=center] (Yp) at (1.4,0) {$\vphantom{Y}\smash{Y'}$}; \node[dpad0] (2) at (2,1) {${\tt T}$}; \coordinate (dstart) at (-0.1,0.9); \end{scope} \unmergearr[arr1]{dstart}{X}{Yt} \node[above=2pt of center-dstartXYt, xshift=-2pt] {$\datadist{\mathcal D}$}; \node[below right=2.0pt and -0.4pt of center-dstartXYt, inner sep=0pt, rotate=25] {${\color{gray}\scriptscriptstyle(\mskip-2mu\infty\mskip-2mu)}$}; \mergearr[arr2]{Yt}{Yp}{2} \node[above=2pt of center-YtYp2] {$\hat\ell$}; \draw[arr2] (X) to node[above, inner sep=2pt,pos=0.4] {$h$} node[below, inner sep=2pt,pos=0.4] {${\color{gray}\scriptscriptstyle(\mskip-2mu\infty\mskip-2mu)}$} (Yp); \draw[arr2, <<-] (2) to node[right, inner sep=2pt, pos=0.6] {{\tt t}} +(0,-1); \end{tikzpicture} \quad\;\text{and}\;\quad \mathcal L := \;\;\mathop{\scalebox{1.2}{$\Ex$}}\limits_{\substack{% \vphantom{x}\\ \mathllap{(x,y)} \sim \mathrlap{\datadist{\mathcal D}} \\ \mathllap{y'} \sim \mathrlap{p(Y'|\,x)}} } \;\big[\ell(y,y')\big]. % \vspace{-1.5ex} \] Given \Cref{prop:expected-cost}, one might imagine $\aar{\dg{S}\!\mathrm{Learn}} = \mathcal L$, but this is not so. In some ways, $\aar{\dg{S}\!\mathrm{Learn}}$ is actually preferable. The optimal $h(Y'|X)$ according to $\mathcal L$ is a degenerate cpd that places all mass on the label(s) $y^*_X$ minimizing expected loss, while the optimal $h(Y'|X)$ according to $\aar{\dg{S}\!\mathrm{Learn}}$ is $\datadist{\mathcal D}(Y|X)$, which means that it is calibrated, unlike $\ell$. If, in addition, we set $\alpha_p, \alpha_{\datadist{\mathcal D}} := 1$ and strictly enforce the qualitative picture, finally no more squirming is possible, as we arrive at $\displaystyle\lim_{\gamma\to\infty}\aar{\dg{S}\!\mathrm{Learn}}_\gamma = \mathcal L$. 
\vskip-0.5ex In the process, we have given up our ability to tolerate inconsistency by setting all probabilistic modeling choices in stone. What's more, we've dragged in the global parameter $\gamma$, further handicapping our ability to compose this model with others. To summarize: while model inconsistency readily generates appropriate loss functions, the converse does not work as well. Reverse-engineering a loss may require making questionable modeling choices with absolute certainty, resulting in brittle models with limited potential for composition. In the end, we must confront our modeling choices; good loss functions come from good models. \section{FINAL REMARKS} We have seen that PDG semantics, in the same stroke by which they capture Bayesian Networks and Factor Graphs \parencite{richardson2020probabilistic}, also generate many standard loss functions, including some non-trivial ones. In each case, the appropriate loss arises simply by articulating modeling assumptions, and then measuring inconsistency. Viewing loss functions in this way also has beneficial side effects, including an intuitive visual proof language for reasoning about the relationships between them. This ``universal loss'', which provides a principled way of choosing an optimization objective, may be of particular interest to the AI alignment community. \newpage \subsubsection*{Acknowledgements} Work supported in part by MURI grant W911NF-19-1-0217. Many thanks to my advisor, Joe Halpern, for his generous support, and for valuable critiques of many drafts. Thanks as well to my reviewers, who pushed me to better explain the confidence parameters, and to include a practical example (\Cref{sec:datsim}). Finally, thanks to my friends, particularly Varsha Kishore and Greg Yauney, for helping me to refine the presentation of these ideas. \subsubsection*{References} { \printbibliography[heading=none] } \clearpage \onecolumn
1,108,101,565,903
arxiv
\section{Introduction} Understanding how the hyperons undergo changes in nuclear matter is a very important issue in contemporary nuclear physics. In particular, it is of great interest to see how the hyperons are related to in-medium kaon properties at low densities and how they can be changed in higher densities that can be found in the interior of neutron stars~\cite{Gal:2016boi,Lattimer:2015nhk}. In the present contribution, we will discuss a recent work on the hyperon properties in nuclear matter, which was carried out in a simple but plausible framework of a chiral soliton approach to nonzero density phenomena in the SU(3) sector~\cite{Hong:2018sqa}. Previously, a similar approach was developed in the non-strangeness sector to study various phenomena in medium (for example, see Ref.~\cite{Yakhshiev:2013eya} and references therein) and the results were in qualitative agreement with those from other different approaches. In Ref.~\cite{Hong:2018sqa}, we extended the work of Ref.~\cite{Yakhshiev:2013eya} to SU(3), including the hyperons. We discuss the main results and significance of the work. \section{The model} The Lagrangian of the present model is written in the following form~\cite{Hong:2018sqa} \begin{eqnarray} \mathcal{L}=&-\frac{F_\pi^2}{16}\alpha_2^t(\rho) {\rm Tr} L_0L_0+\frac{F_\pi^2}{16}\alpha_2^s(\rho){\rm Tr} L_iL_i -\frac{\alpha_4^t(\rho)}{16e^2} {\rm Tr}[L_0,L_i]^2 +\frac{\alpha_4^s(\rho)}{32e^2}{\rm Tr}[L_i,L_j]^2\cr & +\frac{F_\pi^2}{16}\alpha_{\chi SB}(\rho){\rm Tr} \mathcal{M}(U+U^\dagger-2)+ \mathcal{L}_{WZ}, \label{ModLag} \end{eqnarray} where $L_\mu=U^\dagger\partial_\mu U$ and $U(\bm{x},t)$ is a chiral field in SU(3). 
The Wess-Zumino term~\cite{Wess:1971yu} $\mathcal{L}_{\mathrm{WZ}}$ in the Lagrangian constrains the soliton to be identified as a baryon and is expressed by a five-dimensional integral over a disk $D$ \begin{eqnarray} S_{\rm WZ} = -\frac{iN_c}{240\pi^2} \int_{D} d^5 \vec x\, \epsilon^{\mu\nu\alpha\beta\gamma} {\rm Tr}(L_\mu L_\nu L_\alpha L_\beta L_\gamma). \end{eqnarray} Here $\epsilon^{\mu\nu\alpha\beta\gamma}$ is the totally antisymmetric tensor defined as $\epsilon^{01234}=1$ and $N_c=3$ is the number of colors. The values of input parameters are defined in free space: $F_\pi=108.783$\,MeV denotes the pion decay constant, $e=4.854$ represents the Skyrme parameter, the masses of the $\pi$ and $K$ mesons are given respectively as $m_\pi=134.976$\,MeV and $m_K=495$\,MeV, and the mass matrix of the pseudo-Nambu-Goldstone bosons $\mathcal{M}$ has the diagonal form $\mathcal{M}=(m_\pi^2,m_\pi^2,m_K^2)$. The density-dependent functions $\alpha_2^t(\rho)$, $\alpha_2^s(\rho)$, $\alpha_4^t(\rho)$, $\alpha_4^s(\rho)$ and $\alpha_{\chi SB}(\rho)$ reflect the changes of the meson properties in nuclear medium. In an approximation of homogeneous infinite nuclear matter they are expressed in terms of the three linear density-dependent functions $f_{i}(\rho)=1+C_i\rho,\,(i=1,2,3)$. The numerical values of $C_i$ are fixed to be $C_1=-0.279$, $C_2=0.737 $ and $C_3=1.782$, respectively. They reproduce very well the equations of state (EoS) for symmetric nuclear matter near the normal nuclear matter density $\rho_0$ and at higher densities that may exist in the interior of a neutron star. The medium modification of the kaon properties is achieved by considering the following scheme \begin{eqnarray} F_\pi m_K\rightarrow F_K^* m_K^*=F_\pi m_K(1-C\rho/\rho_0) \label{comKprop} \end{eqnarray} and can be explained in terms of the alteration of the kaon decay constant and/or of the kaon mass in nuclear environment. 
The quantization of the model is performed by considering the time-dependent rigid rotation of a static soliton \begin{equation} U(\bm{r},t)=\mathcal{A}(t)U_0(\bm{r})\mathcal{A}(t)^\dagger, \end{equation} where $U_0(\bm{r})$ denotes the static SU(3) chiral soliton with trivial embedding. The time-dependent rotational matrix $\mathcal{A}(t)$ is decomposed \begin{eqnarray} \mathcal{A}(t)&=\left(\begin{array}{cc} A(t)&0\\ 0^\dagger&1\end{array}\right)S(t), \end{eqnarray} in terms of the SU(2) isospin rotation $A(t)=k_0(t){\bf 1}+i \sum_{a=1}^3\tau_a k_a(t)$ and fluctuations into the strangeness sector given by the matrix $S(t)=\exp\left\{i\sum_{p=4}^7k_p \lambda_p\right\}$. Here $\tau_{1,2,3}$ denote the Pauli matrices, whereas $\lambda_p$ stand for the strange part of the SU(3) Gell-Mann matrices. The time-dependent functions $k_a(t)$ $(a=0,1,2,\dots,7)$ represent arbitrary collective coordinates. More details of the approach can be found in Ref.~\cite{Hong:2018sqa}. \section{Results and discussions} All model parameters in free space and in nuclear matter, except for the parameter $C$ in Eq.~(\ref{comKprop}), are fixed in the SU(2) sector. The only remaining parameter $C$ could be fixed by data on kaon-nucleus scattering and kaonic atoms. However, in the present work we carry out a qualitative analysis of the effects in the baryonic sector due to the modification of the kaon properties in nuclear medium. Consequently, we discuss the density dependence of the mass splittings among the various baryon multiplet members. In our calculation, the parameter value $C=0$ corresponds to the case when the properties of the kaon do not change in nuclear matter whereas a nonzero value of the parameter $C\neq 0$ indicates that the kaon mass and/or dynamics is altered in a dense nuclear environment. The results show that in general the masses of the baryon octet tend to decrease in nuclear matter. 
Only $\Sigma$ showed a different tendency if the parameter value is set to be $C=0$. In the case of $C=0.2$, $m_\Sigma$ also tends to decrease as the density of nuclear matter increases~\cite{Hong:2018sqa}. In comparison, the results from SU(3) chiral effective field theory~\cite{Petschauer:2015nea} show that $m_{\Lambda}^*$ is decreased by about 17~\% at normal nuclear matter density $\rho_0$. The $\Xi$ hyperon behaves in a similar manner. At $\rho_0$ the change in the mass of $\Xi^*$ was about 6~\% and 16~\% for the corresponding parameter values $C=0$ and $C=0.2$, respectively. The masses of the baryon decuplet increase in general as $\rho$ increases. Changes are dramatic for $C=0$ while for $C=0.2$ they are less pronounced. We present the density dependence of the mass splittings among the multiplet members in Figs.~\ref{Fig1} and \ref{Fig2}. \begin{figure}[th] \includegraphics[width=0.45\textwidth]{fig1a.eps} \includegraphics[width=0.45\textwidth]{fig1b.eps} \caption{ (Color online.) Density dependence of the mass splittings among the baryon octet members. The mass splittings in nuclear matter are normalized to the corresponding free space mass splittings. The left and right panels in the figure correspond to the results with $C=0$ and $C=0.2$, respectively. } \label{Fig1} \end{figure} \begin{figure}[th] \includegraphics[width=0.45\textwidth]{fig2a.eps} \includegraphics[width=0.45\textwidth]{fig2b.eps} \caption{ (Color online.) Density dependence of the mass splittings among the baryon decuplet members. Notations are the same as in Fig.~\ref{Fig1}. } \label{Fig2} \end{figure} Figure~\ref{Fig1} shows the density dependence of the mass splittings among the baryon octet members while Fig.~\ref{Fig2} depicts the results corresponding to the mass splittings among the decuplet members. All the mass splittings in nuclear matter are normalized to the values of the corresponding ones in free space. 
The left and right panels in the figures illustrate the results with two different values of parameter $C$, respectively. It is interesting to see that except for $m^*_{\Sigma}-m^*_{\Lambda}$ all the mass splittings tend to decrease up to $(1.5-2)\rho_0$. This behavior can be explained in terms of the density-dependent functionals $\omega^*_-$ and $c^*$ entering into the mass formula (see Eq.\,(36) in Ref.~\cite{Hong:2018sqa}). The first functional describes the fluctuations in the strangeness direction and comes into play for the mass splitting formula between the same strangeness members while all other mass splittings presented in the figures depend linearly on $\omega_-^*$. This indicates that at large densities the fluctuations in the strangeness direction get weaker. From the figures one concludes also that at large densities SU(3) flavor symmetry tends to be restored. The work is supported by Basic Science Research Program through the National Research Foundation (NRF) of Korea funded by the Korean government (Ministry of Education, Science and Technology, MEST), Grant No. 2016R1D1A1B03935053 (UY) and Grant No. NRF-2018R1A2B2001752 (HChK).
1,108,101,565,904
arxiv
\section{Introduction} Classical deep inelastic $lH\,\,\to\,\,l' X$ scattering experiments deepened considerably our knowledge of nucleon structure. Measuring the momentum of the outgoing lepton $l'$ we learn about the distribution of partons inside the nucleon. In a reference frame where the nucleon moves (infinitely) fast, this information is accumulated in the parton distribution functions (PDFs) $f_a(x, Q^2)$ in terms of the partonic degrees of freedom: e.g., the Bjorken variable $x_{\rm Bj}$ relates to the fraction of the longitudinal momentum $P$ of the parent hadron possessed by a parton of the flavor $a$. Such collinear (integrated) PDFs can be properly defined as completely gauge invariant (nonperturbative) hadronic matrix elements (here and in what follows, the light-cone components of four-vectors are $p^\pm = (p^0 \pm p^z)/{\sqrt 2}$) \begin{equation} f_a(x, \mu^2) = \frac{1}{2} \int \frac{d\xi^- }{2\pi } \ {\rm e}^{-ik^{+}\xi^{-} } \left\langle p\ |\bar \psi_a (\xi^-, \mbox{\boldmath$0_\perp$})[\xi^-, 0^-]_n \gamma^+ \psi_a (0^-,\mbox{\boldmath$0_\perp$}) | \ p \right\rangle \ \label{eq:iPDF} \end{equation} with renormalization-group properties controlled by the DGLAP evolution equations (for review and Refs. see \cite{DIS_p}). The Wilson line $[\xi^-, 0^-]_n$ will be introduced below. Moreover, one can relate the moments of the collinear PDFs $ M^N_a = \int\!dx \ x^{N-1} f_a(x) $ to the matrix elements of the local twist-two operators $ {\cal O}^N = (p^+)^{-N} \ \langle p \ | \frac{1}{2} \bar \psi_a (0) \{ \gamma^+ iD^+...iD^+ \}_{\rm sym.} \psi_a(0) | \ p \rangle $ arising in the operator product expansion on the light-cone, thus making them well-defined objects from the field-theoretical point of view \cite{CS_first}. Another attractive feature of the collinear PDFs (\ref{eq:iPDF}) is that they imply a clear interpretation as the probability for a parton inside the nucleon to have the longitudinal momentum $k_{\rm long.} = xP_{\rm long.}$. 
This interpretation is most naturally established when QCD is canonically quantized (and subsequently renormalized) on equal-``light-cone-time'' surfaces $\xi^+ = 0$ in a class of singular non-covariant gauges \cite{LC_quant}. This parton number interpretation holds, in the light-cone gauge $A^+ =0$, in higher order calculations as well \cite{LC_Bassetto, SF87, CDL84}. However, an important problem must be solved in order to make the calculations in the light-cone gauge feasible. Even after imposing the gauge condition $A^+=0$, the gauge is not completely fixed: one may still perform a $\xi^-$-independent gauge transformation $U(\xi^+, \bm{\xi}_\perp)$ by virtue of $ \partial^+ U(\xi^+, \bm{\xi_\perp}) = \partial / \partial x^- U(\xi^+, {\bm \xi_\perp}) = 0 $. Therefore, fixing the gauge is equivalent to imposing certain boundary conditions on the gauge field (see, e.g., \cite{SF87, TMD_LC_trans, BR05, AT_POL}). Going over to the conjugate momentum space, one observes that the ambiguity in the behavior of the gauge field at light-cone infinity $\xi^- \to \infty$ maps onto the ambiguity of the gluon Green function at small $q^+ \to 0$. A key issue is, therefore, how to get rid of extra complications due to the emergent ``spurious'' singularity $\sim [q^+]^{-1}$ in the free gluon propagator \begin{equation} D^{\mu\nu} (q) = \frac{i}{q^2+i0} \left( - g^{\mu\nu} + \frac{(n^-)^{\mu} q^\nu + (n^-)^{\nu} q^\mu}{[q^+]} \right) \ . \label{eq:gluon_pr_LC} \end{equation} The uncertainty of the pole prescription in Eq. (\ref{eq:gluon_pr_LC}) corresponds to the residual gauge freedom and can be treated without changing the gauge-fixing constraint $A^+=0$. There are several possible pole-prescription-fixing procedures that are compatible with the light-cone gauge and have been shown to give correct results (at least, up to the $O(\alpha_s^2)$-order). 
In particular, the principal value prescription $ \frac{1}{[q^+]_{\eta}^{\rm PV} } = \lim_{\eta \to 0}\ \frac{1}{2}\left( \frac{1}{q^+ + i \eta} +\frac{1}{q^+ - i \eta} \right) $ was used in Ref. \cite{CFP80} to evaluate the DGLAP kernel in the next-to-leading order. Non-symmetrical advanced and retarded pole prescriptions are also possible \cite{TMD_LC_trans, BR05}. Although these methods work in some situations, the only pole prescription which is consistent with the equal-time canonical quantization in the light-cone gauge is the Mandelstam-Leibbrandt one \cite{LC_ML}: \begin{equation} \frac{1}{[q^+]} \to \lim_{\eta \to 0}\ \frac{1}{[q^+]_{\rm ML }} = \lim_{\eta \to 0}\ \frac{(q \cdot n^+)}{(q\cdot n^+) (q\cdot n^-) + i \eta} \doteq \lim_{\eta \to 0}\ \frac{1}{ (q\cdot n^-) + i \eta (q \cdot n^+) }\ , \label{eq:ML_def} \end{equation} where and in what follows $n^\pm$ are the light-like vectors $(n^\pm)^2 = 0 \ , \ n^+ n^- = 1$, and $\doteq$ means equality in the sense of the theory of distributions. It was shown that the free gluon propagator supplied with the ML pole prescription can be directly derived following the equal-time quantization procedure and is compatible with well-established results at least up to $O(\alpha_s)$-order \cite{LC_Bassetto, BR05, K_LC}. The main difference between the $q^-$-independent prescriptions and the ML one (\ref{eq:ML_def}) originate in the different situation of poles in the $q^0$ plane, as it is shown and explained in Fig. 1. Thanks to this feature, one can perform a Wick rotation of the integration contour to the Euclidean momentum space, and the ultraviolet divergences can be analyzed by means of the usual power counting procedure in the Euclidean space. This observation anticipates the absence of overlapping divergences in the loop calculations with the ML prescription. 
\begin{figure}[h] \begin{center} \includegraphics[width=0.30\textwidth,height=0.40\textheight,angle=90]{pole_structure.eps}\hspace{2pc} \caption{\label{fig:2} Location of the poles in the complex $q^0$ plane: the poles of the light-cone gluon propagator with the ML prescription (1) and the poles of the propagator in a covariant gauge (2) belong to the same quadrants, so that the clock-wise Wick rotation is allowed. The poles of the light-cone propagator with the principal-value prescription (3), in contrast, impede that rotation.} \end{center} \end{figure} It is worth noting that although the pole-prescription issues mentioned above may reveal themselves in the course of the calculation, they are not visible in the case of the collinear PDFs, by virtue of the cancelation of the soft divergences in the virtual and the real gluon exchange graphs. However, those issues are crucial and unavoidable in unintegrated PDFs \cite{CS_first}, which are introduced in the factorization approach to the semi-inclusive processes. The picture of the nucleon revealed in the DIS experiments, being essentially one-dimensional, is still incomplete: in this ``longitudinal'' picture the transverse degrees of freedom of the partons are eliminated by definition and the 3D-structure remains inaccessible. The study of semi-inclusive processes, such as semi-inclusive deep inelastic scattering (SIDIS), the Drell-Yan (DY) process, hadron-hadron collisions, or lepton-lepton annihilation to hadrons, where (at least) one more final or initial hadron is detected and its transverse momentum (and, possibly, its spin) is observed, calls for the introduction of more involved quantities---unintegrated transverse-momentum dependent (TMD) distribution and fragmentation functions (see \cite{TMD_INT} and Refs. therein). Moreover, a variety of applications of the TMDs approach has been found in the phenomenology of polarized hadronic processes. 
It was conjectured and corroborated that the idea of TMD parton densities inside the nucleons is directly applicable in the theory of single-spin asymmetries (see, e.g., \cite{TMD_PHENO} and Refs. therein). Recently, the interesting possibility that linearly polarized gluons inside unpolarized protons can affect the cross-sections of the scalar and pseudoscalar Higgs boson production in the gluon-gluon fusion channel was proposed and discussed within the TMD approach in Refs.~\cite{TMD_LHC}. In the present paper, I give a brief account of how the problem of the emergent singularities beyond the tree-approximation is approached in different operator definitions of the (quark) TMDs. At the one-loop level, the following three classes of singularities are expected: $(i)$ simple ultraviolet poles which must be removed by the standard renormalization procedure; $(ii)$ pure rapidity divergences, which depend on an additional rapidity parameter, but do not jeopardize the renormalizability of the TMDs, and can be safely resummed by means of the Collins-Soper equation; $(iii)$ highly undesirable overlapping divergences: they contain the UV and rapidity poles simultaneously and thus break down the standard renormalizability of TMDs, calling for a {generalized} renormalization procedure in order to enable the construction of a consistent operator definition of the TMDs. Before getting started with the analysis of the divergences, let us try to learn something from the tree-approximation. 
The simplest ``unsubtracted'' definition of ``a quark in a quark'' TMD ($[{\rm A}]_{\rm n}$ means that we use the light-like longitudinal Wilson lines), which allows a {\it parton number interpretation} in the light-cone gauge, reads \begin{eqnarray} && {\cal F}_{\rm unsub.}^{[{\rm A}_{\rm n}]} \left(x, {\bm k}_\perp; \mu \right) = \frac{1}{2} \int \frac{d\xi^- d^2 {\xi}_\perp}{2\pi (2\pi)^2} \ {\rm e}^{-ik \cdot \xi} \left\langle p \ |\bar \psi_a (\xi^-, \bm{\xi}_\perp) [\xi^-, \bm{\xi}_\perp; \infty^-, \bm{\xi}_\perp]_{n}^\dagger \right. \nonumber \\ && \left. \times [\infty^-, {\xi}_\perp; \infty^-, {\infty}_\perp]_{\bm l}^\dagger \gamma^+[\infty^-, {\infty}_\perp; \infty^-, \bm{0}_\perp]_{\bm l} [\infty^-, \bm{0}_\perp; 0^-,\bm{0}_\perp]_{n} \psi_a (0^-,\bm{0}_\perp) | \ p \right\rangle \ \label{eq:general} \end{eqnarray} with ${\xi^+=0}$. Generic semi-infinite path-ordered gauge links evaluated along a given four-vector $w$ are defined as $ [\infty; \xi]_{w} \equiv {} {\cal P} \exp \left[ - i g \int_0^\infty d\tau \ w_{\mu} \ A_{a}^{\mu}t^{a} (\xi + w \tau) \right] \ , $ where, in the case under consideration, the vector $w$ can be either light-like $w_L = n^\pm\ , \ (n^\pm)^2 =0$, or transverse $w_T = {\bm l}$. Although the formal integration of definition (\ref{eq:general}) over $\bm k_\perp$ yields the collinear PDF, Eq. (\ref{eq:iPDF}), \begin{equation} \int\! d^2 \bm k_\perp \ {\cal F}_{\rm unsub.}^{[{\rm A}_{\rm n}]} (x, \bm k_\perp) = \frac{1}{2} \int \frac{d\xi^- }{2\pi } \ {\rm e}^{-ik^{+}\xi^{-} } \ \left\langle p\ |\bar \psi_a (\xi^-, \bm 0_\perp)[\xi^-, 0^-]_n \gamma^+ \psi_a (0^-,\bm 0_\perp) | \ p \right\rangle \ = f_a(x) \ , \label{eq:u_to_i} \end{equation} this is only well-justified in the tree approximation, because the rapidity divergences in the loop corrections prevent a straightforward reduction. 
I would like to emphasize that the normalization of the above TMD \begin{equation} {\cal F}_{\rm unsub.}^{[{\rm A}_{\rm n}] (0)} (x, {\bm k}_\perp) = \frac{1}{2} \int \frac{d\xi^- d^2 \bm{\xi}_\perp}{2\pi (2\pi)^2} {\rm e}^{- i k^+ \xi^- + i \bm{k}_\perp \cdot \bm{\xi}_\perp} { \langle p \ | }\bar \psi (\xi^-, \bm{\xi}_\perp) \gamma^+ \psi (0^-, \bm 0_\perp) { | \ p \rangle } = \delta(1 - x ) \delta^{(2)} (\bm k_\perp) \ \label{eq:tree_tmd} \end{equation} can be obtained following the {canonical quantization procedure in the light-cone gauge}, where the longitudinal Wilson lines disappear and the equal-time commutation relations for the quark creation and annihilation operators $\{a^\dag (k, \lambda), a(k, \lambda)\}$ lead immediately to the parton number interpretation of the TMD: \begin{equation} {\cal F}_{\rm unsub.}^{[{\rm A}_{\rm n}] (0)} (x, {\bm k}_\perp) \sim \langle\ p \ | \ a^\dag(k^+, \bm k_\perp; \lambda) a(k^+, \bm k_\perp; \lambda) \ | \ p\ \rangle \ . \label{eq:parton_N} \end{equation} Use of the ``tilted'' gauge links does not satisfy this requirement. \begin{figure}[h] \includegraphics[width=0.45\textwidth,height=0.70\textheight,angle=90]{wilson_lines_PDF.eps}\hspace{2pc} \caption{\label{fig:1} Geometrical structure of integration contours in the unsubtracted TMDs with the light-like (upper panel) and off-the-light-cone (lower panel) longitudinal gauge links and their symbolic reduction to the collinear PDFs. In the former case, the transverse gauge links cancel completely after the $\bm k_\perp$-integration, while the longitudinal gauge links reduce to the one-dimensional light-like connector $[\xi^-, 0^-]$. In the off-the-light-cone situation, the cancelation of the transverse gauge links at infinity is not, at least, straightforward. Moreover, the integrated configuration contains two non-vanishing off-the-light-cone gauge links which are not equivalent to the simple connector $[\xi^-, 0^-]$. 
Beyond the tree-level, the renormalization group properties of those two objects are also different. I put the interrogation marks next to the transverse gauge links at infinity since I'm not aware of any consistent treatment of them in the TMD formulations with off-the-light-cone (tilted) Wilson lines. In contrast, the transverse gauge links appear naturally in the ``light-cone'' frameworks.} \end{figure} \begin{figure}[h] \includegraphics[width=0.45\textwidth,height=0.70\textheight,angle=90]{wilson_lines_SF.eps} \caption{\label{fig:3}Comparative geometry of the Wilson lines in unsubtracted soft factors and visualization of the reduction to the collinear case. Upper panel shows the soft factor in the momentum space, as proposed in the Refs. \cite{CS_all, CS09}. Lower panel presents the tilted off-the-light-cone integration paths in the impact parameter space, as well as the result of the reduction to the collinear $\bm b_\perp \to 0$ configuration.} \end{figure} \begin{figure}[h] \includegraphics[width=0.45\textwidth,height=0.70\textheight,angle=90]{wilson_lines_full_SF.eps} \caption{\label{fig:4}Comparative geometry of the Wilson lines in the subtracted soft factors. Upper panel corresponds to the soft factor of the TMD distribution function which enters the factorization formula (\ref{eq:LC_factor}). Lower panel shows the longitudinal gauge links shifted off the light-cone, which are used in the factorization approach (\ref{eq:Col_factor}).} \end{figure} Going beyond the tree-approximation, one encounters a bunch of singularities mentioned above. To overcome the problems related with them, different frameworks have been proposed. Adopting the covariant Feynman gauge, Ji, Ma and Yuan developed a framework which makes use of the tilted (off-the-light-cone) longitudinal gauge links lined up along the vector $n_B^2 \neq 0$ \cite{JMY}. 
In a covariant gauge, the transverse gauge links at light-cone infinity cancel, and the rapidity cutoff $\zeta = (2 p\cdot n_B)^2/|n_B^2|$ is introduced to control the deviation of the longitudinal gauge links from the light-like direction. A subtracted soft factor contains the non-light-like gauge links as well. Within this approach, the off-the-light-cone unsubtracted TMDs where the light-like vector $n^-$ in the longitudinal Wilson lines is replaced by the tilted vector $n_{\rm B} = (-{\rm e}^{2y_B}, 1, \bm 0_\perp)$ do not satisfy the relation (\ref{eq:u_to_i}), even in the tree approximation---cf. Fig.~2 and the caption. However, one can design a ``secondary factorization'' method which allows the expression of this TMD (transformed to the impact parameter space ${\cal F} (x, \bm b_\perp)$) in terms of a convolution of collinear PDFs and perturbative coefficient functions at small $\bm b_\perp$ \cite{JMY}. Another subtraction method, also in covariant gauges, but without explicit off-the-light-cone regularization in the unsubtracted TMD, was developed in Refs. \cite{Collins_what, TMD_subtract}. The corresponding geometry of the light-like and tilted Wilson lines in the soft factors is shown in Fig. 3, lower panel. In our works \cite{CS_all} we proposed to study the renormalization-group properties of the unsubtracted quark TMD (\ref{eq:general}) and to make use of its one-loop anomalous dimensions in order to reveal the simplest {\it minimal} geometry of the gauge links in the soft factor which allows one to get rid of the mixed rapidity-dependent terms. We showed (in the leading $O(\alpha_s)$-order) that the extra contribution to the anomalous dimension is nothing but the cusp anomalous dimension \cite{KR87, K_LC}. Note that in these works we adopted the light-cone gauge supplied with the $q^-$-independent pole prescriptions. In subsequent works we showed that our approach works in the case of the ML pole prescription as well, Refs. 
\cite{CS09}, and that it can be consistently used to formulate a generalized definition of the quark TMD with a {\it non-minimal spin-dependent term} in the Wilson lines, Refs. \cite{CKS10} (see also \cite{CDKM06}). Compared to the Ji-Ma-Yuan approach, we followed a different strategy. Making the assumption that the parton number interpretation (\ref{eq:parton_N}) must hold for TMDs in the light-cone gauge (like it holds in the collinear PDFs), we are in a position to {\it derive} a gauge-invariant operator definition of the TMD. In other words, starting from the requirement of the probability interpretation in the light-cone gauge and adding, step by step, the {\it minimally necessary} gauge links, we would end up with a gauge invariant operator definition of the TMD without undesirable singularities \cite{Ste83}. The generalized definition of the quark TMD we proposed, reads \begin{equation} {\cal F}^{[{\rm A}_{\rm n}]} (x, \bm k_\perp; \mu, \theta) = \frac{{\cal F}_{\rm unsub.}^{[{\rm A}_{\rm n}]} (x, \bm k_\perp; \mu, \theta)}{S_F(n^+,n^-; \theta) L_F^{-1}(n^+)} \ , \label{eq:TMD_LC} \end{equation} where the soft factor $S_F$ and the self-energy factor $L_F$ are defined in Refs. \cite{CS_all, ChSt_edge}, see also Figs.~3 and~4. The rapidity regulator is $\theta = \frac{n^+n^-}{\eta}$. 
Let me suggest the following {\it conjecture} concerning the generic structure of divergences in (any reasonable) operator definition of the TMD beyond the tree-approximation: The contribution of the overlapping $\sim 1/\epsilon \otimes \ln \theta$ singularities to the renormalized TMD can be expressed either in terms of a finite number of the cusp anomalous dimensions which are known in the theory of Wilson lines/loops up to the $O(\alpha_s^2)$-order---in this case, their treatment consists of the subtraction of (a finite number of) corresponding {\it cusped soft factors}; or those singularities depend on the {\it degenerate rapidities} $\sim \ln \theta_0 = \ln (n^\pm)^2$---in that case, one has to subtract the self-energy soft factors which consist, in contrast, of the ``smooth'' infinite gauge links without any obstructions (cusp or intersections). The conjecture is, therefore, that {\it there is no other sort of unphysical singularities in the loop corrections to the TMDs in any order of $\alpha_s$}. In the leading order, we demonstrated the validity of the above statement in our works \cite{CS_all, CS09, CheISMD, ChSt_edge}. Recently, some interesting aspects of the TMD factorization, light-like Wilson lines and renormalization properties of the PDFs have been studied adopting the methodology of soft-collinear effective theory (SCET) in Refs. \cite{SCET_TMD}. An important question remains, however, how our TMD can be built into an appropriate factorization formula for semi-inclusive hadronic tensor. In our approach, the following factorization formula is supposed to be valid \begin{equation} W^{\mu\nu} = |H(Q,\mu)^2|^{\mu\nu} \cdot \frac{{\cal F}_{\rm unsub.}^{[{\rm A}_{\rm n}]} (x, \bm k_\perp; \mu, \theta)}{S_F(n^+,n^-; \theta) L_F^{-1}(n^+)} \otimes \frac{{\cal D}_{\rm unsub.}^{[{\rm A}_{\rm n}]} (z, z \bm k'_\perp; \mu, \theta)} {S_D(n^+,n^-; \theta) L_D^{-1}(n^-)} + ... 
\ , \label{eq:LC_factor} \end{equation} where the geometrical structure of the soft factor is consistent with that of the collinear PDF \cite{K_LC, Li_97} and doesn't break the number interpretation. The explicit proof of the conjectured factorization and of absence of double-counting is in progress. Recently, Collins proposed a new definition of the (quark) TMD \cite{New_TMD_Col} which is built into the factorization formula for the semi-inclusive hadronic tensor (up to power corrections) \begin{equation} W^{\mu\nu} = |H(Q,\mu)^2|^{\mu\nu} \cdot {\cal F}^{\rm [Col.]} (x, \bm k_\perp; \mu, \zeta_F)\otimes {\cal D}^{\rm [Col.]} (z, z \bm k'_\perp; \mu, \zeta_D) + ... \ , \label{eq:Col_factor} \end{equation} where all soft factors are absorbed into the TMD distribution ${\cal F}^{\rm Col.}$ and the fragmentation ${\cal D}^{\rm Col.}$ functions, so that there are no separate soft factors in factorized structure functions, e.g., \begin{equation} {\cal F}^{\rm [Col.]} (x, \bm b_\perp; \mu, \zeta_F) = {\cal F}_{\rm unsub.}^{[{\rm A}_{\rm n}]} (x, \bm b_\perp; \mu) \cdot \sqrt{\frac{S(n^+,n_B)}{S(n^+,n^-)S(n_A,n^-)}} \ . \label{eq:TMD_Col} \end{equation} Here the soft factors depend on the light-like $n^\pm$ or the tilted $n_{A,B}$ vectors (for details, see \cite{New_TMD_Col}). Note that the TMD (\ref{eq:TMD_Col}) is defined in the impact parameter space ${\cal F} (x, \bm b_\perp) = \int\! d^{2} \bm k_\perp \ {\rm e}^{- i \bm k_\perp \bm b_\perp} \ {\cal F} (x, \bm k_\perp)$, so that it is, in fact, a ``semi-integrated PDF'' and the reduction to the collinear case corresponds to the limit $\bm b_\perp \to 0$. Some phenomenological and lattice applications of this approach have already been discussed in Ref. \cite{New_TMD_PHENO}. The geometry of the gauge links in the soft factors is presented and explained in Fig. 4. Several open questions are still to be answered: $(i)$ How should one prove the complete gauge invariance of the TMD (\ref{eq:TMD_Col})? 
It is formulated in the covariant Feynman gauge where the transverse gauge links at light-cone infinity vanish. What will change, if we adopt some physical (axial) gauge? $(ii)$ In particular, how should one treat the $T$-odd effects in the axial gauges given that the structure of the transverse gauge links at light-cone infinity is not yet clarified in the TMD (\ref{eq:TMD_Col})? $(iii)$ After reduction to the collinear PDF (in the case of TMD (\ref{eq:TMD_Col}), this corresponds to the limit $\bm b_\perp \to 0$), there is neither a mutual compensation of the longitudinal, nor that of the transverse gauge links (if introduced in the usual manner), see Fig. 4. Hence, the geometrical structure of the gauge links in the collinear PDF obtained from the TMD (\ref{eq:TMD_Col}) seems too cumbersome to be simply included in the standard DIS factorization scheme (see, e.g., Ref. \cite{K_LC, Li_97}). To conclude, several approaches to the problem of the factorization of semi-inclusive processes which make use of the unintegrated TMD parton distribution functions have been proposed and developed so far. There is no {\it a priori} clear relationship between these frameworks: e.g., corresponding operator definitions of the TMDs may, in principle, describe different objects with different (renormalization-group, gauge invariance, evolution) properties. It is a matter of further study to work out these issues. \paragraph{Acknowledgements} The results presented in this work have been obtained in collaboration with N. G. Stefanis. I'm also grateful to him for careful reading of the manuscript and fruitful discussion of its content. I thank the Organizers of the conference Photon-2011 in Spa for the hospitality and warm atmosphere during the conference. The figures were produced with \verb"JaxoDraw" \cite{JaxoDraw}. \section*{References}
1,108,101,565,905
arxiv
The content of this paper is an algorithm for smooth plane curves or for plane curves with simple double points.
We have a $g$-dimensional space of holomorphic differentials.
We fix an arbitrary point $P_0$ on $C$.
Therefore we have $D\sim P_1+\cdots +P_g -gP_0$, i.e.\ the set of semireduced divisors with $t\leq g$ is not empty and we can find reduced divisors.
The divisor $D=P_1+\ \cdots\ +P_t-tP_0$ with $t\leq g=3$ and $P_1,\ \cdots\ ,P_t\neq P_0$ is reduced if and only if $t=3$ and $P_1,P_2 ,P_3$ do not lie on a projective line or $t<3$.
Meromorphic differentials are uniquely determined by their principal parts up to a holomorphic differential.
On the other hand, the space of degree $m$ polynomials $G\ {\rm mod}\ F$ with zeros at the $\alpha $ common points of $D_1^+$ and $D_1^-$ also has the dimension $mn-g+1-\alpha$.$\bullet$
We have the remaining points $R_1,\ \cdots\ ,R_{g}$. Then we construct an $(n-1)$-curve through $R_1,\ \cdots\ ,R_{g}$ with highest contact at $P_0$ and obtain the remaining points $S_1,\ \cdots\ ,S_{t}$. Then $S_1+\ \cdots\ +S_{t}-tP_0$ is the reduced divisor for $D_1+D_2$. Analogously, for $n\geq 7$ we carry out the construction with $m$-curves $G(x,y,z)\neq 0\ {\rm mod}\ F$ with $m\geq n$ and $b_m\geq 2g$ (i.e. $mn\geq 3g$). \section{Algebraic description - The ideal-divisor-correspondence} \noindent 1. {\it The affine case.} We remark that there is a one-to-one correspondence between ideals of the quotient ring $\ {\CC} [x,y] /I_C$ of polynomial functions on the affine curve $C$ and the ideals of $\ {\CC} [x,y]$ with $I\supset I_C$ where $I_C =(F(x,y,1))$. $\ {\CC} [x,y] /I_C$ is one-dimensional and a Dedekind ring, cf. \cite{At}. Now let $I$ be an ideal with $I\supset I_C$. Then $I$ is zerodimensional. Because $\ {\CC} [x,y] /I_C$ is a Dedekind ring we have the unique primary decomposition \[ I = \bigcap (I_{P_i}^{m_i}, I_C) \] where $P_i=(x_i,y_i)$ and $I_{P_i}=((x-x_i),(y-y_i),I_C)$, cf. \cite{At}. We associate to $I$ the effective divisor \[D_I:=\sum m_i P_i\ . \] Conversely, let $D:=\sum m_i P_i$ be an effective divisor of finite points. Then we associate to $D$ the ideal \[ I_D:=\bigcap (I_{P_i}^{m_i}, I_C)\ . \] Therefore we have a one-to-one correspondence of effective divisors and ideals with $I\supset I_C$. Furthermore we have the following Lemma. \begin{Le} Let $D,D'$ be effective divisors of finite points and let $f\in I_D$. Then we have $(f)\geq D$ and \[ (I_D I_{D'},I_C) = I_{D+D'}\ \ \ \ \ \ \ \ {\rm and} \ \ \ \ \ \ \ \ \ \ \ \ ((f),I_C):I_{D} = I_{(f)-D}\ . \] Here $(f)$ denotes both, the ideal generated by $f(x,y)$ and the divisor of the analytic continuation of the meromorphic function $f(x,y)$. 
\end{Le} \noindent {\it Proof.} The proof follows from the above primary decomposition and the relations \\ $I_{P}^{p} \cap I_{Q}^{q} = I_{P}^{p}I_{Q}^{q}\ {\rm mod}\ I_C$ for $P\neq Q$ and $I_{P}^{p} \cap I_{P}^{q}= I_{P}^{\max\ (p,q)}\ {\rm mod}\ I_C$. $\bullet$ \noindent 2. {\it The projective case.} We suppose that $[0:1:0]\notin C$. Now let $D=D_e+D_\infty $ be an effective divisor with the finite part $D_e$ and the infinite part $D_\infty $. We define \[ {\bf I}^h_D:=H_z(I_{D_e})\cap H_x(I_{D_\infty }) \] where $I_{D_e}\subset\ {\CC} [x,y]$, $I_{D_\infty}\subset\ {\CC} [y,z]$ and $H_z$, $H_x$ are the homogenisations with respect to $z,x$, respectively. Conversely, let $\bf I$ be a homogeneous ideal of ${\CC} [x,y,z]$ containing the curve ideal, i.e. ${\bf I}\supset (F(x,y,z))$. Then we define \[ D_{\bf I}:=D_{A_z({\bf I})} +D_{(A_x({\bf I}),\ z)} \] where $A_x, A_z$ are the affinisations with respect to $z,x$. We have $D_{{\bf I}_D}=D$. We remark that for ideals ${\bf I}_1,{\bf I}_2$ with ${\bf I}_1,{\bf I}_2\supset (F(x,y,z))$ the relations ${\bf I}_{(D_{\bf I})}={\bf I}$ and ${\bf I}_{D_1} {\bf I}_{D_2} = {\bf I}_{D_1+D_2} $ are in general not valid. Now we define a product $\odot$ for homogeneous ideals ${\bf I}_1,{\bf I}_2$ which corresponds to the addition of divisors. We form the ideal product of the corresponding affine ideals. In order to include infinite points we consider products with respect to two affinisations with $z,x=1$. The intersection of the corresponding homogenisations will contain all curves with intersection divisor $\geq D_{{\bf I}_1}+D_{{\bf I}_2}$. I.e. we define \[ {\bf I}_1 \odot {\bf I}_2 := H_z((A_z({\bf I}_1) \cdot A_z({\bf I}_2),I_C))\bigcap H_x((A_x({\bf I}_1) \cdot A_x({\bf I}_2),I_C^x)) \] where $I^x := A_x ( H_z(I))$ for ideals $I\subset {\CC} [x,y]$. A generalized ideal quotient $\oslash$ is defined by \[ (G)\oslash {\bf I} := H_z((A_z((G)),I_C ) : A_z({\bf I}))\bigcap H_x((A_x((G)),I_C^x) : A_x({\bf I})). 
\] \begin{Sa} \noindent Let $D,D'$ be effective divisors and let $G\in {\bf I}_D$. Then we have $(G)\geq D$ and \[ {\bf I}^h_{D+D'}= {\bf I}^h_{D} \odot {\bf I}^h_{D'} \ \ \ \ \ \ \ {\rm and}\ \ \ \ \ \ \ {\bf I}^h_{(G)-D}= {\bf I}^h_{(G)} \oslash {\bf I}^h_{D} . \] \end{Sa} \noindent {\it Proof.} The proof follows from the fact, that the left and the right side are equal to the homogeneous ideal of all curves with intersection divisor $\geq D_{{\bf I}_1}+D_{{\bf I}_2}$ and $\geq (G)-D_{{\bf I}}$, respectively. $\bullet$ \section{Reduction and the group law} \noindent Let $D=D^+ - D^-$ with $\deg D^+ =\deg D^- =s $ be a divisor of degree zero and let ${\bf I^+}={\bf I}_{D^+}^h$, ${\bf I^-}={\bf I}_{D^-}^h$. Furthermore we consider the homogeneous ideals \[ {\bf I}_r:={\bf I}^h_{rP_0} \] of forms with an $r$-fold point $P_0$. We choose $m$ such that $b_m\geq s+g$. Then we choose an arbitrary element $G$ in ${\bf I^+} \odot {\bf I}_{b_m-s} = {\bf I}^h_{D^+ +(b_m-s)P_0}$ of degree $m$ and we form ${\bf J}=(G)\oslash{\bf ({\bf I}^+} \odot {\bf I}_{ b_m-s })$. Then we determine the number $\alpha$ such that $({\bf J}\odot {\bf I}^-)\cap {\bf I}_{b_m-s+g+\alpha }$ contains exactly one Groebner basis element $G'$ of degree $m$ with respect to a degree order. We form ${\bf I}_{red}=(G')\oslash (({\bf J} \odot {\bf I}^- )\cap {\bf I}_{b_m-s+g+\alpha } )= {\bf I}_{red}={\bf I}^h_{S_1+\cdots S_{g-\alpha}}$ where $S_1+\cdots S_{g-\alpha} -(g-\alpha)P_0$ is the reduced divisor of $D$. \noindent {\it Remark}: Given ${\bf I}^+$, ${\bf I}^-$, $t$ one can carry out the determination of ${\bf I^+} \odot {\bf I}_{b_m-s}$, ${\bf J}=(G)\oslash{\bf ({\bf I}^+}\odot {\bf I}_{ b_m-s })$, $({\bf J}\odot {\bf I}^-)\cap {\bf I}_r$ $(r\geq b_m-s+g )$ and $(G')\oslash (({\bf J}\odot {\bf I}^-)\cap {\bf I}_{b_m -s+g+ \alpha })$ by Groebner basis calculations, cf. \cite{Be}. Now we can describe the group law. 
($C$ has no infinite intersections with all curves occurring during the calculation.)
\] Furthermore \[ J:=((f),I_C):(I^+I_3) =\{698405268857 + 635735348837y - 10585774871y^2 + 108619669441y^3,\] \[ 268707776349 + 254869376165x + 103445986821y + 108619669441y^2 \}, \] \[ - 66326x^2y - 735502y^2 + 70382xy^2 + 325163y^3, \] \[ I_6=\{1 - 6y + 15y^2 - 20y^3 + 15y^4 - 6y^5 + y^6, -102 + x + 524y - 1092y^2 + 1141y^3 - 598y^4 + 126y^5\}, \] {\tiny \[ (JI_6 ,I_C) =\{698405268857 - 3554696264305y + 6651081164962y^2 - 4259940825918y^3 - 3049132583596y^4 + 7186609158448y^5 - 5447186836050y^6 \] \[ + 2328545039678y^7 - 662303791517y^8 + 108619669441y^9,\] \[ -93764434230515570410626334329726164533810553558278390921760183 + 2185848162978035880543485328036429801750462217550638009464820x \]\[+ 390604925102813740313936712629510601523986718107312688345228286y - 553599197596387095117790236962526646313212938826438985782932370y^2\]\[ + 109584143933719697324470424983767313360642548080131427940056882y^3 + 488838345917420338620885461620364216394401215438274344792992080y^4 \]\[- 548212607851251833242108578943280138884947131311429528021917262y^5 + 272507862737893198250637137223325900667162331316521096907832914y^6\]\[ - 85848455024362109011538768963037701794199996136791647052272470y^7 + 17703568847691597391590697413566189778227344673148355783307303y^8\}.\]} There is an element of degree 3 in $(JI_6,I_C)$ \[ g=683086 - 414993x - 636078x^2 + 356233x^3 - 259643y + 677678xy. \] Finally we obtain \[ I_{red}= ((g),I_C):(JI_6 ) = \{94544281343 + 377260313207y + 408415639297y^2 + 134215744153y^3,\]\[ -53515118937 + 13173978910x - 225487128300y - 134215744153y^2\}. \] Because the minimal element of $I_{red}$ \[ -53515118937 + 13173978910x - 225487128300y - 134215744153y^2 \] with respect to a degree order is of degree $2>n-3$ the ideal $I_{red}$ is reduced. The ideal $I_{red}$ corresponds to the reduced divisor \[ (-0.82409 - 0.62806i, -1.31975 + 0.06425i,1)\]\[+ (-0.82409 + 0.62806i, -1.31975 - 0.06425i,1)+ (-1.18524,- 0.40347,1)-3(1,1,1)\sim D. 
\] \section{Curves with simple double points } The above construction applies analogously to curves with simple singularities. Here we consider the case of $n$-curves $C$ with $d$ finite simple double points $ {\cal D}_1,\ \cdots\ ,{\cal D}_d \neq P_0 $ with $F_x=F_y=0,\ F_{xx} F_{yy}-F_{xy}^2\neq 0$. Furthermore we suppose $[0,1,0]\notin C$. To every point ${\cal D}_i$ correspond two points ${\cal D}_i^+$, ${\cal D}_i^-$ on the Riemann surface of $C$ of genus $g=\frac{(n-1)(n-2)}{2}-d$. Let \[ \Delta:={\cal D}_1^+ +{\cal D}_1^- +\ \cdots\ +{\cal D}_d^++{\cal D}_d^- \] be the double point divisor of $C$. Now consider the divisor \[ D=D^+ - D^- =P_1+\ \cdots\ +P_s-Q_1-\ \cdots\ -Q_s \] of degree zero. We consider an $m$-curve with a polynomial $G(x,y,z)\neq 0\ {\rm mod}\ F$ with \[ s+g+d\leq b_m:=\left\{ \begin{array}{ccc} \frac{m(m+3)}{2} & {\rm for} & m<n \\ mn-\frac{(n-1)(n-2)}{2} & {\rm for} & m\geq n \end{array} \right.\ \] through the $s$ points of $D^+$, ${\cal D}_1,\ \cdots\ ,{\cal D}_d$ and $(b_m-s-d)P_0$. We have \[ mn-s-2d-(b_m-s-d)\ =\ mn-b_m-d\ =\ \frac{(n-1)(n-2)}{2}-d\ =\ g \] remaining intersections $R_1,\ \cdots\ ,R_g$, i.e. \[ D^+ +\Delta +(b_m -s-d )P_0+R_1+\ \cdots\ +R_g \sim m D_\infty . \] Then we consider an $m$-curve $G'(x,y,z)\neq 0 \rm\ mod\ \it F$ through the $s$ points of $D^-$ and through $R_1,\ \cdots\ ,R_g, {\cal D}_1,\ \cdots\ ,{\cal D}_d,(b_m-s-d-g)P_0$. We require a maximal additional contact $\alpha$ at $P_0$. Let $S_1,\ \cdots\ ,S_{g-\alpha}$ be the remaining intersections not equal to $P_0$. We have {\small \[ D^- + \Delta + R_1+ \cdots +R_g +( b_m-s-d-g+\alpha )P_0 +S_1+ \cdots +S_{g-\alpha} \sim m D_\infty . \]} It follows \[ D^+ - D^- \sim \overline{D}:= S_1+\ \cdots\ +S_{g-\alpha} - (g-\alpha)P_0 . \] Analogously to Proposition 4 one shows that $\overline{D}$ is the reduced divisor of $D$. 
For a finite simple double point ${\cal D}_i$ we have two Taylor series expansions
We consider the hyperelliptic $4$-curve $C$ defined by $x^{4}-y^{4}=30xyz^2$ with $g=2$.
We have \[ {\bf I}^+ = {\bf I}^h_{\Delta+2{\cal D}_0^+} = (x^3,y) , \] and \[ {\bf I}^- = {\bf I}^h_{\Delta+P_1+P_2} = (x z - 2 y z, -x y - y^2 + 6 y z, -x^2 + y^2 + 6 y z, y^2 z - 2 y z^2). \] We form \[ {\bf I}^h_{\Delta+2{\cal D}_0^+ +2P_0} = {\bf I}^+ \odot (x-y,z^2) = ( -yx + y^2, y z^2, x^3 -x^2 y, x^3 z^2). \] We choose $G=-xy+y^2$. We obtain the quotient \[ {\bf J}=(G) \oslash_\Delta {\bf I}^h_{\Delta+2{\cal D}_0^+ +2P_0} = (x^2,xy,y^2) = {\bf I}^h_{\Delta +R_1 +R_2}. \] We remark that $R_1+R_2={\cal D}_0^++{\cal D}_0^-$. Now we form \[ {\bf I}^h_{\Delta+R_1+R_2+P_1+P_2} = {\bf I}^- \odot_\Delta J= (-x^2 + x y + 2 y^2, x^2z - 2 xyz, x^3 +x^2 y - 6x^2 z, x^3z - 4 x^2z^2). \] We choose $G'=x^2-xy-2y^2$. Then we obtain the quotient \[ (G') \oslash_\Delta {\bf I}^h_{\Delta+R_1+R_2+P_1+P_2} = (x z - 2 y z, -x y - y^2 - 6 y z, x^2 - y^2 + 6 y z, y^2 z + 2 y z^2) = {\bf I}^h_{\Delta +S_1 +S_2}={\bf I}_{red}. \] We obtained ${\bf I}_{red}$ from ${\bf I}^+,{\bf I}^-$ by rational operations. We remark that $S_1+S_2=(-4,-2,1)+P_2$. \section{Hyperelliptic curves} \noindent In this section we present an algorithm for hyperelliptic curves $C$ of genus $g$ in the standard form \[ y^2=a(x-x_1)(x-x_2) \cdots (x-x_{2g+1})=:h(x) \] with different $x_i$, $a\neq 0$. The projectivisation has a single singular (nonsimple) infinite point $P_\infty =(0,1,0)$. We choose $P_0:=P_\infty$. Hyperelliptic curves have an involution $x\rightarrow x$, $y\rightarrow -y$. Let $\overline{I}$ and $\overline{D}$ be the image of the ideal $I$ and the divisor $D$ with respect to this involution. Then we have $\overline{(x_1,y_1)+\cdots +(x_n,y_n)}= (x_1,-y_1)+\cdots +(x_n,-y_n)$,\\ $\overline{(x,y)}+(x,y)\sim 2 P_0$ and $I_{\overline{D}}=\overline{I_D}$. Because $C$ has only one infinite point it is sufficient to consider affine ideals. However, the selection of the interpolating curves requires a modification. Let $D=D^+ - D^- \sim D^+ +\overline{D^-}-2\deg(D^-)P_0$ be a divisor of finite points. 
\noindent {\it Remark:} Contrary to Cantor's algorithm, we can describe our algorithm by this single formula. Our algorithm uses a reduction function of the general form $f=p(x)+yq(x)$. In contrast, Cantor's algorithm uses reduction functions of the special form $y-p(x)$ several times (cf. the example below).
We determine the minimal element $f=p(x)+q(x)y+r(x)y^2$ of $I_{D^+}$ with respect to the above order. We have \[ (f) = D^+ + R_1 + \cdots + R_t - \deg_{4,3} (f) P_0 \] with $t:=\deg_{4,3}(f)-\deg(D^+)$ remaining finite points $D'=R_1 + \cdots + R_t$ whose $x$-coordinates are zeros of the polynomial \[ \left| \begin{array}{ccc} p & q & r \\ hr & p & q \\ hq & hr & p \end{array} \right| =p^3+q^3h+r^3h^2-3pqrh \] of degree $t$. We have \[ I_{D'} = (f,I_C):I_{D^+}. \] We determine the minimal element $g$ of $I_{D'+D^-}=I_{D'}I_{D^-}$ with respect to the above order. We have \[ (g) = D'+D^- + S_1 + \cdots + S_q - \deg_{4,3} (g)P_0 \] with $q:=\deg_{4,3}(g)-\deg(D')-\deg(D^-)$ remaining finite points $D''=S_1 + \cdots + S_q$. It follows \[ D''-qP_0 \sim -D'-D^- +(\deg_{4,3}(f)-q)P_0 \sim D-(\deg(D^+)-\deg(D^-))P_0. \] \begin{Le} $D''-q P_0$ is the reduced divisor for $D- \deg(D) P_0$. \end{Le} \noindent {\it Proof:} The proof follows from the fact that $q$ is minimal if $\deg_{4,3}(g)$ is minimal. $\bullet$ We have \[ I_{red} = I_{D''} = (g,I_C):I_{D'+D^-}. \] An algorithm for the Jacobian group of this curve is discussed in \cite{Es,FO}. Contrary to our algorithm, these algorithms use the concrete structure of the curve and require the distinction of many different cases. Our algorithm for Picard curves has a straightforward generalization to superelliptic curves \[ y^m=a(x-x_1)\cdots (x-x_n) \] with different $x_i$, $a\neq 0$ and $\gcd(m,n)=1$. \section{The case of characteristic $p$} \noindent In this last section we make some remarks about the case of characteristic $p$. Using the theory of \cite{Mo} one can show that our algorithm has an analogue if we replace\ $\CC$ by a field $k$ of characteristic $p$. $k$ has the algebraic closure $\overline{k}$. The divisor group $Div(C)$ is the free Abelian group consisting of formal finite sums $\sum_{P\in C(\overline{k})}m_P P$ with $m_P\in {\sf Z}\hspace{-0.4em}{\sf Z}\ $.
A divisor is defined over $k$ if it is fixed by the natural Galois action of ${\rm Gal}(\overline{k}/k)$. The divisors defined over $k$ form the subgroup $Div_k(C)$. Analogously one defines the group $Div^0_k(C)$. Principal divisors are defined as zeros and poles of rational functions $\frac{G(x,y,z)}{H(x,y,z)}$ where $G,H$ $(H\notin (F(x,y,z)))$ are homogeneous polynomials of equal degree with coefficients in $k$. They form a subgroup $Div^P_k(C)\subset Div^0_k(C)$. We define $Jac_k(C):=Div^0_k(C) / Div^P_k(C)$. Furthermore we can define the analogous notion of a reduced divisor. Let $D$ be an element of $Div_k(C)$ without infinite points. Then the polynomials $p(x,y)\in k[x,y]$ with $(p)\geq D$ form an ideal $I_D\subset k[x,y]$. One can show that there is a one-to-one correspondence between ideals $I$ of $k[x,y]$ with $I\supset I_C$ and divisors $D$ of $Div_k(C)$ of finite points. Now let $C$ be a hyperelliptic curve or a Picard curve. The affine part of the above hyperelliptic curves is smooth for $p\neq 2$ and the above Picard curves are smooth for $p\neq 3$. This case is interesting in view of applications in cryptography. Let $I^+,I^-$ be two ideals of $k[x,y]$ with divisors $D^+,D^-$. We apply the corresponding algorithm of the two previous sections to $I^+,I^-$. Because all operations are rational we obtain an ideal $I_{red}$ of $k[x,y]$. We have an analogue of Lemmas 9 and 10. Therefore $I_{red}$ corresponds to a reduced divisor. \noindent {\it Example:} Let $C$ be the hyperelliptic curve $y^2=(x-3)(x-2)(x-1)x(x+1)(x+2)(x+3)=:h(x)$ for $k=F_{17}$ and let $D_1=(4,5)+(5,8) +(6,4)$, $D_2=(7,5)+(10,3)+(11,1)$. We have $I_{D_1}=(x^3+2x^2+6x+16,5x^2+9x+8-y)=:(a_1,b_1-y)$ and $I_{D_2}=(x^3+6x^2+2x+12,11x^2+5x+9-y)=:(a_2,b_2-y)$. \noindent 1. \it Cantor's algorithm: \rm We have $s_1a_1+s_2a_2=1$ with certain $s_1$, $s_2$.
We obtain the composed ideal $I_{D_1}I_{D_2}$ with the basis $(a,b-y):=(a_1a_2,(s_1a_1b_2+s_2a_2b_1-y) {\rm mod} a_1a_2)= (x^6+8 x^5+3x^4+13x^2+2x+5,x^5+7x^4+2x^3+6x^2+5x+5-y )$. The reduction process gives the ideals of equivalent divisors \\ $(a',b'-y):=(\frac{b^2-h}{a},(-b-y){\rm mod} a')= (x^4+6x^3+2x^2+5x+5, 6x^3+x^2+5x-y )$ and \\ $(a'',b''-y):=(\frac{{b'}^2-h}{a'},(-b'-y){\rm mod} a'')= (x^3+9x^2+3x,2x^2+13x-y)$.\\ The last ideal corresponds to the reduced divisor. \noindent 2. \it Our algorithm: \rm We obtain for $I_{D_1}I_{D_2}$ the Groebner basis $(x^6+8 x^5+3x^4+13x^2+2x+5,11x^4+9x^3+2x^2+9x+y(x+1))=:(.,f)$ with respect to the weighted lexicographic order and $(f,y^2-h(x)):(I_{D_1}I_{D_2})= (x^3+9x^2+3x,2x^2+13x-y)$.
1,108,101,565,906
arxiv
\section{Introduction} Modern visual SLAM methods achieve remarkable performance when evaluated on suitable high-quality data~\cite{fuentes2015visual}. However, in the context of downstream tasks, such as indoor robot navigation, a number of difficulties remain~\citep{cadena2016past, mishkin2019benchmarking, chen2020survey}. An imperfect navigation agent captures substantially different images from a human, as it may frequently face featureless walls and produce rapid turns~(see \figref{fig:challenges}). Further, despite advances in sensor technology, modern robots still often use cameras with noisy images, low frame rate, and a narrow field-of-view~\cite{habitatchallenge}. These factors make feature extraction and association difficult. Relocalization and loop closure can be challenging due to environmental changes and repetitive features. Finally, integrating SLAM into a navigation pipeline is not trivial, because the map representation must be suitable for downstream planning, it may need to capture task-dependent information, and planning must be able to handle map imperfections. This paper introduces the Differentiable SLAM Network (SLAM-net) together with a navigation architecture for downstream indoor navigation. The key idea of SLAM-net is to encode a SLAM algorithm in a differentiable computation graph, and learn neural network model components for the SLAM algorithm end-to-end, by backpropagating gradients through the algorithm. Concretely, SLAM-net encodes the particle filter based FastSLAM algorithm~\cite{montemerlo2002fastslam} and learns mapping, transition and observation models. SLAM-net fills a gap in the literature on differentiable robot algorithms~\cite{tamar2016value, gupta2017cognitive, karkus2018particle, karkus2019differentiable}.
\begin{figure}[!t] \centering \includegraphics[width=0.95\textwidth]{figures/navigationtask.pdf} \caption{Visual robot navigation is challenging for SLAM, \eg, because the robot frequently faces featureless walls; it rotates quickly; the onboard camera produces noisy images; the frame rate is low; \etc{} The images were taken by our navigation agent in the Habitat environment. } \label{fig:challenges} \end{figure} The benefit of SLAM-net compared to unstructured learning approaches is that its encoded particle filter provides a strong prior for learning. The benefit over classic SLAM is that all components are learned, and they are directly optimized for the end-objective. Concretely, SLAM-net learns RGB and RGB-D observation models for the encoded FastSLAM algorithm, which previously relied on handcrafted models and lidar sensors. Further, because of the task-oriented learning, feature extractors can learn to be more robust against domain specific challenges, \eg, faced with downstream navigation; while on the flip side they may be less reusable across tasks. \begin{figure*}[!t] \centering \includegraphics[width=0.9\textwidth]{figures/slamnet_cvpr.pdf} \vspace{-0.2cm} % \caption{Differentiable SLAM-net. The global map is maintained by a collection of learned local grid maps. The trajectory is tracked by a particle filter. Particles represent trajectories and they are updated with learned neural network components: the mapping, transition, and observation models.} \label{fig:slamnet} \end{figure*} We validate SLAM-net for localization with RGB and RGB-D input, as well as downstream robot navigation in previously unseen indoor environments. We use the Habitat simulation platform~\citep{savva2019habitat} with three real-world indoor scene datasets. We additionally experiment with the KITTI visual odometry data~\cite{geiger2012we}.
SLAM-net achieves good performance under challenging conditions where the widely used ORB-SLAM~\citep{mur2017orb} completely fails; and trained SLAM-nets transfer over datasets. For downstream navigation we propose an architecture similar to Neural SLAM~\cite{chaplot2020learning}, but with using our differentiable SLAM-net module. Our approach significantly improves the state-of-the-art for the CVPR Habitat 2020 PointNav challenge~\cite{habitat2020leaderboard}. % \section{Related work} \myparagraph{Learning based SLAM} Learning based approaches to SLAM have a large and growing literature. For example, CodeSLAM\citep{bloesch2018codeslam} and SceneCode\citep{zhi2019scenecode} learn a compact representation of the scene; CNN-SLAM\citep{tateno2017cnn} learns a CNN-based depth predictor as the front-end of a monocular SLAM system; BA-net \cite{tang2018ba} learns the feature metric representation and a damping factor for bundle adjustment. While these works use learning, they typically only learn specific modules in the SLAM system. Other approaches do end-to-end learning but they are limited to visual odometry, \ie, they estimate relative motion between consecutive frames without a global map representation~\cite{zhou2017unsupervised, li2018undeepvo}. Our method maintains a full SLAM algorithm and learns all of its components end-to-end. \myparagraph{Classic SLAM} Classic SLAM algorithms can be divided into filtering and optimization based approaches \cite{strasdat2012visual}. Filtering-based approaches maintain a probability distribution over the robot trajectory and sequentially update the distribution with sensor observations \cite{civera20091,azarbayejani1995recursive,davison2003real,montemerlo2002fastslam,montemerlo2003fastslam}. Optimization-based approaches apply bundle adjustment on a set of keyframes and local maps; % and they are popular for both visual \cite{strasdat2012visual, mur2015orb,mur2017orb,klein2007parallel} and lidar-based SLAM~\cite{hess2016real}. 
Our approach builds on a filtering-based algorithm, FastSLAM~\cite{montemerlo2002fastslam, montemerlo2003fastslam}. The original algorithm (apart from a few adaptations~\cite{barfoot2005online, lee2011rs, hartmann2012real}) works with a lidar sensor and hand-designed model components. Robot odometry information is typically used for its transition model, and either landmarks~\cite{montemerlo2002fastslam} or occupancy grid maps~\cite{grisetti2007improved} are used for its observation model. In contrast, we learn neural network models for visual input by backpropagation through a differentiable variant of the algorithm. We choose this algorithm over an optimization based method because of the availability of differentiable particle filters~\cite{jonschkowski2018differentiable, karkus2018particle}, and the suitability of the algorithm for downstream robot navigation. \myparagraph{Differentiable algorithms} Differentiable algorithms are emerging for a wide range of learning domains, including state estimation~\citep{haarnoja2016backprop, jonschkowski2018differentiable, karkus2018particle, ma2020particle}, visual mapping~\citep{gupta2017cognitive, karkus2020differentiable}, planning~\citep{tamar2016value, karkus2017qmdp, farquhar2017treeqn, oh2017value, guez2018learning, yonetani2020path} and control tasks~\citep{amos2018differentiable, okada2017path, east2019infinite, bhardwaj2020differentiable}. Differentiable algorithm modules have been also composed together for visual robot navigation~\cite{gupta2017cognitive, karkus2019differentiable, ma2020discriminative}. This work introduces a differentiable SLAM approach that fills a gap in this literature. While \citet{jatavallabhula2019gradslam} have investigated differentiable SLAM pipelines, they focus solely on the effect of differentiable approximations and do not perform learning of any kind. 
\myparagraph{Visual navigation} A number of learning based approaches have been proposed for visual navigation recently~\citep{gupta2017cognitive, anderson2018evaluation, mishkin2019benchmarking, karkus2019differentiable, wijmans2019dd, chaplot2020learning, chaplot2020neural, ramakrishnan2020occupancy}. Modular approaches include CMP~\cite{gupta2017cognitive}, DAN~\cite{karkus2019differentiable} and Neural SLAM~\cite{chaplot2020learning}. However, CMP assumes a known robot location, circumventing the issue of localization. DAN assumes a known map that is given to the agent. Neural SLAM~\cite{chaplot2020learning, ramakrishnan2020occupancy} addresses the joint SLAM problem, but it relies solely on relative visual odometry without local bundle adjustment or loop closure, and thus it inherently accumulates errors over time. We propose a similar navigation architecture to Neural SLAM~\cite{chaplot2020learning}, but utilizing our Differentiable SLAM-net module in place of learned visual odometry. % \section{Differentiable SLAM-net}\label{sec:slamnet} \subsection{Overview} The Differentiable SLAM-net architecture is shown in~\figref{fig:slamnet}. Inputs are RGB(D) observations $\ensuremath{o}_t$, outputs are pose estimate $\ensuremath{s}_t$ and global map $\ensuremath{M}_t$. SLAM-net assumes the robot motion is (mostly) planar. Poses are 2D coordinates with 1D orientation; the global map is a 2D occupancy grid. Internally SLAM-net represents the global map as a collection of local maps, each associated with a local-to-global transformation. Local maps are $\thickmuskip=2mu N\medmuskip=0mu\times N \medmuskip=0mu\times \ensuremath{N_\mathrm{ch}}$ grids that, depending on the configuration, may encode occupancy and/or learned latent features. We add a local map for each observation, but without knowing the robot pose we do not know the correct local-to-global map transformation.
Instead, the algorithm maintains a distribution over the unknown robot trajectory and closes loops using particle filtering~\cite{doucet2001introduction}. Our algorithm is based on FastSLAM~\cite{montemerlo2002fastslam,montemerlo2003fastslam}, and our differentiable implementation is built on PF-nets~\cite{karkus2018particle}. The algorithm works as follows. The particle filter maintains $K$ weighted particles, where each particle represents a trajectory $\ensuremath{s}_{0:t}^k$. At $t=0$ all particle trajectories are set to the origin; particle weights are constant, and the local map collection is empty. In each time step a \emph{mapping model} predicts a local map $\ensuremath{m}_t$ from the input observation $\ensuremath{o}_t$, and $\ensuremath{m}_t$ is added to the collection. Particle trajectories are extended with samples from a probabilistic \emph{transition model} that estimates the relative motion given $\ensuremath{o}_t$ and $\ensuremath{o}_{t-1}$. Particle weights are then updated using an \emph{observation model} which measures the compatibility of $\ensuremath{m}_t$ and the past local maps $\ensuremath{m}_{1:t-1}$ assuming the particle trajectory $\ensuremath{s}_{0:t}^k$ was correct. The pose output is obtained by taking the weighted sum of particle trajectories. Optionally, a global occupancy grid map is obtained with simple 2D image transformations that combine local maps along the mean trajectory. The key feature of SLAM-net is that it is end-to-end differentiable. That is, the mapping, transition and observation models are neural networks, and they can be trained together for the end-objective of localization accuracy (and/or global map quality). To make the algorithm differentiable we use the reparameterization trick~\citep{kingma2013auto} to differentiate through samples from the transition model; and we use spatial transformers~\cite{jaderberg2015spatial} for differentiable map transformations. 
The rest of the operations of the algorithm, as presented, are already differentiable. While not used in our experiments, differentiable particle resampling could be incorporated from prior work~\citep{karkus2018particle, zhu2020towards, corenflos2021differentiable}. Further, due to limited GPU memory, to make use of the differentiable algorithm for learning our design choices on the local map representation and the formulation of the observation model are important. Next we introduce each component of SLAM-net. Network architecture details are in the Appendix. \begin{figure*}[t] % \centering \includegraphics[width=0.9\textwidth]{figures/navpipeline_cvpr.pdf} \caption{Visual navigation pipeline with the Differentiable SLAM-net, a path planner, and a motion controller.} % \label{fig:navpipeline} \end{figure*} \subsection{Transition model} The transition model is a CNN that takes in the concatenated current and last observations, $\ensuremath{o}_t$ and $\ensuremath{o}_{t-1}$, and outputs parameters of Gaussian mixture models with separate learned mean and variance for the relative 2D pose and 1D orientation. The transition model is pre-trained to maximize the log-likelihood of true relative poses along the training trajectories. It is then finetuned together with the rest of the SLAM-net components optimizing for the end-objective. \subsection{Mapping model} The mapping model is a CNN with a pre-input perspective transformation layer. The input is observation $o_t$, the output is local map $m_t$. Local maps serve two purposes: to be fed to the observation model and aid pose estimation by closing loops; and to construct a global map for navigation. Our local maps are $\thickmuskip=2mu N\medmuskip=0mu\times N \medmuskip=0mu\times \ensuremath{N_\mathrm{ch}}$ grids that capture information about the area in front of the robot. In one configuration local maps encode occupancy, \ie, the probability of the area being occupied for the purpose of navigation. 
This model is trained with a (per cell) classification loss using ground-truth occupancy maps. In another configuration local maps encode learned latent features that have no associated meaning. This model is trained for the end-objective by backpropagating through the observation model. In both cases we found it useful to add an extra channel that encodes the visibility of the captured area. For depth input this is computed by a projection; for RGB input it is predicted by the network, using projected depth for supervision. \subsection{Observation model} The observation model is the most important component of the architecture. It updates particle weights based on how ``compatible'' the current local map $\ensuremath{m}_t$ would be with past local maps $\ensuremath{m}_{1:t-1}$ if the particle trajectory $\ensuremath{s}_{0:t}^k$ was correct. Intuitively we need to measure whether local maps capture the same area in a consistent manner. Formally we aim to estimate a compatibility value proportional to the log-probability $\log{p(\ensuremath{m}_t | \ensuremath{m}_{1:t-1}, \ensuremath{s}_{1:t}^k)}$. We propose a discriminative observation model that compares pairs of local maps with a learned CNN. The CNN takes in a current local map $\ensuremath{m}_t$ and a past local map $\ensuremath{m}_\tau$ concatenated, and outputs a compatibility value. Importantly, the past local map is transformed to the viewpoint of the current local map according to the relative pose in the particle trajectory $(s_t^k, s_\tau^k)$. We use spatial transformers~\cite{jaderberg2015spatial} for this transformation. The overall compatibility is the sum of pairwise compatibility values along the particle trajectory. Compatibility values are estimated for all particles. Particle weights are then updated by multiplying with the exponentiated compatibility values, and they are normalized across particles. CNN weights are shared.
For computational reasons, instead of comparing all local map pairs, we only compare the \emph{most relevant} pairs. During training we pick the last 4--8 steps; during inference we dynamically choose 8 steps based on the largest overlapping view area (estimated using simple geometry). \subsection{Training and inference} Our training data consists of image-pose pair trajectories (depth or RGB images, and 2D poses with 1D orientation); and optionally ground-truth global occupancy maps for pre-training the mapping model. The end-to-end training objective is the sum of Huber losses for the 2D pose error and 1D orientation error. We train in multiple stages. We first pre-train the transition model. We separately pre-train the observation model together with the mapping model for the end-objective, but in a low noise setting. That is, in place of the transition model we use ground truth relative motion with small additive Gaussian noise. Finally we combine all models and finetune them together for the end-objective. During finetuning we freeze the convolution layers and mixture head of the transition model. When the mapping model is configured to predict occupancy it is pre-trained separately and it is frozen during finetuning. An important challenge with training SLAM-net is the computational and space complexity of backpropagation through a large computation graph. To overcome this issue, during training we use only short trajectories (4-8 steps), and $\thickmuskip=2mu K=32$ particles without resampling. During inference we use the full length trajectories, and by default $\thickmuskip=2mu K=128$ particles that are resampled in every step. \subsection{Implementation details} We implement SLAM-net in Tensorflow~\cite{tensorflow2015-whitepaper} based on the open-source code of PF-net~\cite{karkus2018particle}. We adopt the training strategy where the learning rate is decayed if the validation error does not improve for 4 epochs. 
We perform 4 such decay steps, after which training terminates, and the model with the lowest validation error is stored. The batch size is 16 for end-to-end training, and 64 for pre-training the mapping and transition models. We use Nvidia GeForce GTX 1080 GPUs for both training and inference. For RGB input we configure local maps with 16 latent feature channels that are not trained to predict occupancy. For RGB-D input local maps are configured with both latent channels and one channel that predicts occupancy. Further, with RGB-D data we only use depth as input to SLAM-net. \section{Visual Navigation with SLAM-net}\label{sec:navigation} We propose a visual navigation pipeline (\figref{fig:navpipeline}) that combines SLAM-net with modules for path planning and motion control. In the pipeline SLAM-net periodically predicts the robot pose and a global occupancy grid map. % The map and pose are fed to a 2D path planner that plans a path to the goal. The path is then tracked by a local controller that outputs robot actions. % \myparagraph{Task specification} We follow the task specification of the Habitat 2020 PointNav Challenge~\cite{habitatchallenge}. A robot navigates to goals in previously unseen apartments using noisy RGB(D) input. The goal is defined by coordinates relative to the initial pose, but the robot location is unknown thereafter, and discrete robot actions generate noisy motion. % Navigation is successful if the robot takes a dedicated \emph{stop} action within 0.36 meters to the goal. % Note that this success criterion places high importance on pose estimation accuracy. \myparagraph{Path planner} The challenge of path planning in the context of visual navigation is the imperfect partial knowledge of the map and the robot pose. To address this we adopt a weighted variant of the D* algorithm~\citep{koenig2005fast} with costs that penalize moving near obstacles. In each step when the map and pose are updated by the SLAM-net, the path is replanned.
For planning we convert the occupancy grid map to an 8-connected grid where cells are assigned a cost. We threshold the map ($\thickmuskip=2mu p>0.5$ is an obstacle, $\thickmuskip=2mu p\leq 0.5$ is free space) and define cell costs based on the distance to the nearest obstacle. Additionally, we use a collision recovery mechanism. Upon detecting a collision an obstacle is registered to the map at the current estimated pose. The robot is then commanded to turn around (6 turn actions) and takes a step back (1 forward action). Collisions are not directly observed. We trigger collision recovery if the estimated pose does not change more than $3cm$ following a forward action. A similar mechanism was proposed in \citet{chaplot2020learning}. We also hard-coded an initial policy that makes the robot turn around in the beginning of an episode. The initial policy terminates once the estimated rotation exceeds $370^{\circ}$. \myparagraph{Local controller} The planned path is tracked by a simple controller that chooses to turn or move forward aiming for the furthest straight-line traversable point along the path. The outputs of the controller are discrete actions. \section{Experiments}\label{sec:experiments} Our experiments focus on the following questions. 1)~Can SLAM-net learn localization in previously unseen indoor scenes? How does it compare to existing methods? 2)~Does SLAM-net enable downstream robot navigation? 3)~What does SLAM-net learn and how do model components and hyperparameters affect performance? 4)~Do learned models transfer to new datasets? 5)~What are the limitations of SLAM-net if applied, \eg, to autonomous driving data? \subsection{Datasets} \myparagraph{Habitat} Our main experimental platform is the Habitat simulator~\cite{savva2019habitat} configured with different real-world datasets: Gibson~\cite{xiazamirhe2018gibsonenv}, Replica~\cite{replica19arxiv}, and Matterport~\cite{Matterport3D}.
The datasets contain a large number of 3D scans of real world indoor scenes, typically apartments. Habitat embeds the scenes in an interactive physics simulator for robot navigation. The simulator renders photorealistic but noisy first-person RGB(D) observations and simulates realistic robot motion dynamics. The camera has a horizontal FOV of $70^{\circ}$ and a resolution of $640\mytimes360$. For SLAM-net we downscale images to $160\mytimes90$. Depth values are in the range of 0.1 to 10 meters. Unless stated otherwise, we use the Habitat Challenge 2020 configuration~\cite{habitatchallenge}: Gaussian noise for RGB images; the Redwood noise model for depth images~\cite{choi2015robust}; the Locobot motion model for actuation noise~\cite{murali2019pyrobot}. To train and evaluate SLAM methods we generate a fixed set of trajectories. For navigation we let our method interact with the simulator. \myparagraph{Gibson data}\label{sec:datasets} Following \citet{savva2019habitat} we use 72 scenes from the Gibson dataset for training and further split the original validation set to 7 scenes for validation and 7 scenes for testing. We use 36k of the provided navigation episodes for training (500 per scene). Given a start and goal pose we generate a trajectory with a navigation policy that switches between a shortest-path expert (30 steps) and random actions (40 steps). For evaluation we generate 105 trajectories (15 per test scene) using three different navigation policies: the shortest-path expert (\textbf{traj\_expert}); the shortest-path expert mixed with random actions~(\textbf{traj\_exp\_rand}); and our final navigation pipeline~(\textbf{traj\_nav}). \myparagraph{Replica and Matterport data} We use the Matterport and Replica datasets for transfer experiments without additional training. We generate trajectories for evaluation similarly to the Gibson data, using the shortest-path expert policy. 
We use the original validation split with a total of 170 and 210 trajectories for Replica and Matterport respectively. \myparagraph{KITTI data} We conduct additional experiments with the KITTI real world driving data~\cite{geiger2012we}. We use the full KITTI raw dataset for training, validation, and testing. Following the KITTI Odometry Split~\cite{geiger2012we} the validation trajectories are 06 and 07, and the testing trajectories are 09 and 10. Since the original depth information for KITTI dataset is from sparse lidar, we use the completed depth data from the \cite{uhrig2017sparsity} as ground-truth depth. \myparagraph{Statistics} \tabref{tab:datastat} provides statistics for each set of evaluation trajectories. We provide the mean and standard deviation of the trajectory length, number of frames, and number of turn actions (where applicable). \begin{table}[t!] \centering \scalebox{0.85}{ \begin{tabular}{lccc} \toprule Dataset & length[m] & \#frames & \#turns \\ \midrule Gibson ({traj\_expert}) & 7.4{\small$\pm$3.8} & 51.1{\small$\pm$24.7} & 22.6{\small$\pm$11.6} \\ Gibson ({traj\_exp\_rand}) & 14.5{\small$\pm$7.2} & 152.3{\small$\pm$67.9} & 75.0{\small$\pm$34.6}\\ Gibson ({traj\_nav}) & 11.9{\small$\pm$8.8} & 117.0{\small$\pm$113.3} & 74.1{\small$\pm$84.0} \\ Matterport ({traj\_expert}) & 13.0{\small$\pm$7.0} & 82.0{\small$\pm$37.9} & 32.9{\small$\pm$14.1} \\ Replica ({traj\_expert}) & 8.0{\small$\pm$2.8} & 53.9{\small$\pm$17.7} & 23.5{\small$\pm$9.17} \\ KITTI-09 & 1680.3 & 1551 \\ KITTI-10 & 910.48 & 1161 \\ \bottomrule \end{tabular} }% {\vspace{0cm}\caption{Data statistics.}\label{tab:datastat}} \end{table} \begin{table*} \centering \scalebox{0.9}{ \begin{tabular}{l@{\hskip 0.8cm}c@{\hskip 1.0cm}cc@{\hskip .4cm}cc@{\hskip .4cm}cc@{\hskip 1.0cm}cc} \toprule Sensor & \textbf{} & \multicolumn{2}{c@{\hskip .4cm}}{\textbf{RGBD}} & \multicolumn{2}{c@{\hskip .4cm}}{\textbf{RGBD}} & \multicolumn{2}{c@{\hskip 1.0cm}}{\textbf{RGBD}} & 
\multicolumn{2}{c}{\textbf{RGB}} \\ Trajectory generator & \textbf{} & \multicolumn{2}{c@{\hskip .4cm}}{\textbf{traj\_expert}} & \multicolumn{2}{c@{\hskip .4cm}}{\textbf{traj\_exp\_rand}} & \multicolumn{2}{c@{\hskip 1.0cm}}{\textbf{traj\_nav}} & \multicolumn{2}{c}{\textbf{traj\_expert}} \\ Metric & {\small runtime$\downarrow$} & {\small SR$\uparrow$} & {\small RMSE$\downarrow$} & {\small SR$\uparrow$} & {\small RMSE$\downarrow$} & {\small SR$\uparrow$} & {\small RMSE$\downarrow$} & {\small SR$\uparrow$} & {\small RMSE$\downarrow$} \\ \midrule \textbf{SLAM-net (ours)} & 0.06s & \textbf{83.8\pct} & \textbf{0.16m} & \textbf{62.9\pct} & \textbf{0.28m} & \textbf{77.1\pct} & \textbf{0.19m} & \textbf{54.3\pct} & \textbf{0.26m} \\ Learned visual odometry & 0.02s & 60.0\pct & 0.26m & 24.8\pct & 0.63m & 30.5\pct & 0.47m & 28.6\pct & 0.40m \\ FastSLAM~\cite{montemerlo2003fastslam} & -- & 21.0\pct & 0.58m & 0.0\pct & 3.27m & 21.9\pct & 0.69m & X & X\\ ORB-SLAM~\cite{mur2017orb} & 0.08s & 3.8\pct & 1.39m & 0.0\pct & 3.59m & 0.0\pct & 3.54m & X & X \\ Blind baseline & 0.01s & 16.2\pct & 0.80m & 1.0\pct & 4.13m & 3.8\pct & 1.50m & 16.2\pct & 0.80m \\ \bottomrule \end{tabular}% }% {\vspace{0.0cm}\caption{Main SLAM results.}\label{tab:localization}} \vspace{-0.0cm} \end{table*} \begin{table} \centering \scalebox{0.9}{ \begin{tabular}{lccc@{\hskip .6cm}c@{\hskip .3cm}c} \toprule & \multicolumn{3}{c@{\hskip .6cm}}{\textbf{Component}} & \textbf{RGBD} & \textbf{RGB} \\ & T & M & Z & {\small SR$\uparrow$} & {\small SR$\uparrow$} \\ \midrule \textbf{SLAM-net (ours)} & \textbf{l} & \textbf{l} & \textbf{l} & \textbf{77.1\pct} & \textbf{55.2\pct} \\ & h & l & l & 43.8\pct & 19.1\pct \\ & l & h & h & 58.1\pct & X \\ FastSLAM & h & h & h & 21.9\pct & X \\ \bottomrule \end{tabular}% }% {\vspace{0.0cm}\caption{Learned vs. 
handcrafted SLAM components.}\label{tab:fastslam}} \vspace{-0.0cm} \end{table} \begin{table} \centering \scalebox{0.9}{ \begin{tabular}{l@{\hskip .6cm}c@{\hskip .6cm}c} \toprule & \textbf{RGBD} & \textbf{RGB} \\ & {\small SR$\uparrow$} & {\small SR$\uparrow$} \\ \midrule Default conditions & 3.8\pct & X \\ No sensor noise & 7.5\pct & 18.0\pct \\ No sensor and actuation noise & 18.0\pct & 20.4\pct \\ High frame rate & 30.4\pct & X \\ Ideal conditions & 86.0\pct & 43.5\pct \\ \bottomrule \end{tabular}% }% {\vspace{0.0cm}\caption{ORB-SLAM results under idealized conditions.}\label{tab:orbslam}} \vspace{-0.0cm} \end{table} \subsection{Baselines} % \myparagraph{Learned visual odometry} We use the transition model of SLAM-net as a learned visual odometry model. The model parameterizes a Gaussian mixture that predicts the relative motion between consecutive frames. When the model is used for visual odometry we simply accumulate the mean relative motion predictions. \myparagraph{ORB-SLAM} We adopt the popular ORB-SLAM~\citep{mur2015orb, mur2017orb} as a classic baseline. % The algorithm takes in RGB or RGB-D images, constructs a keypoint-based sparse map, and estimates a 6-DOF camera pose at every time-step. The algorithm relies on tracking features between consecutive frames. If there are not enough tracked key-points, the system is lost. When this happens we initialize a new map and concatenate it with the previous map based on relative motion estimated from robot actions. With RGB-D input re-initialization takes one time step, with RGB-only input it takes several steps. % We carefully tuned the hyperparameters of ORB-SLAM based on the implementation and configuration of \citet{mishkin2019benchmarking}, who tuned the algorithm for Habitat simulation data, although without sensor and actuation noise. For the main localization results in \tabref{tab:localization} we use the default velocity-based motion model as in \citep{mur2015orb}. 
In \tabref{tab:transfer} we replace the motion model with relative motion estimated from actions, which gave better results. % \myparagraph{FastSLAM} FastSLAM uses the same particle filter algorithm as our SLAM-net, but with handcrafted transition, mapping and observation models. We naively adapt the original algorithm~\citep{montemerlo2003fastslam} to be used with occupancy grid maps. Specifically, we use the same local map representation as in SLAM-net. The transition model is a Gaussian mixture that matches the ground truth actuation noise. % The mapping and observation models are then naive implementations of the inverse lidar model and the beam model described in Chapter~9.2 and Chapter~6.3 of \cite{thrun2005probabilistic}, respectively. We create artificial lidar sensor readings by taking the center row of depth inputs. In the observation model instead of pair-wise comparisons we combine the 32 most relevant local maps into a global map. The number of particles are chosen to be the same as for SLAM-net. \myparagraph{Blind baseline} This baseline ignores observation inputs, and instead it accumulates the nominal robot motion based on the ground-truth (but noisy) motion model. This serves as a calibration of the performance of other methods. 
\begin{table*} \centering \scalebox{0.9}{ \begin{tabular}{l@{\hskip 1.0cm}cc@{\hskip 1.0cm}cc@{\hskip 1.0cm}cc@{\hskip .8cm}cc} \toprule Dataset & \multicolumn{2}{c@{\hskip 1.0cm}}{\textbf{Replica}} & \multicolumn{2}{c@{\hskip 1.0cm}}{\textbf{Matterport}} & \multicolumn{2}{c@{\hskip 1.0cm}}{\textbf{Replica}} & \multicolumn{2}{c}{\textbf{Matterport}} \\ Sensor & \multicolumn{2}{c@{\hskip 1.0cm}}{\textbf{RGBD}} & \multicolumn{2}{c@{\hskip 1.0cm}}{\textbf{RGBD}} & \multicolumn{2}{c@{\hskip 1.0cm}}{\textbf{RGB}} & \multicolumn{2}{c}{\textbf{RGB}} \\ Metric & {\small SR$\uparrow$} & {\small RMSE$\downarrow$} & {\small SR$\uparrow$} & {\small RMSE$\downarrow$} & {\small SR$\uparrow$} & {\small RMSE$\downarrow$} & {\small SR$\uparrow$} & {\small RMSE$\downarrow$} \\ \midrule \textbf{SLAM-net (ours)} & \textbf{78.8\pct} & \textbf{0.17m} & \textbf{49.5\pct} & \textbf{0.39m} & \textbf{45.3\pct} & \textbf{0.31m} & \textbf{23.3\pct} & \textbf{0.54m} \\ Learned visual odometry & 51.2\pct & 0.31m & 22.4\pct & 0.75m & 17.7\pct & 0.67m & 15.2\pct & 0.93m \\ FastSLAM~\cite{montemerlo2003fastslam} & 10.0\pct & 0.91m & 5.7\pct & 1.81m & X & X & X & X \\ ORB-SLAM~\cite{mur2017orb} & 5.2\pct & 1.46m & 1.9\pct & 2.90m & X & X & X & X \\ Blind baseline & 7.7\pct & 0.92m & 5.7\pct & 2.3m & 7.7\pct & 0.92m & 5.7\pct & 2.3m \\ \bottomrule \end{tabular}% }% {\vspace{0.0cm}\caption{Transfer results.}\label{tab:transfer}} \vspace{-0.0cm} \end{table*} \section{SLAM results}\label{sec:results} Main SLAM results are summarized in \tabref{tab:localization}. Visualizations are in the appendix. Videos are available at \url{http://sites.google.com/view/slamnet}. % \subsection{Main results for SLAM} \tabref{tab:localization} reports success rate (SR) that measures the percentage of episodes where the final pose error is below 0.36 meters (to enable successful downstream navigation); and root-mean-square-error (RMSE) which measures the absolute trajectory error as defined in~\citep{handa2014benchmark}. 
Estimated trajectories are only aligned with the ground-truth at the beginning of each episode. We also report runtimes, measuring the average processing time per frame including loading the data (RGBD sensor, {traj\_expert} data). \myparagraph{SLAM-net learns successfully} We first observe that SLAM-net successfully learned to localize in many episodes despite the challenging data. Comparing columns we see that an imperfect navigation policy can significantly increase the difficulty of localization. Comparing SLAM-net across sensor modalities we find that SLAM-net performs reasonably well with RGB-only input, and the depth sensor helps substantially (54.3\% vs. 83.8\% success for {traj\_expert} data). \myparagraph{SLAM-net outperforms its alternatives} We find that SLAM-net outperforms learned visual odometry, the model-based FastSLAM and ORB-SLAM by a large margin across all datasets and sensor modalities; and its runtime (on GPU) is slightly better than ORB-SLAM. Next we discuss the comparison with FastSLAM and ORB-SLAM in detail. \subsection{Learned vs. handcrafted SLAM components} Interestingly SLAM-net outperforms FastSLAM. The algorithm is exactly the same, the difference is that FastSLAM has simple handcrafted model components, while SLAM-net has neural network components that are learned end-to-end. \tabref{tab:fastslam} combines learned (l) and handcrafted (h) alternatives for each of the model components: transition model (T), mapping model (M), observation model (Z). SLAM-net has all models learned, FastSLAM has all models handcrafted. We report results for the {traj\_nav} data. We find that learning any of the model components is useful, and learning all model components jointly gives the best performance. This can be attributed to both model representation and task-oriented learning. First, our neural networks may encode a more powerful function than our handcrafted models. 
Second, we learn models end-to-end, so they are optimized for the task in the context of the algorithm and the dataset. For RGB-only input we do not have handcrafted mapping and observation models, but SLAM-net is able to learn effective models end-to-end. \subsection{Why does ORB-SLAM fail?} ORB-SLAM relies on temporal coherence between frames to track features, which is challenging in our domain due to the combined effects of sensor noise, sparse visual features, rapid turns (approx. $90^{\circ}/s$), low frame rate (approx. 3 fps), and narrow field of view (HFOV=$70^\circ$). We find that ORB-SLAM often fails to track features even with RGB-D input. With RGB-only input it fails in nearly all steps, hence we could not report a meaningful result. % In contrast to ORB-SLAM, SLAM-net does not rely explicitly on feature tracking, and it learns task-oriented models that can, \eg, learn more robust feature extractors for this domain. In \tabref{tab:orbslam} we evaluate ORB-SLAM in idealized settings for the {traj\_expert} data. Each row removes different types of challenges: no sensor noise, no actuation noise, high frame rate, and ideal condition. The high frame rate setting reduces the action step size in Habitat to achieve an equivalent increase in frame rate from 3 to ${\sim}30$ fps. The ideal condition setting removes all the above challenges together. Our results show that ORB-SLAM only works well in ideal conditions, where its performance is comparable to SLAM-net in the hardest conditions. % If we remove only one type of challenge the performance remains significantly worse. The RGB-D results indicate that low frame rate has the largest impact. % For RGB the trend is similar, but the presence of observation noise makes feature tracking fail completely. % \subsection{Transfer results}\label{sec:transfer} An important concern with learning-based SLAM methods is potential overfitting to the training environments. 
We take the SLAM-net models trained with the Gibson data, and evaluate them for the Replica and Matterport datasets, with no additional training or hyperparameter tuning. These datasets contain higher quality scenes and cover a wide range of smaller (Replica) and larger (Matterport) indoor environments. The robot and camera parameters remain the same. Results are in \tabref{tab:transfer}. We observe strong performance for SLAM-net across all datasets and sensor modalities. Comparing alternative methods we observe a similar trend as for the Gibson data ({traj\_expert} columns in \tabref{tab:localization}). Note that results across datasets are not directly comparable as the length of trajectories differ, \eg, they are longer in the Matterport data (see \tabref{tab:datastat} for statistics). We believe that these results on photorealistic data are promising for sim-to-real transfer to real robot navigation data. \subsection{Ablation study} To better understand the workings of SLAM-net we perform a detailed ablation study. Results are summarized in \tabref{tab:ablation}. The table reports success rates for the Gibson {traj\_nav} data, using SLAM-net in different conditions. \myparagraph{Joint training is useful} Line (2) of \tabref{tab:ablation} evaluates the pre-trained transition and observation models without joint finetuning. We find that finetuning is useful, and its benefit is much more significant for RGB input. A possible explanation is that our RGB model uses maps with latent features, while the RGBD model uses both latent features and predicted occupancy. Without finetuning, the occupancy predictor may generalize better to our evaluation setting, where the overall uncertainty is higher than during pre-training. \myparagraph{Occupancy maps are useful for localization} Lines (3--5) use different channels in learned local maps, pre-trained occupancy predictions, learned latent features, or both. The RGBD model is comparable in all settings. 
Adding latent maps on top of occupancy only improves 1.9\%, which indicates that 2D occupancy is sufficient for localization. The latent map configuration is $4.7\%$ behind the occupancy maps, showing that we can learn useful map features end-to-end without direct supervision. \myparagraph{We can learn better map features if occupancy prediction is difficult} Comparing the RGB models we find that the occupancy maps do not perform well here, but end-to-end training allowed learning more effective features. The difference to RGBD can be explained by the substantially lower prediction accuracy of our occupancy map predictions. \myparagraph{Choosing what to compare matters} Lines (6--9) compare strategies for choosing which map-pose pairs to feed into our discriminative observation model. Line (6) uses the last 8 steps of the particle trajectory. Lines (7--9) choose the most relevant past steps based on their estimated overlapping view area. As expected, dynamically choosing what to compare is useful. While one would expect more comparisons to be useful, over 8 comparisons do not improve performance. Since we trained with 8 comparisons, this result indicates that our model overfits to this training condition. \myparagraph{More particles at inference time are useful} Lines (10--16) vary the number of particles at inference time. Surprisingly, as little as 8 particles can already improve over the visual odometry setting (line 10). Increasing the number of particles helps, providing a trade-off between performance and computation. The effect for RGB is less pronounced, and improvement stops over 128 particles. 
\begin{table} \centering \scalebox{0.9}{ \begin{tabular}{ll@{\hskip .6cm}c@{\hskip .6cm}c} \toprule \multicolumn{2}{l}{Sensor} & \textbf{RGBD} & \textbf{RGB} \\ \multicolumn{2}{l}{Metric} & {\small SR$\uparrow$} & {\small SR$\uparrow$} \\ \midrule (1) & SLAM-net (default) & \textbf{77.1\pct} & \textbf{55.2\pct} \\ (2) & No joint training & 66.7\pct & 8.6\pct \\ \abovestrut{0.20in} (3) & Occupancy map only & 75.2\pct & 23.8\pct \\ (4) & Latent map only & 70.5\pct & \textbf{55.2\pct} \\ (5) & Occupancy + latent map & \textbf{77.1\pct} & 44.8\pct \\ \abovestrut{0.20in} (6) & Fixed comparisons (8) & 44.8\pct & 29.5\pct \\ (7) & Dynamic comparisons (4) & 73.3\pct & 41.9\pct \\ (8) & Dynamic comparisons (8) & \textbf{77.1\pct} & \textbf{55.2\pct} \\ (9) & Dynamic comparisons (16) & \textbf{77.1\pct} & 40.0\pct \\ \abovestrut{0.20in} (10) & K=1 (VO) & 30.5\pct & 26.7\pct \\ (11) & K=8 & 60.0\pct & 35.2\pct \\ (12) & K=32 (training) & 72.4\pct & 39.1\pct \\ (13) & K=64 & 75.2\pct & 46.7\pct \\ (14) & K=128 (evaluation default) & 77.1\pct & \textbf{55.2\pct} \\ (15) & K=256 & 79.1\pct & 44.8\pct \\ (16) & K=512 & \textbf{82.9\pct} & 48.6\pct \\ \bottomrule \end{tabular}% }% {\vspace{0.0cm}\caption{Ablation results. }\label{tab:ablation}} \vspace{-0.0cm} \end{table} \subsection{KITTI odometry results} To better understand the limitations of our approach we apply it to the KITTI odometry data, which contains long trajectories of autonomous driving. We do not expect a strong performance. SLAM-net is designed to enable indoor robot navigation which is reflected in a number of design decisions. First, our local maps ignore information far from the camera. Second, we do not have a dedicated particle proposal mechanism for closing large loops. Third, a key benefit of our approach is the joint training of its components, however, this requires large and diverse training data. The KITTI data is relatively small for this purpose. 
Finally, images in the KITTI data are of high quality for which existing SLAM methods are expected to work well. Our results are in \tabref{tab:kitti}. We report RMSE in meters after trajectory alignment. SLAM-net results are averaged over 5 seeds. The ORB-SLAM results are for RGB only input taken from~\cite{mur2015orb}. As expected, SLAM-net does not perform as well as ORB-SLAM; nevertheless, it learns a meaningful model and outperforms learned visual odometry. % Looking at predicted trajectories we find that SLAM-net occasionally fails to capture turns of the road (visualizations are in the Appendix). One reason is that there are no particles near the true trajectory, or the observation model gives a poor prediction. The training data contains only a limited number of turns, which makes learning from scratch difficult. Indeed our model starts to overfit after a few epochs, suggesting that more training data would improve the performance. \begin{table} \centering \scalebox{0.9}{ \begin{tabular}{l@{\hskip .6cm}cc} \toprule Trajectory & \textbf{Kitti-09} & \textbf{Kitti-10} \\ Metric & {\small RMSE$\downarrow$} & {\small RMSE$\downarrow$} \\ \midrule \textbf{SLAM-net (ours)} & 83.5m & 15.8m \\ SLAM-net (best of 5) & 56.9m & 12.8m \\ Learned visual odometry & 71.1m & 73.2m \\ ORB-SLAM-RGB~\cite{mur2015orb} & \textbf{7.62m} & \textbf{8.68m} \\ \bottomrule \end{tabular}% }% {\vspace{0.0cm}\caption{KITTI results.}\label{tab:kitti}} \vspace{-0.0cm} \end{table} \section{Navigation results}\label{sec:navresults} The motivation of our work is to enable visual robot navigation in challenging realistic conditions. Our navigation results are reported in \tabref{tab:navigation} and \tabref{tab:leaderboard}, using RGB-D input. Videos are on the project website. We report two key metrics following~\citet{anderson2018evaluation}: success rates (SR) and success weighted path length (SPL). 
In \tabref{tab:navigation} we experiment with our navigation pipeline using different methods for localization and mapping, but keeping the planner and controller modules fixed. Navigation performance is strong with a ground-truth localization oracle, which validates our architecture and serves as an upper-bound for SLAM methods. The navigation architecture with SLAM-net significantly outperforms visual odometry, achieving 65.7\% success. Our navigation architecture with visual odometry is conceptually similar to that of Active Neural SLAM~\cite{chaplot2020learning} and Occupancy Anticipation~\cite{ramakrishnan2020occupancy}. % Our results are consistent with that of \citet{ramakrishnan2020occupancy} in matching conditions. We did not compare with the classic ORB-SLAM method here because of its poor performance in our previous experiments. Finally, we submitted our method to the Habitat Challenge 2020 evaluation server, which allows direct comparison with various alternative methods. \tabref{tab:leaderboard} shows the top of the leaderboard for the PointNav task. SLAM-net achieves $64.5\pct$ success, significantly improving over the SOTA (VO~\cite{vomethod}, $37.3\pct$). It also outperforms the challenge winner (OccupancyAnticipation~\cite{ramakrishnan2020occupancy}, $29.0\pct$) which was shown to be superior to Active Neural SLAM~\cite{chaplot2020learning}. 
\begin{table}[tb]% \centering \scalebox{0.9}{ \begin{tabular}{lcc} \toprule \textbf{SLAM component} & {\small SR$\uparrow$} & {\small SPL$\uparrow$} \\ \midrule Ground-truth & \textbf{90.7\pct} & \textbf{0.56} \\ \textbf{SLAM-net (ours)} & \textbf{65.7\pct} & \textbf{0.38} \\ Learned visual odometry & 32.4\pct & 0.19 \\ \bottomrule \end{tabular} }{\vspace{0.0cm}\caption{Navigation results.}\label{tab:navigation}}% \end{table} \begin{table}[tb]% \centering \scalebox{0.9}{ \begin{tabular}{llcc} \toprule \textbf{Rank} & \textbf{Method} & {\small SR$\uparrow$} & {\small SPL$\uparrow$} \\ \midrule 1 & \textbf{SLAM-net (ours)} & \textbf{64.5\pct} & \textbf{0.377} \\ 2 & VO~\cite{vomethod} & 37.3\pct & 0.266 \\ 3 & OccupancyAnticipation~\cite{ramakrishnan2020occupancy} & 29.0\pct & 0.220 \\ \bottomrule \end{tabular} }{\vspace{0.0cm}\caption{Habitat 2020 PointNav Challenge leaderboard accessed on 16 November 2020~\cite{habitat2020leaderboard}.}\label{tab:leaderboard}}% \end{table} \section{Conclusions} We introduced a learning-based differentiable SLAM approach with strong performance on challenging visual localization data and on downstream robot navigation, achieving SOTA in the Habitat 2020 PointNav task. Together, our results provide new insights for understanding the strengths of classic and learning based SLAM approaches in the context of visual navigation. Our findings % partially contradict the results of \citet{mishkin2019benchmarking}, who benchmarked classic and learned SLAM for navigation. While they found ORB-SLAM to be better than learning based SLAM in the same Habitat simulator, they used a noise-free setting and relative goals. As pointed out by \citet{habitatchallenge}, this setting is not realistic. Indeed, we tried running the public ORB-SLAM implementation of \citet{mishkin2019benchmarking} in our simulator setting and it failed completely; while our learning-based approach achieved strong performance. 
We believe that our work on differentiable SLAM may lay the foundation for a new class of methods that learn robust, task-oriented features for SLAM. Future research may investigate alternative differentiable SLAM algorithms, \eg, that build on an optimization-based method instead of particle filtering. While our initial results are promising, future work is needed to apply SLAM-net to real-world robot navigation. A particularly interesting application would be learning to relocalize with significant changes in the environment, a setting known to be challenging for existing SLAM algorithms. \subsection*{Acknowledgement} {\small We would like to thank Rico Jonschkowski for suggesting to keep local maps and Gim Hee Lee for valuable feedback. This research/project is supported in part by the National Research Foundation, Singapore under its AI Singapore Program (AISG Award No: AISG2-RP-2020-016) and by the National University of Singapore (AcRF grant R-252-000-A87-114). } {\small \bibliographystyle{plainnat}
1,108,101,565,907
arxiv
\section{Introduction}\label{intro} Antiferromagnets exhibiting a linear magnetoelectric effect \cite{Landau} are of great interest for applications aiming to achieve electric control of magnetism. \cite{Fiebig,Borisov,Binek,Bibes,He} In such materials there is a term $- \mathbf{E}\hat\alpha\mathbf{H}$ in the free energy density, where $\hat\alpha$ is the magnetoelectric tensor. Due to this term the electric field induces a magnetization and the magnetic field induces a dielectric polarization, both in linear order. Magnetoelectric effect was first predicted \cite{Dzyaloshinskii} and experimentally observed \cite{Astrov,Folen,Astrov2,Rado} in Cr$_2$O$_3$, which remains the most promising material for applications. Magnetoelectric effect can arise due to several microscopic mechanisms, including electric field-induced changes of the single-ion anisotropy, Heisenberg exchange parameters, the $g$-tensor or Dzyaloshinskii-Moriya interaction (see Ref.\ \onlinecite{Bonfim} for a review). Each of these contributions can be further divided into electronic (clamped-ions) and lattice-mediated parts. Experimentally the electronic contributions can, in principle, be measured separately at frequencies that are large compared to those of the relevant optical phonon vibrations. First-principles methods can illuminate the microscopic mechanisms of the magnetoelectric effect. \cite{Iniguez,Delaney,Wojdel,Wojdel2,Mostovoy,Prosandeev,Bosquet,Bosquet2,Malashevich,Scaramucci} At zero temperature it is controlled by spin-orbit coupling. \'I\~niguez \cite{Iniguez} showed that at zero temperature the lattice-mediated contribution can be obtained by evaluating the electric and magnetic polarities and stiffnesses of the polar displacement modes. An alternative approach \cite{Bosquet} is to compute the electric polarization induced by the magnetic field. 
This method was used to calculate both lattice-mediated and electronic contributions to the transverse magnetoelectric susceptibility $\alpha_\perp$ of Cr$_2$O$_3$. The electronic contribution turned out to be as much as one third of and have the same sign as the lattice-mediated one. \cite{Bosquet} The orbital contribution to the magnetoelectric response has also been considered. \cite{Malashevich,Scaramucci} Longitudinal magnetoelectric susceptibility $\alpha_\parallel$ reaches a maximum at finite temperature, where it is dominated by Heisenberg exchange. This temperature-dependent effect in Cr$_2$O$_3$\ was studied by Mostovoy \emph{et al.} \cite{Mostovoy}, who obtained the relevant coupling constant from the electric polarization of a ferrimagnetically ordered unit cell and the temperature dependence from Monte Carlo simulations for the classical Heisenberg model. Their approach is tailored to Cr$_2$O$_3$\ and is not directly applicable to other systems. Only the total response was evaluated. In this paper we formulate a microscopic model of exchange-driven magnetoelectric response which generalizes the approach of Ref.\ \onlinecite{Mostovoy}. We study the longitudinal magnetoelectric susceptibility of Cr$_2$O$_3$\ in more detail, sorting out the electronic and lattice-mediated contributions and resolving the latter by normal displacement modes, and comparing the predictions of the quantum pair cluster, quantum mean-field, and classical mean-field approximations. We also test the possibility that non-Heisenberg spin interactions could be responsible for the sign change of $\alpha_\parallel$ in Cr$_2$O$_3$\ and conclude in the negative. The paper is organized as follows. 
In Section \ref{model} the microscopic model is formulated in terms of the microscopic coefficients coupling the spins to lattice displacements and directly to the electric field, and the general expressions for the electronic and lattice-mediated contributions to magnetoelectric susceptibility are derived. The computational procedure is described in Section \ref{method}, and the results are presented in Section \ref{results}. We find that the electronic contribution is a sizable fraction of the lattice-mediated term and its sign is \emph{opposite}. Different statistical approximations lead to similar maximal values of the magnetoelectric susceptibility, but the latter is sensitive to the choice of the Hubbard $U$ parameter due to its effect on the magnetic susceptibility. Section \ref{conclusions} concludes the paper. \section{Microscopic model}\label{model} Here we restrict ourselves to the exchange-driven magnetoelectric effect. This means that the model Hamiltonian in zero magnetic field should be invariant under a coherent rotation of all spins. This restriction is appropriate for Cr$_2$O$_3$ where the orbital moments are almost completely quenched, but it may need to be relaxed for application to other systems with strong spin-orbit coupling. A spin orientation for a magnetic atom on lattice site $i$ will be denoted by a unit vector $\mathbf{e}_i$. Spin rotation symmetry implies that $\mathbf{e}_i$ should only appear in scalar combinations. 
We start from the expansion of the effective Hamiltonian to second order in lattice displacements $\mathbf{u}_i$ from an equilibrium reference state: \begin{align}\label{Ham} H&=E_0+\frac12\sum_{ij}\mathbf{u}_i\hat A_{ij}\mathbf{u}_j-\sum_i\mathbf{u}_i\hat q^*_{i}\mathbf{E}-\mathbf{H}\sum_i\mu_i\mathbf{e}_i \nonumber\\ &+H_{mag}\{\mathbf{e}_i\} -\frac12\sum_{jk}\left(\sum_i \mathbf{u}_i\mathbf{g}_{i,jk} + \mathbf{E}\mathbf{f}_{jk}\right) (\mathbf{e}_j\mathbf{e}_k) \end{align} where $\mu_i$ is the magnetic moment at site $i$, $\mathbf{E}$ and $\mathbf{H}$ the external electric and magnetic fields, $\hat q^*_i$ the Born effective charge tensors, $\hat A_{ij}$ the Born-von K\'arm\'an force constant matrix, and $H_{mag}$ the magnetic interaction Hamiltonian in the absence of displacements and external fields. The Hamiltonian (\ref{Ham}) is generally similar to that of Ref.\ \onlinecite{Yatom}, but we explicitly sort out the coupling of spins to lattice displacements in order to separate the ionic and electronic contributions to the magnetoelectric susceptibility. The magnetoelectric coupling is generated by the last term in (\ref{Ham}), where the sum over $j$, $k$ runs over magnetic sites only. The parameters may be expressed as \begin{equation} \mathbf{g}_{i,jk} = \frac{\partial J_{jk}(\mathbf{u},\mathbf{E})}{\partial\mathbf{u}_i} \; , \quad \mathbf{f}_{jk} = \frac{\partial J_{jk}(\mathbf{u},\mathbf{E})}{\partial\mathbf{E}}\label{coefs} \end{equation} where $J_{jk}$ is the Heisenberg exchange parameter for a pair of spins on sites $j$, $k$. The vectors $\mathbf{f}_{jk}$ describe the variation of the exchange parameters when external electric field is applied while the ions are clamped; this term generates the electronic magnetoelectric response. 
The vector $\mathbf{g}_{i,jk}$ gives the variation of the exchange parameter for pair $jk$ when the site $i$ is shifted from the reference configuration, or it can conversely be interpreted as the spin-dependent Kanzaki force.\cite{Kanzaki} Translational invariance demands \begin{equation}\label{sum-rule} \sum_i\mathbf{g}_{i,jk}=0. \end{equation} Coupling of atomic displacements to non-Heisenberg (e.\ g.\ biquadratic) spin interaction terms is also possible, but these terms are usually small in wide-gap insulators (this is explicitly checked below for Cr$_2$O$_3$). For systems where spin-orbit interaction has a significant contribution to the magnetoelectric response, the model may be extended by treating the exchange parameters as second-rank tensors. In this case the vectors $\mathbf{g}_{i,jk}$ and $\mathbf{f}_{jk}$ turn into third-rank tensors. Integrating out the spin degrees of freedom and treating the external magnetic field and the last (magnetoelectric) term in (\ref{Ham}) as small perturbations, we obtain an effective Hamiltonian for the lattice degrees of freedom: \begin{align}\label{Heff} H_\mathrm{eff}=-\sum_i\mathbf{u}_i\hat q^*_{i}\mathbf{E} +\frac12\sum_{ij}\mathbf{u}_i\hat A_{ij}\mathbf{u}_j -\frac12\mathbf H\hat\chi^0_m\mathbf{H}\nonumber\\ -\frac\beta2\sum_{jk}\left(\sum_i\mathbf{u}_i\mathbf{g}_{i,jk}+\mathbf{Ef}_{jk}\right)\sum_l\mu_l\left<(\mathbf{e}_j\mathbf{e}_k)\mathbf{e}_l\right>_0\mathbf{H} \end{align} where $\beta=1/kT$, $\hat\chi^0_m$ is the magnetic susceptibility tensor, $\left<\dots\right>_0$ the statistical average taken over the unperturbed state described by $H_{mag}$, and we have dropped the first-order magnetostrictive term generated by $\mathbf{g}_{i,jk}$, which does not affect the magnetoelectric response. Let us denote $\mathbf{L}_{jk}=\frac12\sum_l\mu_l\left<(\mathbf{e}_j\mathbf{e}_k)\mathbf{e}_l\right>_0$ and \begin{equation}\label{gi} \hat g^*_i=\beta\sum_{jk}\mathbf{g}_{i,jk}\otimes\mathbf{L}_{jk}. 
\end{equation} Referring to (\ref{Heff}), we see that $\hat g^*_{i}\mathbf{H}$ represents the force acting on site $i$ arising in response to the external magnetic field $\mathbf{H}$. Thus, $\hat g^*_i$ is the \emph{effective magnetic monopole charge} of the atom at site $i$. Conversely, $\hat g^*_i$ describes the change in the magnetization arising due to the displacement of site $i$. It depends on temperature and is non-zero for both magnetic and non-magnetic atoms. The sum $\sum_i\hat g^*_i=0$ thanks to (\ref{sum-rule}). The structure of the tensor $\hat g^*_i$ is determined by the corresponding magnetic site symmetry. For example, the symmetry of the Cr sites in Cr$_2$O$_3$ includes the $C_3$ axis, and therefore for these sites only the $zz$ component of $\hat g^*_i$ is non-zero. When a material is both piezoelectric and piezomagnetic, there is a contribution to the magnetoelectric susceptibility mediated by strain. Although this contribution can be derived from the same Hamiltonian (\ref{Heff}), we assume in the following that it is not present, as in the case of Cr$_2$O$_3$. Thereby we can treat $\mathbf{u}_i$ as internal to the unit cell and disregard the possible change of the volume and unit cell shape. Equilibrium displacements are found by minimizing (\ref{Heff}), which leads to \begin{equation}\label{equil} \sum_j\hat A_{ij}\mathbf{u}_j=\hat q^*_i\mathbf{E}+\hat g^*_i\mathbf{H}. \end{equation} Since the displacements $\mathbf{u}_i$ are identical in all unit cells, in Eq.\ (\ref{equil}) we can treat $i$ and $j$ as basis indices within the unit cell and $\hat A_{ij}$ as the Fourier transform of the force constant matrix at $\mathbf{q}=0$. This matrix has three zero eigenvalues corresponding to homogeneous lattice displacements and $3(N-1)$ finite eigenvalues (where $N$ is the number of sites in the unit cell). 
Homogeneous displacements should be excluded from consideration, and we therefore consider only the subspace of displacements for which $\sum_i\mathbf{u}_i =0$. Within this subspace the action of the symmetric matrix $\hat A$ is represented by its eigendecomposition $A^{\mu\nu}_{ij}\to\sum_n V^n_{i\mu} C_n V^n_{j\nu}$, where $C_n$ are the non-zero eigenvalues and $V^n_{i\mu}$ the normalized eigenvectors. Within this subspace (with homogeneous displacements projected out) the action of the matrix $\hat A$ can be inverted, resulting in $\mathbf{u}_i=\sum_j\hat G_{ij}\mathbf{F}_j$, where $\mathbf{F}_j$ are the forces in the right-hand side of (\ref{equil}) and $G^{\mu\nu}_{ij}=\sum_n V^n_{i\mu} C_n^{-1} V^n_{j\nu}$. The forces automatically satisfy the sum rule $\sum_i\mathbf{F}_i=0$, therefore the matrix $\hat G$ acts within its domain of definition. Substituting the equilibrium displacements in (\ref{Heff}), we find the free energy density: \begin{align}\label{res} \frac{F}{V}=-\frac12\mathbf{E}\hat\chi_e\mathbf{E}-\frac12\mathbf{H}(\hat\chi^0_m+\delta\hat\chi_m)\mathbf{H} -\mathbf{E}\hat\alpha\mathbf{H} \end{align} where \begin{align}\label{suscepte} \hat\chi_e&=\frac1\Omega \hat q^*_i\hat G_{ij}\hat q^*_j\\ \delta\hat\chi_m&=\frac1\Omega \hat g^*_i\hat G_{ij}\hat g^*_j\label{susceptm}\\ \hat\alpha&=\hat\alpha_{ion} + \hat\alpha_{el}\label{susceptme}\\ \hat\alpha_{ion}&=\frac1\Omega \hat q^*_i\hat G_{ij}\hat g^*_j\label{a-ion}\\ \hat\alpha_{el}&=\frac{\beta}{\Omega}\sum_{jk}\mathbf{f}_{jk}\otimes\mathbf{L}_{jk}\label{a-el} \end{align} Here $\Omega$ is the volume of the unit cell, $\hat\chi_e$ is the standard expression for the lattice-mediated dielectric susceptibility in the Born-von K\'arm\'an model, $\delta\hat\chi_m$ is the magnetostructural correction to the magnetic susceptibility, and $\hat\alpha$ is the magnetoelectric tensor, which includes the ionic (first) and the electronic (second) terms. 
We have dropped the phonon part of the free energy, which does not contribute to the linear susceptibilities in the harmonic approximation. Denoting the dielectric and magnetic monopole polarities of an eigenmode $n$ as $\mathbf{p}_n=\sum_i\hat q^*_i\mathbf{V}^n_i$ and $\mathbf{g}_n=\sum_i\mathbf{V}^n_i\hat g^*_i$, we can rewrite the ionic part of the magnetoelectric tensor as \begin{equation}\label{me} \hat\alpha_{ion}=\frac1\Omega\sum_nC_n^{-1}\mathbf{p}_n\otimes\mathbf{g}_n \end{equation} This expression agrees with that obtained by \'I\~niguez \cite{Iniguez} at zero temperature (where the monopole charges are controlled by spin-orbit coupling), but the present approach also provides a microscopic definition (\ref{gi}) of the temperature-dependent monopole charge. The microscopic model based on the Hamiltonian (\ref{Ham}) can be generalized to the case of magnetoelectric alloys by adding configuration-dependent Kanzaki forces (both spin-independent and spin-dependent) and force constants. The parameters can be fitted to first-principles calculations of the total energies or forces using several supercells with different configurational and magnetic orderings and lattice displacements. Fitting of the parameters $\mathbf{f}_{jk}$ requires the calculation of the electric polarization at zero lattice displacements. The magnetoelectric response of a random alloy can then be obtained as an explicit configurational average of Eqs.\ (\ref{a-ion})-(\ref{a-el}). The expressions (\ref{susceptme})-(\ref{me}) are valid for any magnetic structures, including multisublattice and noncollinear ones. They can be simplified in the case of a collinear antiferromagnet by noting that $\mathbf{L}_{jk}$ is parallel to the ordering axis and vanishes if sites $j$ and $k$ belong to equivalent antiferromagnetic sublattices with opposite magnetizations. Indeed, in this case the magnetic space group must contain a symmetry operation that interchanges the sites $j$ and $k$. 
This operation maps an arbitrary site $l$ onto an equivalent site with an opposite magnetization. The sum over $l$ is therefore zero. In the case of a two-sublattice antiferromagnet such as Cr$_2$O$_3$, it means $\mathbf{L}_{jk}\neq0$ only if $j$ and $k$ belong to the same sublattice. Taking these properties into account, we can define a scalar $l_{jk}$ as $\mathbf{L}_{jk}=\frac12\left<\mathbf{e}_j+\mathbf{e}_k\right>_0l_{jk}$, and $l_{jk}$ has the symmetry of the non-magnetic space group. The magnetoelectric response generally depends in a complicated way on all the parameters appearing in the Hamiltonian (\ref{Ham}). A significant simplification can be achieved if it is admissible to neglect intrasublattice spin correlations. This approximation is justified as long as the corresponding exchange parameters $J_n$ satisfy $\beta J_n\ll1$. Intrasublattice interactions in antiferromagnets are usually not dominant, unless there is strong frustration. Therefore, the above inequality is satisfied in most antiferromagnets at temperatures that are not too low compared to $T_N$. In particular, this is the case for Cr$_2$O$_3$ where the shortest intrasublattice spin pair is the fourth-neighbor one. The influence of weak interactions can be safely included on the mean-field level, while short-range interactions can still be treated using more accurate methods such as the thermodynamical cluster approximations. 
The neglect of correlations between sites $j$ and $k$ in the expression for $\mathbf{L}_{jk}$ leads, through a decoupling of spin averages, to $\beta l_{jk}\approx\chi$, where $\chi$ is the homogeneous longitudinal magnetic susceptibility, \begin{equation} \hat g^*_i \approx \chi m \sum_{jk} \mathbf{g}_{i,jk}\otimes \hat z \eta_{jk} \end{equation} where $m$ is the sublattice magnetization and $\eta_{jk}=+1$ ($\eta_{jk}=-1$) when $j$ and $k$ both belong to the sublattice with $m_z>0$ ($m_z<0$), and $\eta_{jk}=0$ if $j$ and $k$ belong to different sublattices, and finally \begin{equation}\label{gn} \mathbf{g}_n \approx \chi m \frac{\partial\Delta}{\partial u_n}\hat z, \end{equation} which should be substituted in Eq.\ (\ref{me}). In this last formula, $u_n$ is the amplitude of the $n$-th normal mode defined as $\mathbf{u}_i=u_n\mathbf{V}^n_i$, and $\Delta=(J^A_0-J^B_0)/2$, where $J^p_0=\sum_j J_{ij}$ with site $i$ belonging to sublattice $p$. Under the same condition the electronic contribution can be written as \begin{equation}\label{alpha-el} \alpha^{zz}_{el}=\frac1\Omega \chi m \frac{\partial\Delta}{\partial E_z}. \end{equation} Eqs.\ (\ref{gn})-(\ref{alpha-el}) have a structure equivalent to the phenomenological result of Ref.\ \onlinecite{Rado}. Thus, we see that this phenomenological form is appropriate for the exchange-driven magnetoelectric effect under the assumptions specified in the derivation of Eq.\ (\ref{me}). In particular, it does not require that the mean-field approximation is valid, but only that intrasublattice spin correlations are small, which is a weaker assumption. \section{Computational procedure}\label{method} Cr$_2$O$_3$\ is an antiferromagnetic insulator with a N\'eel temperature $T_N$ of 307 K. It has a corundum structure with the rhombohedral unit cell containing four equivalent Cr ions lying on the trigonal axis. The orientations of the Cr magnetic moments alternate along the trigonal axis. 
The magnetic point group $\bar 3'm'$ allows the magnetoelectric susceptibility tensor, which is diagonal and has two independent components, $\alpha_{\parallel}\equiv\alpha_{zz}$ and $\alpha_{\perp}\equiv\alpha_{xx}=\alpha_{yy}$, where the $z$ axis is aligned with the 3-fold axis. \cite{Borovik} The $\alpha_\parallel$ component, which is dominated by exchange mechanism, is the focus of our study. First-principles calculations were performed using the projector augmented wave method \cite{Blochl} implemented in the Vienna ab initio simulation package (VASP)\cite{Kresse,Kresse2}. The correlations within the Cr 3$d$ shell were described using the rotationally invariant LDA+U method. \cite{Liechtenstein} We set the Hund exchange parameter $J=0.58$ eV as obtained from the constrained occupation calculation, \cite{Shi} and studied the results as a function of the Hubbard $U$ parameter. The energy cutoff for the plane wave expansion was set to 520 eV, and a $\Gamma$-centered Monkhorst-Pack $k$-point grid \cite{Monkhorst} was used for the Brillouin zone integration. Relaxations, phonon and Berry phase calculations were performed for the rhombohedral unit cell using 0.02 eV Gaussian smearing and a $8\times8\times8$ $k$-point mesh. The Hellmann-Feynman forces were converged to 0.005 eV/\AA. The exchange parameters were obtained using the hexagonal supercell and the tetrahedron method with the $4\times4\times2$ $k$-point mesh. The $\mathbf{q}=0$ component of the force constant matrix is evaluated using the standard technique, and its non-uniform eigenvalues and eigenvectors are found. The Born effective charges are also calculated and used to evaluate the polarities $\mathbf{p}_n$ of the eigenmodes of the force constant matrix. Only the polar modes with nonzero $p_{nz}$ need to be considered. The magnetic monopole charges of the normal modes in Eq.\ (\ref{gn}) contain two factors: $\chi m$, which is determined by $H_{mag}$, and $\partial_n\Delta=\partial\Delta/\partial u_n$. 
The unperturbed magnetic Hamiltonian $H_{mag}$ is assumed to have a Heisenberg form, and the exchange parameters are obtained by fitting the calculated total energies of a number of magnetic configurations. \cite{Shi} The factors $\partial_n\Delta$ are found by calculating $\Delta=(E_A-E_B)/4$, where $E_A$ and $E_B$ are the energies required to reverse one spin on sublattice A or B in the magnetic ground state. This is done using a 30-atom supercell with atomic displacements proportional to $\mathbf{V}^n_i$. Then $\partial_n\Delta$ is found by numerical differentiation with respect to $u_n$. This approach can be viewed as a converse of that used by Mostovoy \emph{et al.} The quantity $\partial\Delta/\partial E_z$ needed for the evaluation of the electronic contribution (\ref{alpha-el}) is calculated at zero atomic displacements in the presence of external electric field \cite{Souza} with a subsequent numerical differentiation. \section{Results}\label{results} There are two displacement modes with non-zero polarities $p_{nz}$, both transforming according to the $A_{2u}$ irreducible representation of the $\bar 3m$ point group. We denote these modes as LO$_1$ and LO$_2$. The normalized eigenvectors are listed in Appendix A, which also includes the phonon frequencies. The stiffnesses $C_n$ and dielectric polarities $p_{nz}$ of these modes are listed in Table \ref{number}, along with the values of the derivative $\partial_n\Delta$ which enters the expression (\ref{gn}) for the magnetic polarity. The stiffer LO$_1$ mode has a much larger dielectric polarity, which results in larger displacements compared to the softer LO$_2$ mode. The values of $\partial_n\Delta$ for the two modes are similar. Thus, overall the LO$_1$ mode gives a much larger contribution to $\alpha_\parallel$, which is seen from its larger value of $\partial\Delta/\partial E$. The electronic contribution to $\partial\Delta/\partial E$ is also listed in Table \ref{number}. 
It is comparable in magnitude to the lattice-mediated contribution, but the sign is opposite. We can understand this sign difference by noting that the electric field tends to perturb the electronic charge density in a way that partially compensates the displacement of the positively charged Cr ions. A significant magnetoelectric response was observed in optical measurements, \cite{Pisarev,Krichevtsov1,Krichevtsov2} but the sign of this response was not determined. \begin{table}[htb] \caption{Stiffnesses $C_n$, dielectric polarities $p_{nz}$, and exchange perturbations $\partial_n\Delta$ and $\partial\Delta/\partial E$ (see text) of the two polar displacement modes contributing to $\alpha_\parallel$ in Cr$_2$O$_3$, calculated at $U=4$ eV.} \begin{tabular}{|c|c|c|c|c|} \hline Mode & $C_n$ (eV/\AA$^2$) & $p_{nz}$ ($e$) & $\partial_n\Delta$ (meV) & $\partial\Delta/\partial E$ (10$^{-3}e\cdot$\AA) \\ \hline LO$_1$ & 29.1 & 8.42 & 65.2 & 19.2 \\ LO$_2$ & 11.2 & 0.88 & 52.5 & 4.2 \\ Electronic & --- & --- & --- & $-9.0$ \\ \hline \end{tabular} \label{number} \end{table} For the magnetic thermodynamics, which determines the factor $\chi m$ in (\ref{gn}) and (\ref{alpha-el}), we use the quantum pair cluster approximation \cite{Vaks} for $S=3/2$ and compare its predictions with quantum ($S=3/2$) and classical mean-field approximations. Since the corundum lattice is low-coordinated, we expect that the pair cluster approximation can provide a notable improvement compared to MFA due to the inclusion of short-range order effects, but at very low temperatures it breaks down by developing unphysical features. \cite{Vaks} The application of the pair cluster approximation is similar to Ref.\ \onlinecite{Shi}, with the exception that here we only treat the nearest and next-nearest neighbors within the pair-cluster approximation, while more distant pairs are included on the mean-field level. 
This is consistent with the neglect of the intrasublattice correlations in (\ref{gn}) and (\ref{alpha-el}) and is justified by the small magnitude of the exchange parameters beyond the second coordination sphere. The temperature dependence of $\alpha_\parallel$ obtained using different statistical approximations is shown in Fig.\ \ref{methods} with the temperature given in reduced units. We see that although there are considerable variations in the shape of the curve, the maximum value of $\alpha_\parallel$ is rather similar in all three cases. \begin{figure}[htb] \includegraphics*[width=0.45\textwidth]{Fig1.eps} \caption{Magnetoelectric susceptibility $\alpha_{\parallel}$ calculated using different statistical approximations: pair-cluster (PC), quantum mean-field (QMFA), and classical mean-field (CMFA).} \label{methods} \end{figure} The longitudinal coefficient $\alpha_{\parallel}$ undergoes a sign change at a temperature of about 100 K, \cite{Astrov2} the origin of which remains unclear. Calculations of Ref.\ \onlinecite{Iniguez} gave a negligible value of the spin contribution to $\alpha_{\parallel}$ at zero temperature. It was further found \cite{Malashevich} that the orbital contribution dominates over the spin magnetism and has the right sign, but its magnitude is still too small compared to experiment. As is clear from Eq.\ (\ref{gn}) and (\ref{alpha-el}), the spin contribution does not change sign if the magnetic interaction is of a purely Heisenberg type. However, non-Heisenberg contributions could, in principle, make the \emph{effective} parameter $\Delta$ depend on the order parameter and thereby on temperature. A typical term in the Hamiltonian capable of inducing such an effect is $K_{12,13}(\mathbf{S}_1\mathbf{S}_2)(\mathbf{S}_1\mathbf{S}_3)$, where 2 is a nearest and 3 a next-nearest neighbor of site 1. 
Although very likely small compared to $J_1$ and $J_2$ exchange parameters, $K_{12,13}$ could be comparable to the relatively small $J_4$ which largely determines $\Delta$. A sign change of the effective $\Delta$ would manifest itself as a sign change of the magnetoelectric coefficient. However, $K_{12,13}$ and other such terms do not contribute to $\Delta$ calculated, as we did, using collinear spin configurations. To test whether non-Heisenberg terms like $K_{12,13}$ are appreciable in Cr$_2$O$_3$, we calculated the total energies $E_A(\theta)$ and $E_B(\theta)$ of a supercell with one Cr spin continuously rotated by an angle $\theta$ from 0 to $\pi$ on either of the two sublattices with ionic displacements induced by electric field. These calculations were performed using the self-consistently determined constraining fields (as implemented in VASP) but are otherwise similar to the evaluation of $\Delta$ (at $\theta=0$ and $\theta=\pi$ they are equivalent). The difference $\Delta(\theta)=E_A(\theta)-E_B(\theta)$ is plotted in Fig.\ \ref{angular}. It is seen that $\Delta(\theta)$ fits very well to a simple cosine. This indicates that the effect of non-Heisenberg interaction on $\Delta$ is negligible, and that the origin of the sign change in $\alpha_\parallel$ should be sought elsewhere. \begin{figure}[htb] \includegraphics*[width=0.45\textwidth]{Fig2.eps} \caption{Angular dependence of the parameter $\Delta=E_A-E_B$. The solid line shows the cosine fit to the data points (discs).} \label{angular} \end{figure} The maximum value of $\alpha_\parallel$ ($\alpha_{max}$) obtained in the pair-cluster approximation and the temperature $T_{max}$ at which this maximum is reached are shown in Fig.\ \ref{Udep} as a function of the Hubbard $U$ parameter used in the calculation. We see that $\alpha$ increases by a factor of 2 when $U$ is increased from 3 to 4 eV. 
In order to understand the origin of this strong dependence, we first examine the $\partial\Delta/\partial E$ factors for the lattice-mediated and electronic contributions, which are shown in Fig.\ \ref{delta-U}. We see that $\partial\Delta/\partial E$ decreases as a function of $U$ for both lattice-mediated and electronic mechanisms. However, the reduction of the electronic term is faster, so the total value increases, albeit rather slowly. Thus, the overall strong dependence of $\alpha_{max}$ on $U$ is almost entirely due to the enhancement of the magnetic susceptibility. \begin{figure}[htb] \includegraphics*[width=0.45\textwidth]{Fig3.eps} \caption{$\alpha_{max}$ and $T_{max}$ calculated within the pair-cluster approximation as a function of the Hubbard $U$ parameter. The grey dashed lines show the corresponding experimental values.} \label{Udep} \end{figure} \begin{figure}[htb] \includegraphics[width=0.45\textwidth]{Fig4.eps} \caption{Parameters $\partial\Delta/\partial E$ for lattice-mediated and electronic contributions as a function of the Hubbard $U$ parameter.} \label{delta-U} \end{figure} While the peak temperature $T_{max}$ calculated in the pair cluster approximation agrees with experiment for $U=4$ eV, which also provides optimal description of the electronic and structural properties, \cite{Shi} the peak magnetoelectric susceptibility $\alpha_{max}=2.35\times10^{-4}$ g.\ u.\ is strongly overestimated compared to the experimental data, which appear to converge to the value of about $1.0\times10^{-4}$ g.\ u. \cite{Astrov,Folen,Astrov2,Kita,Kita2,Rivera,Wiegelmann,Wiegelmann2,Borisov} It is also larger than the value obtained by Mostovoy \emph{et al.} who used a smaller value of $U$, treated the spins classically, and used a different method of extracting the magnetoelectric coupling constant. (If we use the same parameters and approximations, the results of Ref.\ \onlinecite{Mostovoy} are reproduced.) 
The main reason for the disagreement in $\alpha_{max}$ with experiment is in the overestimation of the magnetic susceptibility. The experimental value near the N\'eel temperature is $\chi(T_N)\approx 25$ emu/g, \cite{Foner} whereas the pair-cluster approximation yields 42 emu/g. If we use the experimental susceptibility instead of the calculated one, $\alpha_{max}$ comes out at about $1.4 \times10^{-4}$ g.\ u., which is much closer to the experimental values. The reason for the overestimation of $\chi$ can be qualitatively understood using the Curie-Weiss expression $\chi=C/(T+\Theta)$, where $C$ is the Curie constant and $\Theta$ the Curie-Weiss temperature. For $S=3/2$, using the experimental value 25 emu/g for $\chi(T_N)$, we obtain $\Theta\approx450$ K. Similar values of $\Theta$ were found from the high-temperature susceptibility measurements \cite{Foex} ($550\pm50$ K) and from the exchange parameters obtained by fitting the inelastic neutron scattering data for magnon dispersions \cite{Samuelsen} ($527\pm76$ K). We can write $T_N\sim a \xi J_s$ and $\Theta\sim a |J_0|$, where $a$ is a common coefficient, $J_s=\sum_j\mathbf{e}_i\mathbf{e}_jJ_{ij}$, $J_0=\sum_j J_{ij}$, and $\xi$ is a suppression factor showing how much $T_N$ is suppressed by fluctuations compared to its mean-field value. In the pair-cluster approximation $\xi\approx0.8$. Thus, based on the experimental data we can conclude that $|J_0|$ is slightly greater than $J_s$. This implies that the intrasublattice exchange interaction is small compared to the intersublattice one. The results of our calculations contradict this picture, giving $|J_0|/J_s\approx0.42$ due to a fairly large value of $J_4\approx-0.2J_1$ at $U=4$ eV. We have verified the fidelity of our fit of the exchange parameters to the calculated total energies by increasing the number of input configurations to 42. The results are listed in Table \ref{Jij}, which shows that the fit is quite stable. 
Particularly, the take-one-out cross-validation (CV) score for this five-parameter fit is 0.7 meV. If $J_4$ is not included in the fit, a much larger CV score is obtained. If an additional parameter $J_6$ is included, its value comes out an order of magnitude smaller than $J_4$. These results suggest that $J_4$ is too large due to the inaccuracies of the electronic structure in the LDA$+U$ method. It is known that LDA systematically underestimates the binding energy of the oxygen $2p$ states in oxide insulators. In Cr$_2$O$_3$\ this leads to an overestimated hybridization with the Cr $3d$ states, which tends to increase with increasing $U$ due to the downward shift of the filled $3d$ orbitals. Therefore, since $J_4$ is expected to be dominated by superexchange, its overestimation in LDA$+U$ is quite natural. \begin{table}[htb] \caption{Exchange parameters $J_n$ (meV) obtained by fitting to total energies calculated at $U=4$ eV. A long dash indicates that the corresponding $J_n$ is not included in the fitting. The cross-validation score (CV, meV) for each fit is also provided.} \begin{tabular}{|c|c|c|c|c|c|c|} \hline $J_1$ & $J_2$ & $J_3$ & $J_4$ & $J_5$ &$J_6$ & CV \\ \hline 14.64 & 11.12 & -2.11 & -2.98 & 2.12 & --- & 0.7 \\ 19.57 & 14.18 & -1.94 & --- & 2.24 & --- & 6.9 \\ 14.64 & 11.12 & -2.12 & -2.98 & 2.12 & 0.12 & 0.1 \\ \hline \end{tabular} \label{Jij} \end{table} \section{Conclusions}\label{conclusions} We formulated a microscopic model of the temperature-dependent exchange-driven magnetoelectric susceptibility $\hat\alpha$ which includes the coupling of scalar spin products to atomic displacements and to the electric field. The parameters of the model can generally be obtained using first-principles calculations, and it can be extended to magnetoelectric alloys, which may help in the search for new materials with better magnetoelectric properties. 
If the intrasublattice spin correlations can be neglected (which is a good approximation for Cr$_2$O$_3$), then $\alpha_\parallel$ can be expressed as a product of the magnetic susceptibility, sublattice magnetization, and a factor that does not depend on temperature, as long as the elastic properties of the lattice do not depend on it. This relation was suggested phenomenologically by Rado. \cite{Rado} If, further, the intrasublattice interactions beyond the fourth coordination sphere in Cr$_2$O$_3$\ are negligibly small, our approach essentially becomes the converse of that of Mostovoy \emph{et al.}\cite{Mostovoy} Lattice-mediated and electronic contributions to $\alpha_\parallel$ have been sorted out, and the former was decomposed in the sum of contributions from the two normal displacement modes. The electronic contribution to $\alpha_\parallel$ in Cr$_2$O$_3$\ is comparable to the lattice-mediated contribution and has an opposite sign. Quantum pair cluster and mean-field approximations for spin $3/2$, as well as the classical mean-field approximations result in similar peak values of the magnetoelectric susceptibility. The latter, however, is a quickly increasing function of the Hubbard $U$ parameter, mainly thanks to the increasing magnetic susceptibility $\chi$. If $\chi$ is taken from experiment, we find $\alpha_\parallel$ in good agreement with experiment. However, the calculation at $U=4$ eV, which results in a good agreement with experiment for many other properties, overestimates $\chi$ by a factor of 1.7, which in turn is due to the relatively large value of the $J_4$ parameter. Finally, it was found that non-Heisenberg exchange in Cr$_2$O$_3$\ is negligibly small and can not account for the sign change of $\alpha_\parallel$ observed at low temperatures. 
\section{Acknowledgments } This work was supported by the NSF/SRC Supplement to the Nebraska MRSEC (DMR-0820521), the Center for NanoFerroic Devices (CNFD) and the Nanoelectronics Research Initiative (NRI). Computations were performed utilizing the Holland Computing Center at the University of Nebraska.
1,108,101,565,908
arxiv
\section{Introduction} \label{sec:intro} Until fairly recently, SGRBs were known predominantly as bursts of $\gamma$-rays, and largely devoid of observable traces at lower energies. The launch and successful operation of the {\it Swift} satellite has now enabled the detection and localization of X-ray afterglows from several events, enabling in turn the study of their properties at optical and radio wavelengths and the identification of the host galaxies at cosmological distances \citep{berger05,fox05,barthelmy05,hjorth05,gehrels05,bloom06,prochaska06}. The occurrence of a fraction of events among old stellar populations e.g., of an elliptical galaxy for GRB 050724, rules out a source uniquely associated with recent star formation. In addition, no bright supernova is observed to accompany SGRBs, contrary to what is seen in most nearby long-duration GRBs. It is clear by now that short and long events are not drawn from the same parent stellar population, and that SGRBs are far from standard \citep{nakarrev,gehrels09}. This hints at the underlying possibility that the progenitor itself may be quite different from burst to burst, and not entirely restricted to the most widely favoured scenario involving the merger of close binaries \citep[e.g.][]{paczynski86,eichler89} containing neutron stars (NS) and/or black holes (BH). Since the first evidence from the {\it Uhuru} and OSO-7 satellites revealed a population of highly luminous low-mass X-ray binaries (LMXBs) in globular clusters (GCs), it has been noted that the formation rate per unit mass of these objects is orders of magnitude higher in GCs than in the Galactic disk \citep{katz75,clark75}. This discovery stimulated a flurry of theoretical work into the formation of GC LMXBs by the processes of two- and three-body encounters \citep{fabian75,heggie75,hills75}. 
These dynamical formation scenarios are a natural explanation for the high occurrence of LMXBs in GCs since the stellar densities, and hence encounter rates, are much higher in the cores of GCs than other regions of the Galaxy \citep{pooley03,heinke06}. In such environments, in fact, it is unavoidable that many stars undergo close encounters and even physical collisions, with high probability, within their lifetimes \citep[see e.g.,][]{rosswog09,raskin09}. It is the interplay between compact stars in such dense environments and their ability to trigger SGRBs that forms the main topic of this paper. In general, forming a compact binary system requires some mechanism to dispose of enough energy to effectively bind them. Two such processes have been considered for conditions in globular clusters \citep{hut83}. The first is the presence of an additional star which can carry away some energy in kinetic form. This has been considered by \citet{grindlay06} as a plausible SGRB channel by computing the outcomes of exchange interactions between binaries containing one compact object and a single neutron star in a collapsed GC core. The second is the loss of energy to internal stellar oscillations, excited by the tidal forces of one star on the other. Here we suggest an alternative and perhaps less restrictive mechanism for SGRB production related to the second mechanism given above, namely, the tidal capture and collision of compact objects in dense stellar environments. In this new scenario, the compact objects are contained within a globular cluster, and interact directly through close encounters rather than being driven together by pure gravitational wave emission in existing close binaries. 
Event rates for such interactions within galaxies have been found to be much too low to be of interest \citep{janka96b} when compared with the GRB event rate, but they may be frequent enough to have an important effect on their production in GCs\footnote{\citet{hansen98} have proposed collisions of compact objects with main sequence stars as possibly relevant for the production of GRBs in GCs, although it is not clear how the large baryon loading problem could be circumvented in this case}. In this paper we make a careful assessment of the various dynamical evolutionary pathways involving compact objects in such environments, focusing particularly on the frequency and physical character of the tidal capture of two compact objects that are ultimately capable of leading to a merger and thus powering a gamma-ray burst. We find that they can provide a substantial contribution to the total rate, and compute the evolution of the associated rates with redshift. Some pressing questions include: When would two passing relativistic stars capture each other in a bound close orbit? When two neutron stars collide, does the rapidly-spinning merged system have too much mass (for most presumed equations of state) to form a single stable object? If so, the expected outcome after a few milliseconds would therefore be a spinning BH, orbited by a torus of neutron-density matter. When a NS collides with a BH, does enough mass remain in the orbiting debris to catalyze the extraction of energy from the hole at a rate adequate to power a short-lived GRB? How do the long tidal tails thrown out through the outer Lagrange point affect the accretion stream around the primary star? Even if the evolution time scale for the bulk of the debris torus were no more than a second, is enough mass and energy still available to power the late time flares? What is the relative frequency and observable signatures of all these collision events and how do they compare to {\it Swift} observations? 
The structure of this paper is as follows: detailed hydrodynamic simulations of encounters of compact objects of various types and with varying impact parameters are presented in Section~\ref{sec:dynamics} together with a detailed description of the numerical methods and the initial models; the resulting gravitational wave signals are shown in Section~\ref{sec:gwaves}; Section~\ref{sec:rates} offers an estimate of the encounter rate as function of cosmic time, and compares it with the merger rate of compact binaries. We discuss our findings in Section~\ref{sec:disc}, and we summarize and conclude in Section~\ref{sec:ccl}. \section{Dynamics of tidal capture and disruption}\label{sec:dynamics} \subsection{Numerical implementation}\label{sec:numerics} The hydrodynamical calculations shown subsequently have been performed with the three dimensional Smooth Particle Hydrodynamics (SPH) code previously used to study merging double neutron star and black hole--neutron star binaries \citep{monaghan92,lee99a}. The problem does not allow for simplifications due to symmetry and SPH, being a Lagrangian scheme lends itself particularly well to this kind of situation. The tidal tails seen in binary mergers [see e.g. \cite{rasio94,lee01,rosswog03}] are a natural outcome in the present scenario as well, and following their formation and evolution is one of the main objectives here, which is not possible with grid--based codes. Self--gravitating spherical stars of a given mass, $M$, and radius, $R$, in hydrostatic equilibrium with a polytropic pressure--density relation $P=K\rho^{\Gamma}$, where $K$ and $\Gamma$ are constants, are constructed with $N\simeq 10^5$ fluid elements (SPH ``particles'') and used for the dynamical evolution. We have performed test simulations with varying spatial resolution to test for convergence, using from $N\simeq 10^4$ to $N\simeq 2 \times 10^5$ SPH particles. 
We are here mainly interested in the dynamics of the disruption process for a limited number of dynamical times, and have found that $N\simeq 10^5$ particles are sufficient for this purpose. We thus report all simulations at this initial uniform resolution level. Our calculations are Newtonian, so we cannot model a true black hole. We merely approximate one as a point mass $M_{\rm BH}$, any matter approaching within a Schwarzschild radius $r_{\rm Sch}=2GM_{\rm BH}/c^{2}$ being accreted (and its mass added to that of the hole). Clearly this is a problem that ideally should be treated in full General Relativity, but a first understanding can be gained with the use of this approach. We note that previous work on coalescing black hole-neutron star binaries \citep{lrrg05,rosswog05} has made use of the pseudo-Newtonian potential of \citet{pw80} for the black hole, which reproduces the existence and position of a last stable orbit for test particles in circular orbits around a Schwarzschild black hole. Doing this thus requires considering fairly small mass ratios, a condition which is not met in the present set of calculations, with the additional complication of highly eccentric orbits for the parabolic encounters of interest. We have hence elected to keep to a purely Newtonian formulation which, albeit simplistic, is quantifiably so. For neutron stars, most equations of state reveal that the radius varies little over a range of masses. In the case of polytropes, the mass--radius relation is $R \propto M^{(\Gamma-2)/(3\Gamma-4)}$, so if the adiabatic index is $\Gamma=2$, the radius is only a function of the structure constant $K$ and the central density, $\rho_{\rm c}$. Our standard ``neutron star'' is thus a spherical, non--spinning polytrope with $\Gamma=2$, $M_{\rm NS}=1.4 M_{\odot}$ and $R_{\rm NS}=13.4$~km. 
In order to investigate the effects of a different compressibility, we have also considered a neutron star with index $\Gamma=5/3$ of the same mass and radius for certain orbital parameters, detailed below. We have also considered the case of a low--mass white dwarf ($M_{\rm WD}=0.5M_{\odot}$) interacting with a black hole. In this case it is appropriate to use the equation of state for a cold non--relativistic Fermi gas, $P=K_{\rm n.r.} \rho^{5/3}$, giving a radius $R_{\rm WD}=1.1 \times 10^4$~km. Three-dimensional calculations of binary interactions are typically evolved for only a few tens of milliseconds, and during this short time large scale gravitational dynamics determine the final state of the system. Once the initial conditions are set, we use a simple ideal gas equation of state, where the pressure is given by $P=\rho u (\Gamma-1)$ and $u$ is the specific internal energy, to follow the thermodynamics of the gas, with no heating or cooling mechanisms present. Shocks are allowed to form in the usual numerical way (through an artificial viscosity), and are the only source of local dissipation. The single additional ingredient that is necessary in terms of global dissipation is the emission of gravitational waves, since it can (and does) affect the orbital evolution. For binary mergers, the point--mass approximation in the weak field limit is often used to compute an effective drag, removing angular momentum and energy from the system. It is switched off once the stars come into contact, or if one of them is tidally disrupted. However, for the present set of calculations on various orbits it is not a good approximation. Rather, we require an expression for the energy loss rate of extended bodies, which can be used for more general trajectories (for example initially parabolic). 
We thus compute the rate of energy loss as \begin{equation} \frac{dE_{\rm GW}}{dt}=\frac{1}{5} \frac{G}{c^{5}} \left[ \frac{d \ddot{I}_{jk}}{dt} \frac{d \ddot{I}_{jk}}{dt} \right], \end{equation} where $I_{jk}$ is the traceless mass quadrupole moment. The first two time derivatives can be easily calculated without having to perform two numerical time derivatives explicitly with the use of the continuity and Euler equations \citep{rasio92,lee99a}. The third derivative requires numerical treatment, and numerical noise in its calculation can be reduced by interpolation over neighboring points in the evolution. We have tested this implementation by computing the evolution of binaries with large separations, where the orbital decay time scale is much longer than the orbital period, and find excellent agreement with the point-mass formula for circular orbits. It is important to consider this more accurate treatment for parabolic encounters, since the energy loss rate at periastron can be substantially larger (by a factor of 2-3, depending on the impact parameter) than that estimated from the expression for point masses in circular orbits, leading to different, and overall, faster encounters. In addition, the effect on the loss rate because of the formation of tidal bulges on the stars is automatically taken into account, since it is the full fluid quadrupole moment that is used in computing the loss rate. \subsection{Conditions for stellar encounters}\label{sec:ICs} The stellar velocity dispersion in GCs is insignificant when compared to that acquired from the mutual gravitational acceleration of two compact, stellar mass objects as they approach each other. It is thus reasonable to assume at first that encounters will be parabolic, i.e., with vanishing orbital energy, $E$, at infinity. However the impact parameter, $b$, may vary, and is related to the total orbital angular momentum, $L$, in the two--body system. 
When the separation between the two masses, $a$, is large compared to their individual radii, $R_{\rm i}$, we may safely consider that they behave as point particles and compute the orbit accordingly. It is only when these quantities become comparable that finite--size effects need to be considered, and a full hydrodynamic calculation must be performed. It has been shown for the case of compact binary {\em mergers} that tidal effects can de-stabilize the orbit even in the absence of relativistic considerations at small enough separations \citep{lai93a,lai93b,rasio94}. The orbital decay rate thus induced is comparable to that due to gravitational radiation back reaction, and can even dominate the evolution for the last few cycles. The equation of state plays a role in the magnitude and growth of this effect, and for neutron stars it is almost certainly generically important. We have thus elected to study collisions by placing the stars (whether they are black holes, neutron stars or white dwarfs) initially at separations comparable to those used in merger calculations. This choice is always a trade-off, since using a larger separation will always be more accurate, at the cost of added computational effort (during which the components approach each other with little visible evolution). The point worth noting here is that as a result of tidal effects, a bulge will form on each star as they approach, but will slowly fall behind the line joining the centers. At periastron, a significant lag angle may have developed, and this can have an effect on the subsequent evolution of the material, through the action of gravitational torques by the more massive primary on the lighter, bar--like secondary. For the present set of calculations we have chosen to use spherical, non-perturbed stars for initial conditions. 
A solution in terms of compressible tri--axial ellipsoids is possible for the previous evolution \citep{lai93b}, but given our overall simplifications we believe this will not affect our results significantly. At the chosen initial separations, the induced tidal bulges and lag angles are still quite small\footnote{A detailed comparison of the differences between the use of spherical stars as opposed to tri--axial ellipsoids was carried out in the case of mergers \citep{lee00,lee01}. Overall, results differed by less than $\approx 5$\% for the equation of state used here with $\Gamma=2$.}. A convenient way to parameterize the encounters is by comparing the strength of the tidal field produced by the primary to the self gravity of the secondary, through \begin{equation} \eta=\left( \frac{M_{2}}{M_{1}} \frac{R_{\rm p}^3}{R_{2}^3} \right)^{1/2}, \end{equation} where $M_1$ and $M_2$ are the primary and secondary mass, $R_{\rm p}$ is the periastron distance, and $R_2$ is the radius of the secondary. With this definition, at a fixed mass ratio $q=M_2/M_1 \leq 1$, encounters with $\eta \gg 1$ have large impact parameters, while $\eta \simeq 1$ marks collisions where the secondary is tidally disrupted because the tidal field has become as intense as that holding the star together. An important difference between encounters with components of similar mass and those in which $q \ll 1$, as for the disruption of stars by supermassive black holes in AGN \citep{frank78,lacy82,carter83,rees88}, is that the periastron distance is much smaller in the former, and in fact comparable to the stellar radius. For a given intensity of the encounter, the periastron distance can be written as \begin{equation} R_{\rm p}=R_2 \eta^{2/3} q^{-1/3}. \end{equation} Thus for a disruptive encounter with $\eta \simeq 1$, $R_{\rm p} \simeq 200 R_2$ if $q=10^{-7}$, but $R_{\rm p} \simeq 2 R_2$ for $q\simeq 1/3$. 
In the first case the disruption is a purely gravitational encounter, while in the second it is additionally a direct collision, which may modify the mass ratio substantially {\em during} the encounter. The parameter $\eta$ can also be thought of as the ratio between the dynamical time scale of the star (or its rotational break up period) and the duration of the encounter. Thus for the intrinsic spin of the secondary to be of any relevance, it must be rotating near the Keplerian limit. The most rapidly spinning neutron stars detected in LMXBs in our galaxy \citep{backer82} and in GCs \citep{hessels06} are clearly below this threshold, by a factor 3-5, depending on the assumed equation of state. It is thus reasonable to assume that in most cases the neutron star spin is negligible for the purposes of the encounters considered here, and we do so in what follows. \subsection{Tidal capture and the formation of close binaries} When two compact stars pass close to each other so that $\eta \simeq 1$, the tidal perturbations produce accelerations of the stellar material relative to the stellar center. The internal energy gained by the stars is taken from the kinetic energy of their relative motion. While the internal energy $\Delta E_{\rm T}$ gained by each component is a small fraction of the star's internal energy, it can be comparable with the kinetic energy of relative motion of the two bodies at large separation. The two stars will thus become bound if enough energy is absorbed in exciting the oscillations. The resulting elliptical orbit immediately after capture will have an initial eccentricity only slightly less than unity. However, there will be many successive passages at about the same periastron distance, and these will lead to further energy transfer. Ultimately the orbit should become nearly circular. 
The value of $\Delta E_{\rm T}$ and thus the resultant condition for tidal capture may be determined by requiring that the frequency of the perturbation be slightly smaller than the natural frequencies of the perturbed system. The tidal field excites non-radial modes of oscillation in the secondary and, while there are many of these, those most effectively excited have the smallest number of nodes, with an angular frequency $\omega_{\rm osc}$ that is usually some two to three times $\omega_{2} =(GM_2/R_2^3)^{1/2}$. Now, the angular frequency of the encounter $\omega_{\rm p}$ is effectively $V_{\rm p}/R_{\rm p}$ and can be written as $\omega_{\rm p}\approx 2\omega_{2} (R_2/R_{\rm p})^{3/2}$. Thus for $R_{\rm p}/R_2=2$, $\omega_{\rm p}/\omega_{\rm osc} \approx 0.3$ for the lowest modes. However, for $R_{\rm p}/R_2=10$, $\omega_{\rm p}/\omega_{\rm osc}\approx 0.03$. For such slow changes, the shape of each star will adjust to the changing form of the equipotential surfaces and the net heating is markedly reduced. The binaries formed by tidal capture are thus generally very hard. Since the fraction of the initial angular momentum which is transferred to stellar rotation can scarcely exceed a few percent because of the relatively small stellar radius of gyration, one can assume that the orbital angular momentum remains roughly constant. Hence in a circular orbit of radius $R_{\rm c}$ and relative velocity $V_{\rm c}$, $R_{\rm c}V_{\rm c}$ must equal the initial $R_{\rm p}V_{\rm p}$. With $V_{\rm p}$ obtained by the condition that $(1/2)\mu_{\rm r}V^2_{\rm p}=GM_1M_2/R_{\rm p}$, where $\mu_{\rm r}=M_1 M_2/(M_1+M_2)$ and $V_{\rm c}$ computed from force balance in a circular orbit, one finds $R_{\rm c} = 2R_{\rm p}$. To obtain more exact results for the resulting conditions for tidal capture, the excitation of the individual normal modes must be considered, and the heating summed over all modes. 
Analysis of this effect by \citet{press77} has taken the parabolic motion of the two stars accurately into account, but in the tidal potential only terms varying as $1/r^2$ and $1/r^3$ have been considered, an approximation valid for $R_{\rm p}/R_2 \geq 3$. Detailed calculations were first carried out using linear theory for $\Gamma=4/3$ \citep{press77, lee86} and for $\Gamma=5/3$ \citep{lee86}. They were followed by many other studies using both linear theory \citep{mcmillan87, koch92} and numerical hydrodynamical calculations \citep{rass91}. Such treatments give $R_{\rm p}/ R_2$ between 2 and 3 as a condition for tidal capture (the exact value depending on the properties of the stellar structure model used). Thus an approximate condition for tidal capture may be given as $R_{\rm p} \leq 3 R_2$ irrespective of the precise details since for $R_{\rm p}/ R_2 \leq 3$, $\omega_{\rm p} \sim \omega_{\rm osc}$. \begin{figure} \includegraphics[width=\columnwidth,angle=-90,scale=1.]{fig1.ps} \caption{The encounter of a neutron star with a black hole at relatively large impact parameter (run L$_3$) leads to a large number of periastron passages before tidal disruption. Here we show the separation (in km) between the neutron star and the black hole from the start of the simulation until full disruption of the star, taking about one quarter of a second.} \label{fig:BHNSL3} \end{figure} For compact binaries, angular momentum losses to gravitational waves, as well as dynamical mass ejection from the system must additionally be considered. The results for a black hole-neutron star encounter with mass ratio $q=0.31$, $\eta=3$ (corresponding to $R_{\rm p}/ R_2 = 3.1$), where the neutron star is modeled as a polytrope with $\Gamma=2$ (run L$_3$ in Table~\ref{tab:ICs}) are shown in Figure~\ref{fig:BHNSL3}. 
As expected, the strong tides trigger complex oscillations in the secondary star which can be clearly seen in the variation of the maximum density in the core after the first periastron passage. Further energy transfer occurs in subsequent passages as the orbit becomes tighter and progressively circular, until eventually some direct mass transfer takes place along with mass stripping. We find that the secondary is not shredded immediately, but only after more than a dozen passages at about the same periastron distance. Our condition for tidal capture is in rough agreement with $R_{\rm p}/ R_2 \leq 3$ although the inclusion of gravitational waves as well as matter ejection from the system allows for a more rapid variation of angular momentum disposal. For $\eta \simeq 1$, the secondary star will experience a direct physical collision at least of its outer layers. The process is rather complex with some of the gas escaping entirely from the system on outbound trajectories. The core may preserve its integrity for a few orbital periods around the primary in a {\em common envelope} before finally coalescing. It is to this problem that we now turn our attention. 
\begin{deluxetable}{lcccccccc} \tablecaption{Parameters for orbital encounters.\label{tab:ICs}} \tablewidth{0pt} \tablehead{\colhead{Run} & \colhead{Prim.} & \colhead{Sec.} & \colhead{$\Gamma$} & \colhead{$\frac{M_1}{M_{\odot}}$} & \colhead{$\frac{M_2}{M_{\odot}}$} & \colhead{$q=\frac{M_2}{M_1}$} & \colhead{$\eta$} & \colhead{$\frac{R_{\rm p}}{R_{2}}$} } \startdata L$_0$ & BH & NS & 2 & 4.51 & 1.4 & 0.31 & 1.0 & 1.5 \\ L$_0\Gamma_{5/3}$ & BH & NS & 5/3 & 4.51 & 1.4 & 0.31 & 1.0 & 1.5 \\ L$_1$ & BH & NS & 2 & 4.51 & 1.4 & 0.31 & 1.5 & 1.9 \\ L$_2$ & BH & NS & 2 & 4.51 & 1.4 & 0.31 & 2.0 & 2.3 \\ L$_2\Gamma_{5/3}$ & BH & NS & 5/3 & 4.51 & 1.4 & 0.31 & 2.0 & 2.3 \\ L$_3$ & BH & NS & 2 & 4.51 & 1.4 & 0.31 & 3.0 & 3.1 \\ NSNS & NS & NS & 2 & 1.75 & 1.4 & 0.80 & 1.0 & 1.0 \\ BHWD & BH & WD & 5/3 & 2.5 & 0.5 & 0.20 & 1.0 & 1.7 \\ \enddata \end{deluxetable} A summary of the most important aspects of the initial conditions thus chosen is given in Table~\ref{tab:ICs}. There are six parabolic encounters involving neutron stars with black holes, one double neutron star collision, and one black hole--white dwarf encounter. The parameter $\eta$ has been computed from the point mass orbital parameters in Newtonian gravity. Naturally the centers of mass of each star do not follow these solutions because of finite size effects and the emission of gravitational waves, but they allow for a characterization of each case. Note that since $\eta \propto R_{\rm p}^{3/2}$ and $L \propto R_{\rm p}^{1/2}$, the change in $\eta$ by a factor 3 is actually a variation of $3^{1/3}\approx 1.4$ in orbital angular momentum. \subsection{Stellar disruption and disk formation}\label{sec:hydro} For binary mergers, tidal disruption usually occurs after a single periastron passage, because the system is already very tightly bound by gravity. In the case of collisions, we find that the secondary is not shredded immediately after the first interaction. 
It does, however, lose a substantial amount of orbital energy and angular momentum through two main channels: emission of gravitational waves and transfer by gravitational torques through the formation of the tidal bulge. The bulge in effect deforms the spherical star into a bar, which the primary can then spin-up during the brief encounter. The strong tides additionally trigger complex, nearly radial oscillations in the secondary which can be clearly seen in the variation of the maximum density in the core after periastron passage. Some direct mass transfer occurs as well, along with mass stripping which can form a first accretion structure around the primary, which is more massive for low initial impact parameters. Additionally, some matter is flung out through the exterior Lagrange point to large distances, creating a tidal tail. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig2.eps} \caption{The neutron star core mass decreases during the collision with a black hole by tidal mass stripping. As the strength of the encounter, $\eta$, decreases, the neutron star survives for a greater number of periastron passages before being eventually shredded. The secondary's mass (in solar masses) is shown for runs L$_{0}$, L$_0\Gamma_{5/3}$, L$_{1}$, L$_{2}$ and L$_2\Gamma_{5/3}$ until disruption. The thick (thin) lines are for $\Gamma=2$ ($\Gamma=5/3$).} \label{fig:BHNScoremass} \end{figure} \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig3.eps} \caption{The separation (in km) between the neutron star and the black hole is shown until disruption of the neutron star for runs L$_{0}$, L$_{0}\Gamma_{5/3}$, L$_{1}$, L$_{2}$ and L$_{2}\Gamma_{5/3}$. Note the change in scale on the time axis when compared with Figure~\ref{fig:BHNSL3}. 
The thick (thin) lines are for $\Gamma=2$ ($\Gamma=5/3$).} \label{fig:BHNSsep} \end{figure} \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig4.eps} \caption{Parabolic collisions of neutron stars with black holes. Logarithmic contours of density in the orbital plane (equally spaced every 0.25 dex) are shown for runs L$_{0}$ (left), L$_{1}$ (middle) and L$_{2}$ (right). The lowest contour in bold is at $\log \rho \; [\mbox{g~cm$^{-3}$]}=10$. The time elapsed since the beginning of the simulation is indicated in each panel (increasing top to bottom), as is the distance scale. Note the different number of periastron passages and tidal tails formed until final disruption in each case.} \label{fig:BHNSL0L1L2} \end{figure} In runs L$_0$ and L$_1$, the neutron star core (which now contains $\simeq 1~M_{\odot}$ and $1.2~M_{\odot}$ respectively, see Figure~\ref{fig:BHNScoremass}) does not survive the second encounter, and forms a massive disk around the black hole, as well as an elongated tidal tail. In run L$_2$ there is enough energy and angular momentum at the outset that the core is able to return a third time, after which it too is fully disrupted and forms a disk (see Figure~\ref{fig:BHNSsep}). Each successive passage feeds the accretion disk and simultaneously forms a tidal tail, which is not set to collide with previous ejections. We show in Figure~\ref{fig:BHNSL0L1L2} the time evolution for different initial impact parameters, projected onto the orbital plane. Even with three periastron passages, the collision and disruption are essentially over after $\simeq 50$~ms, because the initial passage drains enough orbital energy and angular momentum to bind the system very effectively. 
For runs carried out with a soft equation of state, $\Gamma=5/3$, the results are qualitatively the same for low impact parameter, with the main difference being in the spatial extent of the accretion disk formed, and in that of the tidal tails, both being greater than for $\Gamma=2$. For large impact parameter, the gravitational interaction is quantitatively different during the first periastron passage, since the star effectively resembles a point mass to a greater degree in run L$_2\Gamma_{5/3}$. Angular momentum and energy transfer through torques is thus less efficient and the core of the neutron star is transferred to a higher orbit than in run L$_{2}$ (note the difference in secondary apocenter values in Figure~\ref{fig:BHNSsep}). At the secondary passage $\simeq 26$~ms after the start of the simulation, however, the star is fully shredded by tidal forces and the final disk forms promptly. For the disruption of a star by a {\em supermassive} black hole, the former moves essentially in the fixed background metric imposed by the hole, and this allows for a simplified treatment of the dynamics. It was found in earlier studies \citep{rees88}, both through analytical considerations and direct numerical simulation, that essentially half the mass of the star is dynamically ejected, while the remaining half is captured by the black hole, being on eccentric trajectories which, with variable delay, will bring them back to the vicinity of the primary. \begin{figure} \includegraphics[width=\columnwidth,scale=1.]{fig5.eps} \caption{The total angular momentum as a function of time is shown for runs L$_{0}$, L$_{0}\Gamma_{5/3}$, L$_{1}$, L$_2$ and L$_{2}\Gamma_{5/3}$, in units of $L^{*}=M_{\rm tot}\sqrt{G \mu R_{\rm NS}}$, where $M_{\rm tot}$ is the total mass and $\mu$ is the reduced mass. For each case, successive periastron passages are marked on the curve as P$_{1}$, P$_{2}$, ... 
The thick (thin) lines are for $\Gamma=2$ ($\Gamma=5/3$).} \label{fig:BHNSangmom} \end{figure} There are two important differences between such a scenario and that considered in the present study. First, as already noted, the mass ratio is of order 0.1-1 instead of 10$^{-7}$, which can produce a direct collision with accompanying mass transfer simply by virtue of the small periastron distance of a disruptive event. Second, at the small distance scales implied by the fact that we are considering compact stellar mass objects, gravitational wave emission is intense, and can drain a substantial fraction of the total kinetic energy and angular momentum during a single passage (see Figure~\ref{fig:BHNSangmom}). Thus the system becomes non-conservative from a point of view of orbital dynamics, and the previous reasoning does not apply. After the neutron star has been fully disrupted, the remnant consists of a black hole surrounded by a torus, and a series of tidal tails, depending on the number of periastron passages which occurred. The tori are typically 200-300~km across and contain $M_{\rm disk}\simeq 0.1 M_{\odot}$. They are comparable in size and mass to those encountered during binary mergers, with densities $\rho \simeq 10^{11}-10^{12}$~g~cm$^{-3}$ and internal energies $u \simeq 10^{18}-10^{19}$~erg~g$^{-1}$, equivalent to 1-10~MeV/baryon. The binary interaction is violent and complex, though, and this can be qualitatively seen in the fact that by the end of our calculations (roughly 20~ms) the accretion structures still show significant deviations from azimuthal symmetry. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig6.eps} \caption{Logarithmic contours of density in the orbital plane (equally spaced every 0.25 dex) for the collision of two neutron stars (run NSNS) on a parabolic orbit with initial mass ratio $q=0.8$. The lowest contour in bold is at $\log \rho \; [\mbox{g~cm$^{-3}$]}=10$. 
After an initial passage the secondary is entirely disrupted and wraps around the primary, forming an envelope and a single tidal tail.} \label{fig:NSNScoll} \end{figure} For double neutron star encounters, the mass ratio is even greater (we have computed one collision for $q=0.8$, which is likely to be a lower bound for such systems). For the case considered with adiabatic index $\Gamma=2$, the two stars are actually the same size, and directly impact each other for an encounter strength $\eta=1$. The initial collision binds the stars in an elliptical orbit but does not lead to the formation of a significant tidal tail. The less massive secondary is strongly distorted and spun up, and a bridge of material temporarily joins the two stars. Upon a second passage the secondary is entirely shredded and wraps around the primary (see Figure~\ref{fig:NSNScoll}). The material from each star remains largely separate, with that from the primary remaining essentially in the core, in a manner similar to what is obtained for mergers of unequal mass neutron stars. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig7.eps} \caption{Azimuthally averaged angular velocity profile (solid line) and enclosed mass (dashed line) in the double neutron star collision remnant. The rapidly rotating inner core (P=2.1~ms) is surrounded by a large envelope. The rotation of the tidal tail is visible at radii greater than 100~km, and reference power laws are given for the envelope and tail.} \label{fig:NSNSrot} \end{figure} The final remnant in this case consists of a slightly differentially rotating core of 2.4$M_{\odot}$ with radius $R_{\rm core} \simeq 20$~km and a maximal rotation frequency of $\Omega_{\rm max} \simeq 3000$~rad/s, corresponding to a period of 2.1~ms (see Figure~\ref{fig:NSNSrot}). This is surrounded by an envelope out to $\sim 80$~km containing $M_{\rm env}\simeq 0.49 M_{\odot}$, where the orbital frequency is $\Omega \propto r^{-1.2}$. 
In the outer tail of material the rotation is essentially Keplerian, with $\Omega \propto r^{-1.5}$ (the rotation profile is shown as well in Figure~\ref{fig:NSNSrot}). Note that the core is above the threshold for collapse of a cold, non--rotating configuration in most equations of state, but could avoid this given the rapid and differential rotation present \citep{cook94,baumgarte00}. In addition significant heating of the core can raise the critical threshold mass for collapse to $\simeq 1.35 M_{\rm cold}$ \citep{shibata06}. In this particular case the core could conceivably remain stable for a longer time, spinning down on a secular time scale due to the emission of gravitational waves or magnetic torques. If it were to indeed form a black hole, the distribution of angular momentum in the envelope is such that only material between 50 and 100~km, amounting to 0.17$M_{\odot}$, would have enough centrifugal support to form an accretion disk. For completeness we have also considered the interaction of a low mass ($M_{\rm WD}=0.5M_{\odot}$) white dwarf with a compact object ($M_{\rm co}=2 M_{\odot}$). The latter could be either a massive neutron star or a low mass black hole. Numerically, the distinction is irrelevant because the characteristic scales of the two objects are so different that it is impossible to resolve simultaneously the black hole horizon (or neutron star surface) and the entire white dwarf. For the actual calculation the absorbing accretion boundary has been placed 100~gravitational radii from the center of the compact object. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig8.eps} \caption{Logarithmic density contours (equally spaced every 0.25 dex) for the encounter between a low mass white dwarf and a compact object. The lowest contour in bold is at $\log \rho \; [\mbox{g~cm$^{-3}$]}=3$. Note the difference in scales (spatial and temporal) when compared to those in Figures~\ref{fig:BHNSL0L1L2} and \ref{fig:NSNScoll}. 
The higher compressibility of the material also makes for a wider distribution of ejected material and a generally wider distribution of gas.} \label{fig:WDBHcoll} \end{figure} The white dwarf is modeled as a polytrope with adiabatic index $\Gamma=5/3$, appropriate for a cold, non-relativistic degenerate configuration, and the pressure is thus given by $P=K_{\rm n.r.} \rho^{5/3}$, where $K_{\rm n.r.}=(3/\pi)^{2/3}h^2/(20 m_e (2m_p)^{5/3})$. With the given mass, the stellar radius is $R_{\rm WD}=1.1 \times 10^{9}$~cm, a factor $f \simeq 800$ larger than our standard neutron star. In fact with the same input physics one could simply scale the results from the BH-NS interaction (given an identical mass ratio) by increasing distances by a factor $f$ and temporal scales by $f^{3/2}\simeq 2.3 \times 10^{4}$ (giving hundreds of seconds instead of tens of milliseconds). This is strictly not correct, however, since gravitational radiation reaction introduces an absolute scale into the problem, and energy and angular momentum losses through this channel are insignificant in comparison to the BH-NS case. The interaction proceeds then at a more leisurely pace, so much so that we were unable to follow it to a second periastron passage, even though the simulation covered nearly one thousand seconds. At this stage stripping during the close passage has formed a torus around the compact object, linked to the stellar core by a long and narrow bridge of material (see Figure~\ref{fig:WDBHcoll}). The core itself is rapidly spinning due to gravitational torques exerted by the primary, and the typical tidal tail has formed at large radii. Since we are considering encounters in which disruption occurs practically by construction (given the choice of the parameter $\eta$), the gravitational torques exerted on the secondary are of comparable magnitude in all cases (with appropriate scalings). 
We find that the angular frequency of the core after the first periastron passage is $\simeq (0.2-0.4) \Omega_{0}$, where $\Omega_{0}=(GM_2/R_2^{3})^{1/2}$ is the break up rotation frequency of the unperturbed secondary. Thus the spin periods are $\simeq 2$~ms and $\simeq 8$~s for encounters involving neutron stars and white dwarfs respectively. \begin{deluxetable}{lccc} \tablecaption{Disk, tail and ejected masses.\label{tab:masses}} \tablewidth{0pt} \tablehead{\colhead{Run} & \colhead{$M_{\rm disk}/M_{\odot}$} & \colhead{$M_{\rm tail}/M_{\odot}$} & \colhead{$M_{\rm ej.}/M_{\odot}$} } \startdata L$_0$ & 0.23 & 0.15 & 0.05 \\ L$_{0}\Gamma_{5/3}$ & 0.25 & 0.11 & 0.04 \\ L$_1$ & 0.16 & 0.31 & 0.13 \\ L$_2$ & 0.09 & 0.47 & 0.19 \\ L$_{2}\Gamma_{5/3}$ & 0.11 & 0.19 & 0.04 \\ NSNS & 0.17 & 0.13 & 0.03 \\ BHWD & 0.13 & 0.35 & 0.24 \\ \enddata \end{deluxetable} The mass of the disk (see Table~\ref{tab:masses}) present at the end of the calculation as a result of the encounter is computed from the fluid elements in close, essentially circular orbit about the central object. It is typically $M_{\rm disk} \simeq (0.1-0.3) M_{\odot}$, regardless of the type of encounter. Likewise, the mass of the tidal tails, $M_{\rm tail}$, is obtained by adding over all the fluid elements within these structures, whether they are bound to the central mass or not. For black hole-neutron star encounters there is a clear trend of decreasing disk mass and increasing tail mass as the initial impact parameter grows at fixed compressibility. The total amount of matter dynamically ejected (computed as that with positive total energy) also increases, and is a result of the greater number of ejection episodes associated with larger values of the orbital angular momentum. For run L$_{2}$ we find that nearly 0.2$M_{\odot}$ are thus lost. 
For the double neutron star collision the characteristics of the system (total mass and mass ratio) are such that the ejected mass is significantly lower, although a substantial tail is still present. We will return to the implications of mass ejection below. Including the effects of General Relativity will likely alter these values, by up to one order of magnitude if the differences between calculations performed in Newtonian theory and those using GR for mergers are used as guidance. \subsection{Tidal tails and mass ejection}\label{sec:tails} For the two members of the system to come together and eventually merge, they must lose energy and angular momentum. This can be achieved through the emission of gravitational waves, or the ejection of matter, or a combination of both. In either a binary system in circular orbit or a parabolic approach there is a substantial amount of rotation. Since a small amount of matter, removed to a large radius, can carry a great deal of angular momentum, the formation of tidal tails out of material stripped through the outer Lagrange points in the effective potential is an efficient way to produce a single object at the center. This is the fundamental reason why such structures form in either stellar or galactic collisions. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=0.85]{fig9.eps} \caption{Tidal tails formed by the disruption of a neutron star by a black hole (run L$_{1}$) and a white dwarf (run BHWD). For the neutron star disruption the impact parameter was such that two periastron passages occurred, giving rise to two distinct ejection structures thousands of kilometers across by the end of the simulation. For the case involving the white dwarf, only one passage has occurred and the core of the star is still present. 
Note the much larger spread in the distribution of the fluid, due to the lower adiabatic index used in the case of the white dwarf.} \label{fig:tails} \end{figure} Just as the vanishing orbital energy of a parabolic orbit implies successive periastron passages as the core becomes more and more bound to the primary, so too it is easier to dynamically unbind matter to infinity when compared to a bound binary. As each passage proceeds a new ejection episode occurs, giving rise to an additional tail (Figure~\ref{fig:tails}). The amount of mass thus ejected, and shown in Table~\ref{tab:masses}, is considerably greater than for a binary coalescence, by about one order of magnitude \citep{lee01}. In all cases when multiple ejection events occur (we observed up to three for run L$_{2}$), the velocities and orientations are such that the initial tail will not be overtaken by latter ones. This is simply because the first one has a clear head start, but also because subsequent events occur at different orbital phases. An interesting point regarding the tails concerns their hydrodynamic stability. Their motion is essentially ballistic, dominated by the potential well of the central mass. They are nevertheless susceptible to the varicose, or sausage instability first identified by Rayleigh in 1899 \citep{chandra61}. This is due to self-gravity and occurs for cylindrical configurations of an incompressible fluid for perturbations with wavelength $\lambda > \lambda^{*}=2 \pi R_{\rm cyl}/x^{*}$, where $x^{*}\simeq 1$ and $R_{\rm cyl}$ is the radius of the cylinder. The fastest growing mode has $x=0.58$, wavelength $\lambda\simeq 11 R_{\rm cyl}$ and a growth time $\tau=4/(4 \pi G \rho)^{1/2}$ \citep{chandra61}. 
For sufficiently stiff equations of state (with adiabatic index $\Gamma > 2.5$), even though not strictly incompressible, this is actually seen in numerical simulations \citep{rasio94,lee00}: on a time scale given approximately by $\tau$, condensations form, and are separated roughly by the wavelength of the fastest growing mode given above. For softer equations of state (even such as the one used here for neutron stars when $\Gamma=2$) the effect is not present, and even less so for the calculation involving the neutron star with $\Gamma=5/3$ and the white dwarf (Figure~\ref{fig:tails}). \section{Emission of gravitational waves}\label{sec:gwaves} In the case of merging binaries, the early gravitational waveforms, when the separation is much larger than the stellar radius, can be computed using the weak field approximation analytically, and from them the stellar masses may be accurately determined. As the stars become distorted by the tidal field, the signal deviates from this solution and finite size effects accelerate the decay. The secondary is then fully accreted by the black hole (if one is present) or tidally disrupted, and the emission abruptly ceases. The precise frequency where this occurs can lead in principle to accurate determinations of the neutron star radius \citep{faber02}, and thus, since the mass is already known, to useful constraints on the equation of state at supra-nuclear densities. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig10.eps} \caption{Gravitational waves emitted as a function of time for runs L$_{0}$, L$_0\Gamma_{5/3}$, L$_{1}$, L$_{2}$, L$_2\Gamma_{5/3}$, and NSNS as seen by an observer placed along the rotation axis ($r h_{+}$ is plotted (in m), where $r$ is the distance from the source to the observer). The thick (thin) lines are for $\Gamma=2$ ($\Gamma=5/3$). The vertical range is identical in all frames, but note the different scaling on the time axis. 
In all runs except for L$_{0}$, L$_{0}\Gamma_{5/3}$, and L$_{2}\Gamma_{5/3}$, the oscillations of the neutron star core following the first and even second periastron passage are clearly visible. When varying the stiffness of the equation of state, the waveforms are nearly indistinguishable for small impact parameter and clearly separated for large values, due to the difference in the associated orbital evolution.} \label{fig:BHNSgw} \end{figure} The total mass and relative velocities involved in a parabolic encounter are similar to those encountered in close binaries, and so we would expect the characteristic frequencies and strength, or amplitude of the signal in gravitational waves, to be comparable in this case. The biggest difference arises, and is crucial in terms of detectability, because the collision does not involve a leisurely spiral in over many orbital periods, and thus lacks the characteristic precursor chirp signal which would slowly sweep through the interferometer's detection band. Figure~\ref{fig:BHNSgw} shows the computed waveforms (one polarization is given) for black hole-neutron star encounters and the double neutron star collision. A neutron star ($M_2$) approaching a stellar mass black hole ($M_1$) with $\eta \sim 1$ will be disrupted in a single passage and the particles in the disrupted remnant follow approximately independent Keplerian orbits. 
The detectable gravitational signal will thus have a burst-like behaviour, roughly characterized by an amplitude \begin{multline} h \sim{G M R_1 \over c^2D R_{\rm p}} \sim 10^{-22} \; \eta ^{-2/3} \left({D \over 100 \, {\rm Mpc}}\right)^{-1} \times \\ \left({M_1 \over 10 \, M_\sun}\right)^{2/3} \left({R_2 \over 10 \, {\rm km}}\right)^{-1} \left({M_2 \over 1.4 \, M_\sun}\right)^{4/3}, \end{multline} and frequency \begin{multline} f \sim \left({G M_1 \over R_p^3}\right)^{1/2} \sim 1.4 \times 10^{4} \, {\rm Hz} \; \times \\ \eta^{-1} \left({R_2 \over 10 \, {\rm km}}\right)^{-3/2} \left({M_2 \over 1.4 \, M_\sun}\right)^{1/2}. \end{multline} Here $M$ denotes the total mass, $M_{1}+M_{2}$. LIGO will be able to detect gravitational waves from impacts involving neutron stars and stellar mass black holes if $\eta \leq 1$ and the distance is $D\leq 50$ Mpc. In what follows, we compare these simple estimates of gravitational radiation against more detailed results obtained with our numerical scheme. The signal exhibits a local peak at each periastron passage, until the time when the star is completely disrupted and the amplitude vanishes (note the different time scales on each plot in Figure~\ref{fig:BHNSgw}). In runs L$_{1}$, L$_{2}$ and NSNS, smaller oscillations of decaying amplitude after each passage (but perhaps the last) are also clearly present when $\Gamma=2$. For instance, for run L$_{2}$ between 3 and 15~ms, their frequency is $\nu_{\rm osc} \simeq 1750$~Hz. This is {\em not} due to the rotation of the neutron star (the spin frequency of the core is at this point approximately $\nu_{\rm spin} \simeq 320$~Hz) but essentially to radial vibration modes excited by the action of the primary at periastron passage. The frequency of such modes is close to the natural value given by hydrostatic equilibrium, namely $ \nu_{\rm osc} \simeq (G M / R^{3})^{1/2}/2 \pi \simeq 1400$~Hz. 
The small impact parameter and rapid disruption of the star in run L$_{0}$, and the high compressibility used in run L$_2\Gamma_{5/3}$ do not allow for a clear manifestation of such oscillations. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig11.eps} \caption{Gravitational wave energy spectrum for runs L$_{0}$, L$_{0}\Gamma_{5/3}$, L$_{1}$, L$_{2}$, L$_{2}\Gamma_{5/3}$ and NSNS. The ranges are identical in all frames. The reference power law is $dE/df \propto f^{-1/3}$, the characteristic spectrum for the in-spiral of a point mass binary. The thick (thin) lines are for $\Gamma=2$ ($\Gamma=5/3$). The oscillating nature of these spectra is due to the finite extent of wave trains present in the amplitudes during different segments of temporal evolution.} \label{fig:BHNSgwsp} \end{figure} \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig12.eps} \caption{Power radiated in gravitational waves for black hole-neutron star encounters and the double neutron star collision. There is one successively weaker peak for each periastron passage until complete tidal disruption. The curve labeled ``Binary'' shows the luminosity computed for a black hole-neutron star binary with the same mass ratio, $q=0.31$ and initial separation, $r_{i}=3.7 R_{\rm NS}$, as runs L$_0$ through L$_{2}$, for which disruption occurs promptly after the beginning of mass transfer \citep{lee01}. The thick (thin) lines are for $\Gamma=2$ ($\Gamma=5/3$). When varying the stiffness of the equation of state, the luminosities are nearly indistinguishable for small impact parameter and clearly separated for large values, due to the difference in the associated orbital evolution.} \label{fig:BHNSGWlum} \end{figure} The power spectrum of these gravitational waves is shown in Figure~\ref{fig:BHNSgwsp}. There are peaks at 1900, 1600 and 1000~Hz for runs L$_{0}$, L$_{1}$ and L$_{2}$, respectively. 
These correspond to the interaction time for each run, which increases (thus decreasing the frequency) as the impact parameter (or equivalently, the total angular momentum in the system) increases. Secondary periastron passages make this peak wider at higher frequencies. This is particularly evident when comparing the results of runs L$_2$ and L$_2\Gamma_{5/3}$. The latter has a single late-time periastron passage (instead of three for the former), and the peak is broadened to $\simeq 2000-3000$~Hz. We note that the oscillation frequency of the core, visible in the signal when $\Gamma=2$, is undetectable in these spectra, being buried within the main peak due to the binary interaction (this is to be expected, since by definition of a tidally disruptive event, the interaction time is roughly equal to the free fall time scale of the star). The spectrum for run NSNS shows a similar overall structure. We note that it is difficult to extract a clean spectral signal with a high signal to noise ratio, because the simulated time is quite limited. Secondary high frequency variations at $\simeq 5000$~Hz can be seen in the spectra for runs L$_{0}$, L$_{1}$ and NSNS and are related to the asymmetry of the waveform around the time of closest approach. The large number of periastron passages in run L$_{2}$ makes the spectrum more noisy and this feature is not observed. Finally, we also show the gravitational wave luminosity in Figure~\ref{fig:BHNSGWlum}, the integration of which reveals that the efficiency for their emission is $\epsilon= E_{\rm GW} /(M c^{2})= 10^{-2}, 7 \times 10^{-3}, 6.8 \times 10^{-3}, 5.9 \times 10^{-3}$ for runs L$_{0}$, L$_{1}$, L$_{2}$ and NSNS respectively. For comparison, the efficiency in the case of a merging binary with the same mass ratio is $\approx 5\times 10^{-3}$ \citep{lee01}. 
We note also from Figure~\ref{fig:BHNSGWlum} that the maximum power radiated during a collision and even during subsequent close approaches can be significantly higher than that in a merging binary with the same mass ratio, due to the larger velocities in an eccentric orbit at periastron. \section{The rates of collisions and tidal captures of compact objects in globular clusters} \label{sec:rates} The highest stellar densities in GCs are reached in the core, in particular during core collapse. In this section we estimate the collision and tidal capture rate as a function of time by first reconstructing the core evolution of a typical post-core-collapse GC. We then re-scale the resulting encounter rate evolution with the measured mass and size distribution of GCs in galaxies to obtain the rate per host galaxy. Finally, for a given galaxy luminosity density distribution, we obtain per volume of space and as function of time, the expected global rate of collisions and tidal capture of compact objects in the cores of GCs. The details of the encounter rate calculation will be presented in a forthcoming paper (van de Ven et al., in prep.), of which we give here a summary focussing on close encounters between compact stars. \subsection{Close encounters} \label{sec:encounters} We assume the different types of stellar objects, $i$, are distributed homogeneously within a spherical core of radius $r_\mathrm{c}$, each with fractional number $f_i \le 1$ and total number density $n_\mathrm{c}$. We further assume that the stars follow a Maxwellian velocity distribution function with dispersion $\sigma_\mathrm{c}$. 
Together with the dominating gravitational focusing, this means we can approximate \citep{heggie75} the total collision rate as \begin{multline} \label{eq:nucoll} \nucol = 2.1 \times 10^{-3} \; \mathrm{Gyr}^{-1} \; \frac{f_1\,f_2}{1+\delta_{12}} \times \\ \Bigl(\,\frac{n_c}{10^6\;\mathrm{pc}^{-3}}\,\Bigr)^2 \Bigl(\,\frac{r_c}{0.1\;\mathrm{pc}}\,\Bigr)^3 \Bigl(\,\frac{\sigma_c}{10\;\mathrm{km\,s}^{-1}}\,\Bigr)^{-1} \times \\ \Bigl(\,\frac{M_1+M_2}{1\;\mathrm{M}_\odot}\,\Bigr) \Bigl(\,\frac{\Rmin}{10\;\mathrm{km}}\,\Bigr), \end{multline} where $\delta_{12}=1$ if type 1 and 2 are equal, and $\delta_{12}=0$ otherwise. The stars have masses $M_i$, and their separation at closest approach is given by $\Rmin$, which we take to be the sum of the stellar radii, i.e., $\Rmin = R_1 + R_2$. In addition, we also consider encounters in which stars pass close enough to each other to form a binary by transferring orbital energy to internal stellar oscillations. We adopt a cross section for tidal capture of the form \citep{lee86} \begin{equation} \label{eq:Sigtid} \Sigma_\mathrm{tid} = a \left( \frac{v_\mathrm{inf}}{v_{\star,2}} \right)^{-\beta} R_2^2, \end{equation} where $v_\mathrm{inf}$ is the relative velocity at infinity, and $v_{\star,2} = (2GM_2/R_2)^{1/2}$ is the escape velocity at the surface of the secondary, captured star. We use the fitting functions of \cite{kimlee99} to obtain the amplitude $a$ for encounters between different stellar types, while the slope $\beta \simeq 2.2$ in all cases. In case of collisions dominated by gravitational focussing, the cross section is of the same form, but with slope $\beta = 2$. 
Henceforth, we can express the tidal capture rate $\nutid$ in the same way as the collision rate in equation~\eqref{eq:nucoll}, but with closest approach given by \begin{equation} \label{eq:Rmintid} \Rmin = \Gamma(2-\beta/2) \frac{a}{\pi} \frac{M_2}{M_1+M_2} \left( \frac{2\sigma_\mathrm{c}}{v_{\star,2}} \right)^{2-\beta} R_2, \end{equation} where $\Gamma$ is the complete gamma function. \begin{deluxetable}{*{7}{r}} \tablecaption{Relative encounter rates. \label{tab:rates}} % \tablewidth{0pt} % \tablehead{ \colhead{type~1} & \colhead{type~2} & \colhead{$M_2/\mathrm{M}_\odot$} & \colhead{$R_2/\mathrm{km}$} & \colhead{$\xicol$} & \colhead{$\nutid/\nucol$} & \colhead{$\Rmin/R_2$} } % \startdata NS & NS & 1.4 & 10 & 1.00 & 6.68 & 13.37 \\ BH & BH & 4.5 & 13 & 4.18 & 6.80 & 13.60 \\ WD & WD & 0.5 & 11000 & 392.87 & 2.99 & 5.99 \\ BH & NS & - & - & 4.85 & 7.18 & 16.51 \\ NS & WD & - & - & 747.11 & 7.12 & 7.12 \\ BH & WD & - & - & 1966.61 & 10.03 & 10.04 \enddata \end{deluxetable} In what follows, we focus on the collision and tidal capture rate between two neutron stars. Since we assume a homogeneous distribution of stars in the core, these results can be re-scaled for close encounters between other types of stellar objects. In Table~\ref{tab:rates}, we give these scaling factors in case of a neutron star (NS), a stellar black hole (BH), and a white dwarf (WD), with (typical) masses and radii as indicated. After taking into account the relative differences in fractional numbers $f_i$, the collision rate follows from multiplying the (default) NS-NS collision rate by the factor $\xicol$ in column 5. In turn, multiplying the resulting collision rate with the factor in column 6 yields an estimate of the tidal capture rate. Here we neglect the weak dependence of $\Rmin$ in equation~\eqref{eq:Rmintid} on $\sigma_\mathrm{c}$, which in turn varies only mildly during the evolution of the core. 
While in general a tidal capture does not necessarily lead to the coalescence of the two stellar objects \citep{lee86}, in case of compact objects the approach is so close (see $\Rmin/R_2$ in column 7) that they will merge well within a Hubble time. In particular, for two neutron stars the close encounter rate is boosted by a factor $\simeq 6.7$ due to tidal capture, without any significant delay with respect to collisions because of the very efficient emission of gravitational waves. The encounter rate might be significantly enhanced due to interactions between single stars and stellar binaries (and even between binaries), which have a much larger cross section, with $\Rmin$ proportional to the binary separation. Moreover, due to mass segregation the more massive stellar objects, including binaries, sink towards the center and can be important or even dominant in the core. Still, only the stars in very ``hard'' binaries with small enough separations (and thus smaller cross sections) might coalesce within a Hubble time. We discuss binaries in Section~\ref{sec:compactbinaries} below, in particular focussing on compact binaries as they are commonly believed to be the progenitors of SGRBs. We restrict however the encounter rate calculations to collisions and tidal captures between single stars, and hence consider the resulting values as lower limits. \subsection{Evolving encounter rate} \label{sec:evolution} After gradual contraction during an early phase, the core of a GC can go into deep self-similar collapse \citep{lbe80}. The collapse is halted due to energy release from interactions with binaries, also known as binary burning \citep{hills75}, and/or from other sources including a possible intermediate-mass black hole \citep{shapiro77}. The post-collapse evolution has been extensively studied \citep[see e.g.][]{heggie89}, but many aspects are still intensively investigated and debated. 
For example, \cite{fregeau2008} recently proposed that the observed post-core-collapse GCs ($\simeq 20$\,\% in the Milky Way) are most likely in the binary burning phase, while the remaining GCs are still in the process of core contraction and have not yet reached the binary burning phase. In the meantime, matching Monte Carlo models of M\,4 \citep{heggiegiersz2008} and of NGC\,6397 \citep{gierszheggie2009} seem to reveal that both GCs are past collapse in the binary burning phase, even though only NGC\,6397 is one of the observed post-core-collapse GCs, while the surface brightness of M\,4 is well fitted by a standard King profile. \cite{gierszheggie2009} argue that the differences in the surface brightness profiles are most likely due to fluctuations in the core after collapse. They point out that it is well possible that most GCs are post-core-collapse, but only a fraction happens to be at the ``peak'' of the fluctuations that corresponds to a cusped surface brightness profile and leads to post-core-collapse classification. These fluctuations are not necessarily the well-known gravothermal oscillations \citep{bettwieser84,goodman87}, but could be the result of the stochastic nature of binary burning. Even though it is clear that the details of these fluctuations are far from known \citep[but see][]{heggiegiersz2009}, the core after collapse is expected to be \emph{on average} larger than those of the observed post-core-collapse GCs, and to increase over time to explain the overall dimming in the central surface brightness \citep[see also Fig.~10 of][]{gierszheggie2009}. To mimic this average behaviour, we assume a gradually expanding core after deepest core collapse, which in turn we describe by two classical self-similar solutions in two phases. In the early phase expansion mirrors the self-similar late phase collapse, but at a slower rate \citep{ilb83}. 
After a time since deepest collapse roughly equal to the duration of the late phase collapse \citep{heggie85}, the core enters the late phase in which it follows the self-similar expansion of an isolated system \citep{henon65}. We derive the encounter rate during all four phases of collapse and expansion. In addition to this core evolution model with gradual core expansion after deepest collapse, we also consider a model in which the core properties are kept fixed after deepest collapse. Since the latter ``halt of collapse'' would mimic binary burning without fluctuations, it provides an upper limit to the contribution of high-density cores, and hence an upper limit to the encounter rate. On the other hand, while the core expansion is just an average approximation to the possible complex fluctuations after deepest collapse, we believe it to provide a closer estimate to the encounter rate. Moreover, we find below (see also Figure~\ref{fig:m15_trh_frac}) that the predicted fraction of (post-)core-collapse GCs is similar to that observed for the MW. The core expansion could also naturally explain why only loose GCs have a shallow/depleted global mass function \citep{demarchi2007}. We adopt M\,15 as a proto-typical GC that underwent core collapse. Because this GC has been extensively studied and modelled \citep{dull97, mcnamara04, bosch06}, its current properties are known accurately. Given the high concentration $c = \log(\rt/r_\mathrm{c}) \gtrsim 3$, where $\rt$ is the tidal radius, we assume that the core of M\,15 is currently still very close to deepest collapse. The fraction of its age $\tage \simeq 13.2$\,Gyr \citep{baumgardt03} that M\,15 spent in the early and late phase of core collapse depends on its initial concentration $\cini$. The latter can be inferred from evolved (single-mass) Fokker-Planck models \citep{quinlan96} for a given initial half-mass relaxation time $\trhini$. 
In turn, the latter follows from the current half-mass relaxation time $\trh \simeq 1.42$\,Gyr as $\trhini \simeq \trh(\tage) - \tage/\xides$, under the assumption of a constant mass loss rate from the GC \citep[e.g.][]{vesperini97}. Since the rate of dissolution, $\xides$, depends again on the initial concentration \citep{gnedin99}, we have to find $\cini$ iteratively. Doing this, we infer for M\,15 $\xides \simeq 51$, $\trhini \simeq 1.68$\,Gyr, and corresponding $\cini \simeq 1.70$. This implies that M\,15 spent about $8.0$\,Gyr in the early phase, and took another $5.2$\,Gyr to reach the current phase of deep core collapse. Even though the encounter rate during the late phase is expected to dominate over the encounter rate during the early phase, we still take the increase in $\nuenc$ during the early phase into account. In particular, we take into account the changes in the fractions of stellar types due to mass segregation in the early phase, whereas we assume the fractions to remain constant during deep collapse. We predict the core expansion by mirroring the late phase collapse but with an expansion rate that is a factor 3 slower than the collapse rate. After about $5.2$\,Gyr (the duration of the late phase collapse) in the future, we expect the core of M\,15 to go into the late self-similar expansion phase. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig13.ps} \caption{An estimate of the encounter rate between two neutron stars in M\,15 as function of time (or redshift at the top). The thin lower curves show the predictions for the collision rates, while the thick upper curves present the tidal capture rates. The solid curves are for the fiducial core evolution model, whereas the dashed curves assume a faster collapse (and expansion), and the dash-dotted curves indicate the effect of an earlier collapse. The dash-triple-dotted curve shows an additional model in which the core properties are kept fixed after deepest collapse. 
(See Sections~\ref{sec:evolution} and~\ref{sec:uncertainties} for further details.)} \label{fig:m15_nuencevo} \end{figure} Figure~\ref{fig:m15_nuencevo} presents the resulting rates as function of time (and redshift along the top axis) for close encounters between two neutron stars in the core of M\,15. The number fraction of neutron stars in the core at the beginning of the early phase is less than $1$\%, but it rises due to mass segregation to about $55$\% during the self-similar deep collapse \citep{dull97}. The thin lower curves show the predictions for the collision rates, while the upper thick curves present the tidal capture rates. The solid curves are for the fiducial core evolution model, whereas the dashed curves assume a faster collapse (and expansion), and the dash-dotted curves indicate the effect of an earlier collapse. These two variations on the fiducial model are further discussed in Section~\ref{sec:uncertainties} below. Finally, the dash-triple-dotted curve shows the additional model in which the core properties are kept fixed after deepest collapse. \subsection{Average encounter rate} \label{sec:average} We compute an average encounter rate per host galaxy by assuming that all GCs follow an evolution similar to that of M\,15, with a scaling in time, based on the half-mass relaxation time $\trh \propto \rh^{3/2} M^{1/2}$. We adopt the distribution in half-mass radii $\rh$ of GCs derived by \citet{jordan05}, which is independent from the distribution in their total masses $M$ \citep{mclaughlin00}. It is believed that initially the GC mass distribution followed a power-law, but that especially the less-massive GCs dissolved, most, if not all, before going into core collapse \citep{mclaughlin08}. Hence, we adopt the current GC mass distribution, which is well described by a lognormal distribution \citep{jordan06}. 
After randomly drawing from these distributions in $\rh$ and $M$, we arrive at a current half-mass relaxation time, i.e., after a time $\tage \simeq 13$\,Gyr since the formation of an average old GC. Adopting the same initial concentration $\cini = 1.70$ as estimated for M\,15, we infer the corresponding initial half-mass relaxation time, which we use to re-scale the above derived encounter rate evolution for M\,15. \begin{figure*} \includegraphics[width=\columnwidth,angle=0,scale=1.0]{fig14a.ps} \hfill \includegraphics[width=\columnwidth,angle=0,scale=1.0]{fig14b.ps} \caption{The \emph{left panel} shows the distribution of half-mass relaxation times $\trh \propto \rh^{3/2} M^{1/2}$, based on the independent distributions in half-mass radii $\rh$ and total mass $M$ of globular clusters. This results in the solid histogram, which turns into the dotted histogram for initial half-mass relaxation times $\trh(0)$. The vertical lines show that M\,15 has a larger than average half-mass relaxation time, so we expect that currently more than half of all GCs are already past reaching deepest core collapse. This is confirmed by the thin solid curve in the \emph{right panel}, showing the fraction of GCs that are past reaching deepest core collapse as a function of time. However, in this fiducial core evolution model it is likely that some time after deepest core collapse the GCs are not anymore \emph{observed} to be post-core-collapse because the core is expanding. Henceforth, to estimate the fraction of observed post-core-collapse GCs, we only count at a given time GCs that are in the late collapse or early expansion phase when the core is smaller. The resulting thick curves show that for the first three models the current fraction (vertical solid line) is similar to the fraction of $\simeq 20$\,\% of post-core-collapse GCs observed in the Milky Way. 
Only the fourth halt-of-collapse model predicts a much higher fraction because the core properties are held fixed after deepest collapse.} \label{fig:m15_trh_frac} \end{figure*} The current and initial half-mass relaxation distributions are shown as solid and dotted histograms in the left panel of Figure~\ref{fig:m15_trh_frac}, while the vertical lines indicate the corresponding values for M\,15. Since M\,15 has a larger than average half-mass relaxation time, we expect that currently more than half of all GCs are already past reaching deepest core collapse. This is confirmed by the thin solid curve in the right panel of Figure~\ref{fig:m15_trh_frac}, showing the fraction of GCs that are past reaching deepest core collapse as a function of time. In this fiducial core evolution model it is likely that some time after deepest core collapse the GCs are not anymore \emph{observed} to be post-core-collapse because the core is expanding. Henceforth, to estimate the fraction of observed post-core-collapse GCs, we only count at a given time GCs that are in the late collapse or early expansion phase when the core is smaller. The resulting thick curves show that for the first three models the current fraction (vertical solid line) is similar to the fraction of $\simeq 20$\,\% of post-core-collapse GCs observed in the Milky Way. Only the halt-of-collapse model predicts a much higher fraction because the core properties are held fixed after deepest collapse. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig15.ps} \caption{An {\em average} encounter rate between two neutron stars in the core of globular clusters per host galaxy with luminosity $\Lg = \{0.2,1.0,5.0\} \times \Ls$. The expected number of GCs per host galaxy, given in brackets, follows from \citet{mclaughlin99}. 
For each globular cluster, the collision rate of M\,15 is re-scaled by the half-mass relaxation time $\trh \propto \rh^{3/2} M^{1/2}$, randomly drawn from the independent distributions in half-mass radii $\rh$ and total mass $M$ of globular clusters. The meaning of different line styles is the same as in Fig.~\ref{fig:m15_nuencevo}.} \label{fig:m15_nuencscaled} \end{figure} The number of GCs for which we re-scale the encounter rate evolution of M\,15 depends strongly on the total luminosity $\Lg$ of the host galaxy \citep{mclaughlin99}. In Figure~\ref{fig:m15_nuencscaled} we plot the combined encounter rate for a typical $\Ls$ galaxy with about a thousand GCs, as well as for a galaxy that is a factor $5$ less and more luminous with the number of GCs comparable to that in the Milky Way and in a giant elliptical galaxy, respectively. In the latter two cases we show only the tidal capture rates for the fiducial model, whereas for the $\Ls$ galaxy we also indicate the effect of a faster and earlier collapse as discussed in Section~\ref{sec:uncertainties}, as well as the additional halt-of-collapse model. The results for the collision rate are not shown for clarity, but are similar except for a factor $\simeq 6.7$ decrease in amplitude. In all cases, the encounter rate increases strongly up to redshift $z \sim 1$, but slowly decreases again toward $z = 0$. Next, we estimate a global encounter rate by combining the previously derived encounter rates per host galaxy with the galaxy luminosity density distribution. The latter is well described by the Schechter function \begin{equation} \label{eq:gallumdistr} \Phig(L) = (\Phigs/\Ls) \; (L/\Ls)^\alpha \exp(-L/\Ls), \end{equation} with $\Phigs = 3.77 \times 10^{-3}$\,Mpc$^{-3}$\ and $\alpha = -1.30$ \citep{faber07}. 
Since the above estimate of the combined encounter rate evolution depends on the {\em current} luminosity of the host galaxy, the evolution of $\Phig(L)$ is not needed to compute the global encounter rate as function of time. However, the latter evolution should be taken into account when discussing the properties of host galaxies, which do change significantly with redshift (see Section~\ref{sec:galacticenvironment}). \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig16.ps} \caption{The predicted {\em global} rate of close encounters between two neutron stars in the cores of globular clusters, per volume of space and as function of time (or redshift at the top). The values are computed from combining the average collision rate per host galaxy (Fig.~\ref{fig:m15_nuencscaled}) with the galaxy luminosity density distribution. The meaning of different line styles is the same as in Fig.~\ref{fig:m15_nuencevo}.} \label{fig:m15_nuencspace} \end{figure} In Figure~\ref{fig:m15_nuencspace}, we present our prediction for the global encounter rate $\Renc$ (in yr\,$^{-1}$\,Gpc$^{-3}$) between two neutron stars in the core of GCs. The rates both of collisions (thin curves) and of tidal captures (thick curves) show a clear increase around $z \sim 1$, followed by a gradual decline. The tidal capture rate for the fiducial model (thick solid curve) peaks around $z \simeq 0.7$, at a value of $\Renc \simeq 55$\,yr\,$^{-1}$\,Gpc$^{-3}$. This value is fully consistent with the estimated event rates for SGRBs, which are of the order 8--30\,yr\,$^{-1}$\,Gpc$^{-3}$\ for isotropic radiation \citep{guetta06}. Collimated radiation with opening angles of 30--60$^\circ$ would boost up the event rates of SGRBs to our predicted global encounter rates. However, since the latter are likely to be rather conservative, smaller opening angles would still be possible. 
\subsection{Faster and earlier collapse} \label{sec:uncertainties} To infer the initial concentration of M\,15 and the corresponding time spent in the early and late phase of collapse, we used the results of a series of cluster models with single-mass stars \citep{quinlan96}. Several complicating factors in real clusters can change the rate of collapse \citep[see e.g.\ review by][]{chernoff93}: for example stellar evolution and especially the presence of primordial binaries slow down the collapse, while mass loss from a tidal boundary and in particular mass segregation accelerate the collapse. Even though the fraction of primordial binaries in GCs is believed to be only a few per cent \citep[e.g.][]{hurley07, davis08}, the binding energy of a single hard binary is sufficient to significantly slow down or perhaps even prevent core collapse \citep{hut92, heggie06}. On the other hand, because the relaxation time is inversely proportional to the mean stellar mass, the acceleration of core collapse due to mass segregation is roughly proportional to the increase in the mean mass, which in the core of M\,15 is about a factor $1.19/0.43 \sim 3$ \citep{dull97}. The detailed inclusion of these competing mechanisms is beyond the scope of this paper, but to investigate the effect of a changing collapse rate on the encounter rate, we assume a 50\% faster core collapse (and expansion) than the fiducial M\,15 core evolution model. At the same time, this implies a lower initial concentration of $\cini \simeq 1.47$. The results of this ``faster collapse'' model on the encounter rate are shown with dashed curves in Figures~\ref{fig:m15_nuencevo}--\ref{fig:m15_nuencspace}, and~\ref{fig:m15_nuenccumul}. As can be seen from Figure~\ref{fig:m15_nuencspace}, the main effect is a lowering of the global encounter rate by about 24\% with respect to the fiducial model to $\Renc \simeq 41$\,yr\,$^{-1}$\,Gpc$^{-3}$\ at the peak, which remains around $z \sim 0.7$. 
We already mentioned in Section~\ref{sec:evolution} that after deepest core collapse the core can undergo fluctuations, either due to gravothermal oscillations \citep{bettwieser84,goodman87}, or as a result of the stochastic nature of binary burning \citep{gierszheggie2009}. In either case, these fluctuations can result in changes in the core radius $r_\mathrm{c}$ by factors of a few, and even order of magnitude changes in the core density $\rho_\mathrm{c}$. We do not attempt to capture this fluctuating nature, but we consider here a possible consequence of it. Because the (central) relaxation time is inversely proportional to $\rho_\mathrm{c}$, only a relatively small fraction of the time during an oscillation will be spent at large $\rho_\mathrm{c}$ and correspondingly small $r_\mathrm{c}$. Still, if we happen to observe a post-collapse GC during this special time at the ``peak'' of the fluctuation, we will underestimate the time elapsed since deepest collapse under our assumption of a steady expansion in our fiducial core evolution model. To investigate the possible effect on the encounter rate, we consider shifting the time of deep core collapse back by an amount of $2.6$\,Gyr, i.e., half of the duration of the late-phase collapse. As a consequence, the early-phase collapse is shorter by the same amount and the initial concentration increases to $\cini \simeq 1.79$. The smooth expansion model in this case predicts a current core radius and core density that are respectively almost 65\% larger and a factor 3 smaller than the observed values of $r_\mathrm{c} = 0.05$\,pc \citep{noyola06} and $\rho_\mathrm{c} = 6.5 \times 10^6$\,M$_\odot$\,pc$^{-3}$\ \citep{bosch06}. The results of this ``earlier collapse'' model on the encounter rate are indicated by the dash-dotted curves in Figures~\ref{fig:m15_nuencevo}--\ref{fig:m15_nuencspace}, and~\ref{fig:m15_nuenccumul}. 
Figure~\ref{fig:m15_nuencspace} shows that the global encounter rate is increased by about 14\% with respect to the fiducial model to $\Renc \simeq 62$\,yr\,$^{-1}$\,Gpc$^{-3}$\ at the peak, which occurs earlier, at $z \sim 1.0$. The latter is directly related to the applied shift in time, whereas the former is the result of more GCs reaching deep collapse in $\tage = 13$\,Gyr (see also Figure~\ref{fig:m15_trh_frac}). The above faster and earlier collapse models imply respectively a lower and higher initial concentration for M\,15 than used in the fiducial model. For each model, we then fixed the initial concentration of all GCs to that of M\,15 when computing the average encounter rate. It is already very difficult to infer the initial concentration for M\,15, let alone the distribution of initial concentrations for the population of old GCs. Even so, we have recomputed the average encounter rate adopting a Gaussian distribution in $\cini$, with a mean at the initial concentration of M\,15 and with dispersion in the range $[0,0.3]$. As a result of the dispersion, the rate of dissolution $\xides$ is not anymore fixed to that of M\,15, which in turn leads to an additional broadening in the half-mass relaxation distribution when converting from current $\trh$ to initial $\trh(0)$ values. However, since the resulting variations in the conversion are typically less than $\sim 0.1$\,Gyr, the effect on the average encounter rate is very small. Changing the mean, as we do in the faster and earlier collapse models, does have a significant effect on the average encounter rate. Nevertheless, we see from Figure~\ref{fig:m15_nuencspace} that the evolution of the global encounter rate is quite similar even for significant deviations from the fiducial model. In other words, as long as M\,15 is indeed representative of an (old) GC that underwent core collapse and expansion, we expect our predictions for the encounter rates to be robust.
\subsection{Binaries} \label{sec:compactbinaries} We have not included the potentially important effect of binaries in our encounter rate calculations. The fraction of primordial binaries in GCs is thought to be only a few per cent \citep{hurley07}, as confirmed by measuring photometric binaries beyond the half-light radius \citep[see e.g.][]{davis08}. In the core of GCs tidal captures can significantly increase the fraction of binaries. Placing useful observational constraints on the fraction of binaries in the core is not an easy task because many of them could be dark as a result of binary-single interactions in which the more numerous main sequence stars (MS) and white dwarfs (WD) are replaced by compact remnants. Binary systems have a larger cross section for intersection and they mass segregate faster into the denser core than individual stellar objects. Three-body interactions are thus indeed likely to create compact binaries in the cores of GCs, including double neutron star (NS-NS) and black hole--neutron star (BH-NS) systems. As already noted in the Introduction, the merger of such dynamically formed compact binaries is potentially an important channel for the production of SGRB progenitor systems \citep{grindlay06}, different from the close encounters between single compact stellar objects we have considered so far. As we discuss below in \ref{sec:dynamicalcompactbinaries}, for encounters involving the exchange of a main sequence star with a neutron star or a black hole, the resulting separation will be too large for coalescence to take place in less than a Hubble time. Thus this channel is unlikely to contribute to the overall formation rate of viable SGRB progenitors. The occurrence of a direct impact may release a substantial amount of energy, but whether this can lead to the conditions required for SGRB production remains to be evaluated. 
The three main ways to arrive at compact binaries are (i) primordial compact binaries, (ii) primordial binaries of which the non-compact member(s) are replaced by compact objects via one (or two) exchange interactions, and (iii) the same as (ii) but starting from binaries which themselves are first formed through tidal capture of two single stellar objects. We consider all three ways below and show that in the cores of GCs, the latter dominates the formation rate of compact binaries, but is not, as commonly believed, dominant over the rate of close encounters between two single compact objects. While SGRBs are potentially created instantaneously when two compact objects closely encounter each other, the separation of the compact binary has to be small enough such that the time for the compact objects to merge, in addition to the formation time, is less than the Hubble time. Note that we expect very little contribution to the close encounter rate from interactions between a single compact object (NS or BH) and a compact object that is a member of a binary which also contains an extended object (MS or WD). Even though we show below that the fraction of the latter binaries can be(come) significant, in nearly all cases the (exchange) interaction will be with the extended object \citep[e.g.][]{sigurdsson93}. \subsubsection{Primordial compact binaries} \label{sec:primordialcompactbinaries} The formation of primordial binaries follows the cosmic star formation rate which peaks at high redshift $z\sim3$ \citep[e.g.][]{hopkins06}. Primordial binaries with two massive stars ($\gtrsim 8$\,M$_\odot$) that survive the mass loss and possible asymmetry of the supernova explosion of the secondary provide compact NS-NS and BH-NS binaries. They potentially could produce SGRBs at high redshift with a distribution extending to lower redshift depending on the range of initial binary separations and thus merger times.
However, even if a non-negligible fraction of primordial massive binaries survive and result in close compact binaries \citep[$\sim 10^{-2.5}$,][]{narayan91}, massive binaries are only a very small fraction of all primordial binaries for a reasonable, not too top-heavy initial mass function ($\sim 10^{-4.5}$, for an IMF with a Salpeter power law slope from $100$\,M$_\odot$\ down to $0.3$\,M$_\odot$, and constant down to $0.1$\,M$_\odot$). In GCs with already at most a few per cent primordial binaries \citep[$\sim 10^{-1.5}$,][]{hurley07, davis08} and likely additional complications to form and retain compact binaries \citep[see also][]{ivanova08}, their possible contribution to producing SGRBs should be negligible; the GCs in the Milky Way contain a total of $\sim 10^{7.5}$ stars, which implies that we expect only $\sim 1$ primordial compact binary formed in the GCs of the Milky Way. However, in the ``field'' of a galaxy the primordial binary fraction is of order unity and the vast amount of them results in a number of compact binaries that is not anymore insignificant; we expect $\sim 10^5$ primordial compact binaries formed in the ``field'' of the Milky Way \citep[see, e.g.][for initial estimates]{narayan91,phinney91}. Merger rate calculations for such primordial binaries in the galactic field have become increasingly sophisticated \citep{kalogera04,osh05,osh08,osh10}, and now consider not only various stellar evolutionary channels, but also different host galaxy types (spiral vs. elliptical). The results are still hindered by the fact that there is a small number of observed systems, giving broad estimates in the range 1--100\,yr\,$^{-1}$\,Gpc$^{-3}$. \subsubsection{Dynamically formed compact binaries} \label{sec:dynamicalcompactbinaries} Compact binaries formed dynamically in GCs have long delay times between compact object birth and binary formation as they have to wait for collapse of the core \citep{hopman06}.
This helps their formation in three ways: (i) mass segregation increases the relative fraction of heavier-than-average binaries as well as compact remnants in the core, (ii) tidal capture increases the binary fraction in the core, in particular the ``hard'' binaries with small separations (whereas the ``soft'' binaries with large separations are being ``ionized''), (iii) the high core density allows three-body exchange interactions in which a member of an existing binary is replaced with a compact remnant. Henceforth, we expect in the cores of GCs the compact binary formation rate to follow the close encounter rate, with a similar peak around $z \simeq 0.7$ as shown in Figure~\ref{fig:m15_nuencspace}. Whereas after this initial delay the potential production of SGRBs is nearly instantaneous in the proposed encounter scenario, there is an additional delay in the binary merger scenario given by the rate of angular momentum losses through gravitational waves. For a NS-NS binary dynamically formed around $z\simeq 0.7$ to merge within the next $\simeq 6$~Gyr through the emission of gravitational waves, the binary separation required is $a \lesssim 3.93$\,R$_\odot$. As mentioned above, most of the compact binaries in GCs are expected to be formed through three-body exchange interactions: a NS (average mass $1.4$\,M$_\odot$, and average radius $10$\,km) replaces the less-massive main-sequence star (MS; $0.4$\,M$_\odot$, $3.3 \times 10^5$\,km) or white dwarf (WD; $0.5$\,M$_\odot$, $1.1 \times 10^4$\,km) member in an existing binary with a NS. The latter NS-MS or NS-WD binary might be itself the result of a previous similar three-body exchange interaction with a MS-MS, WD-MS or WD-WD binary. We assume as minimum separation of these binaries the closest distance before ignition of mass transfer occurs \citep{paczynski71}. This yields $a \gtrsim 1.72$\,R$_\odot$\ and $a \gtrsim 0.054$\,R$_\odot$\ for NS-MS and NS-WD binaries, respectively. 
We obtain similar lower limits of about $1.312$, $1.365$, and $0.043$ in units of R$_\odot$\ for MS-MS, WD-MS, and WD-WD binaries, respectively. Even though after each exchange the binary hardens, the binary separation after an exchange, $a_\mathrm{fin}$, is typically larger than the binary separation before the exchange, $a_\mathrm{ini}$. \cite{sigurdsson93} show that the distribution of $a_\mathrm{fin}/a_\mathrm{ini}$ strongly peaks around the mass ratio $m_f/m_e$, between the single stellar object from the ``field'' and the stellar object ``exchanged'' from the binary, but with a long tail towards lower values. Taking for $a_\mathrm{ini}$ the minimum separation of each binary, we compute the median of the latter distribution\footnote{We adopt an approaching velocity from infinity of $v_\infty = 2\sigma_\mathrm{c} \simeq 20$\,km\,s$^{-1}$, but the results are not sensitive to the precise value that is adopted.} to arrive at the average $a_\mathrm{fin}$ after the first exchange interaction, and repeat this for the MS-MS, WD-MS, and WD-WD binaries that require a second exchange interaction to arrive at a NS-NS binary. For all three binaries with a MS as a member the resulting \emph{minimum} binary separations ($5.21$, $12.8$ and $12.4$ in units of R$_\odot$\ for NS-MS, MS-MS, and WD-MS, respectively) are larger than the above \emph{maximum} binary separation for merger of the dynamically formed NS-NS binary ($3.93$\,R$_\odot$\ for merging within $\simeq 6$\,Gyr). Note that this is still the case even when we ignore the time to reach core collapse and allow the merger to take a full Hubble time, corresponding to a maximum binary separation of $4.8$\,R$_\odot$. On the other hand, a NS-WD binary results in a NS-NS binary with a minimum separation $a \gtrsim 0.126$\,R$_\odot$\ well below the maximum for merging. 
Similarly, a WD-WD binary yields a NS-WD binary with $a \gtrsim 0.102$\,R$_\odot$\ after the first exchange, and a NS-NS binary with $a \gtrsim 0.240$\,R$_\odot$\ after the second exchange. We assume that the formation rate of these binaries through three-body exchange interactions follows that of the collision rate, but with a different amplitude, which we estimate below. \subsubsection{Compact binaries from primordial binaries} \label{sec:fromprimordialbinaries} Here we estimate the rates at which compact binaries in the cores of GCs are dynamically formed from primordial binaries, of which one or both members are non-compact stellar objects. We take $f_\mathrm{pri} = 0.02$ for the total fraction of primordial binaries in GCs, which is not well known but thought to be at most a few per cent \citep[e.g.][]{hurley07, davis08}. To derive the fraction of primordial binaries that have a NS and a WD, we adopt an IMF with Salpeter power-law slope from $100$\,M$_\odot$\ down to $0.3$\,M$_\odot$, and constant down to $0.1$\,M$_\odot$. Supposing that stars above $20$\,M$_\odot$\ evolve into black holes (BHs), stars above $7$\,M$_\odot$\ turn into NSs, stars above $0.8$\,M$_\odot$\ end their life as WDs, and lower-mass stars are still in the MS, we find corresponding number fractions of about $0.0012$, $0.0043$, $0.1014$, and $0.8930$ for BH, NS, WD, and MS stars, respectively. While we start with drawing the masses of the primary and secondary, $M_1$ and $M_2$, independently from the IMF, we also consider the case that the mass ratio $M_2/M_1$ is distributed uniformly in the range from zero to unity. The former case is fully consistent with observed (spectroscopic) binaries with longer periods ($P>10^3$\,days), but there are indications of a flatter distribution in mass ratios for the shorter-period binaries, possibly because stars that form close to each other become closer in mass by reaching an equilibrium in mass transfer.
We adopt a lognormal distribution in binary periods $P$ (in days) with mean $\mu_{\log P} = 4.8$ and standard deviation $\sigma_{\log P}=2.3$ \citep{duquennoy91}. We first consider the formation of NS-NS binaries from primordial NS-WD binaries through a single exchange interaction. With the masses of the primary and secondary independently drawn from the IMF, we expect a fraction $f_\mathrm{NS-WD,pri} \simeq 8.81 \times 10^{-4}$ of all primordial binaries to be a NS-WD binary. Together with the stars and compact remnants that are heavier than the MS stars, the binary sinks to the core of a GC. During deep core collapse the NS-WD undergoes a three-body exchange interaction with a NS for which we adopt the number fraction $f_\mathrm{NS} \simeq 0.547$ of M\,15 \citep{dull97}. We only count the NS-WD binaries with separations in the range $[0.054,1.669]$\,R$_\odot$. The lower limit is set to avoid stable mass transfer, while the upper limit increases after the exchange interaction to the maximum separation of $3.93$\,R$_\odot$\ allowed for merger of the dynamically formed NS-NS binary within $\simeq 6$\,Gyr. Given the above lognormal distribution in binary periods, the fraction of NS-WD binaries in this range of separations is $f_\mathrm{sep} \simeq 1.83 \times 10^{-2}$. The corresponding mean separation is $\overline{a} \simeq 0.829$\,R$_\odot$. We now obtain the rate from eq.~(\ref{eq:nucoll}) with $\Rmin = \overline{a} + r_\mathrm{NS} \simeq 0.829$\,R$_\odot$, $f_1 = f_\mathrm{pri} \, f_\mathrm{NS-WD,pri} \, f_\mathrm{sep} \simeq 3.23 \times 10^{-7}$, $f_2 = f_\mathrm{NS} \simeq 0.547$, and $m_1 + m_2 \simeq 2 \times 1.4 + 0.5 = 3.3$\,M$_\odot$.
In this way, we predict that the rate $\Rdyn$ to turn a primordial NS-WD binary dynamically into a NS-NS binary that can merge within the next $\simeq 6$\,Gyr, is a factor $\Rdyn/\Rcol \simeq 4.02 \times 10^{-2}$ smaller than the collision rate, and a factor $\Rdyn/\Rtid \simeq 0.618 \times 10^{-2}$ smaller than the tidal capture rate. We now turn to the additional dynamical formation of a NS-NS binary from a primordial WD-WD binary through two subsequent three-body exchange interactions with a NS. We assume that the first exchange to form the NS-WD binary happens some time before deepest core collapse, with a formation rate that can be computed in the same way as above. The fraction of primordial binaries that are WD-WD binaries is about $f_\mathrm{WD-WD,pri} \simeq 1.03 \times 10^{-2}$. We count the WD-WD binaries with separations in the range $[0.043,0.708]$\,R$_\odot$, with the lower limit again to avoid stable mass transfer, while the upper limit increases after two subsequent exchange interactions to the maximum separation of $3.93$\,R$_\odot$\ allowed for merger of the dynamically formed NS-NS binary within $\simeq 6$\,Gyr. This range in separation corresponds to a fraction $f_\mathrm{sep} \simeq 1.10 \times 10^{-2}$ and mean separation $\overline{a} \simeq 0.371$\,R$_\odot$. We then use as before eq.~(\ref{eq:nucoll}) with $\Rmin \simeq 0.371$\,R$_\odot$, while we set $f_1 = f_\mathrm{pri} \, f_\mathrm{WD-WD,pri} \, f_\mathrm{sep} \simeq 2.27 \times 10^{-6}$, and $m_1 + m_2 \simeq 1.4 + 2 \times 0.5 = 2.4$\,M$_\odot$. As in Section~\ref{sec:evolution}, we assume $f_2 = f_\mathrm{NS}(t)$ to vary from its primordial value to $0.547$ during the early-phase collapse, and then to remain the same during the late-phase collapse. We integrate the resulting rate of exchange interactions between a primordial WD-WD binary and single NS over the full duration of the core collapse phase.
This yields the expected number ($\simeq 4.24 \times 10^{-4}$) of dynamically formed NS-WD binaries with separations within the above range. We then divide this number by the number of stellar systems ($\simeq 2.86 \times 10^3$) in the core $N_c = (4\pi/3) n_c r_c^3$ at deepest core collapse to arrive at a fraction $f_\mathrm{NS-WD,dyn} \simeq 1.48 \times 10^{-7}$. Next, we repeat the rate computation but now for dynamically forming a NS-NS binary in a second exchange interaction with $f_1 = f_\mathrm{NS-WD,dyn}$, $f_2 = f_\mathrm{NS} \simeq 0.547$, $m_1 + m_2 \simeq 2.4$\,M$_\odot$, and $\Rmin = 0.873$\,R$_\odot$. The latter value arises from the above mean separation $\overline{a} \simeq 0.371$\,R$_\odot$\ after the first exchange interaction, multiplied by the (median) increase after the second exchange interaction. In this way, we find that the rate to dynamically form a NS-NS binary from a primordial WD-WD binary through two subsequent three-body exchange interactions is smaller than the collision rate by a factor $\Rdyn/\Rcol \simeq 1.94 \times 10^{-2}$, and smaller than the tidal capture rate by a factor $\Rdyn/\Rtid \simeq 0.299 \times 10^{-2}$. By combining the above two formation channels, we thus expect that the rate to dynamically turn primordial binaries into compact NS-NS binaries that can merge in time is smaller than the tidal capture rate by a factor $\simeq 0.916 \times 10^{-2}$. In case that the mass ratio $M_2/M_1$ of the primordial binary is uniformly distributed, the fraction of primordial NS-WD and WD-WD binaries increases to $f_\mathrm{NS-WD,pri} \simeq 4.76 \times 10^{-3}$ and $f_\mathrm{WD-WD,pri} \simeq 7.18 \times 10^{-2}$, i.e., a factor $\simeq 5.41$ and $\simeq 6.98$ higher than when $M_1$ and $M_2$ are independently drawn. As a result, the rate to form compact NS-NS binaries increases by a factor $\simeq 5.92$, but is still only a fraction $\simeq 5.43 \times 10^{-2}$ of the tidal capture rate. 
A similar calculation for compact BH-NS binaries dynamically formed from primordial binaries shows that also in this case the minimum separations of primordial binaries with a MS star as a member are larger than the maximum separation of $\lesssim 6.34$\,R$_\odot$\ for which the BH-NS can merge within $\simeq 6$\,Gyr. However, BH-NS binaries can form dynamically from primordial NS-WD or BH-WD binaries via a single exchange interaction with respectively a BH or NS, or from primordial WD-WD binaries via two subsequent exchange interactions with both a NS and a BH. We compute the rate for each of these formation channels in the same way as above. We assume that the fraction of BHs relative to the (changing) fraction of NSs remains equal to the primordial fraction, i.e., $f_\mathrm{BH}/f_\mathrm{NS} \simeq 0.283$ throughout. If instead of choosing the masses of the primary $M_1$ and secondary $M_2$ of the primordial binary independently, we assume that the mass ratio $M_2/M_1$ is uniformly distributed, the fraction of primordial NS-WD, BH-WD and WD-WD binaries increases by a factor $5.41$, $1.73$, and $6.98$, respectively. At the end, the combination of the three formation channels yields a formation rate of compact BH-NS binaries that is smaller than the tidal capture rate between a single BH and NS by a factor $\simeq 0.593 \times 10^{-2}$ if $M_1$ and $M_2$ are independently drawn, and a factor $\simeq 1.88 \times 10^{-2}$ when $M_2/M_1$ is uniformly distributed. \subsubsection{Compact binaries from tidally captured binaries} \label{sec:fromtidallycapturedbinaries} It is clear from the above calculations that in the cores of GCs close encounters between single compact objects are significantly more likely than mergers of compact binaries that are dynamically formed from primordial binaries. When starting instead from binaries that are formed through tidal captures the chance of dynamically forming and merging a compact binary can be much higher as we show below.
As before, binaries with a MS as a member are omitted, because they result after exchange interaction(s) in compact binaries with minimum separations that are larger than the maximum separation allowed for them to merge in time. We first consider the formation of NS-NS binaries from tidally captured NS-WD binaries. To compute the fraction of tidally captured NS-WD binaries, we integrate the tidal capture rate between a NS and WD over time until deepest core collapse, and then divide by the number of stellar systems in the core. The tidal capture rate is given by eq.~(\ref{eq:nucoll}) after substitution of $\Rmin$ from eq.~(\ref{eq:Rmintid}). We assume $f_1 = f_\mathrm{NS}(t)$ and $f_2 = f_\mathrm{WD}(t)$ to vary during the early-phase collapse from their primordial values to respectively $0.547$ and $0.218$, and then to remain the same during the late-phase collapse. This yields the expected number ($\simeq 8.86$) of tidally captured NS-WD binaries with (pericenter) separations up to $0.109$\,R$_\odot$. This is well below the upper limit of $1.669$\,R$_\odot$\ that increases after the exchange interaction to the maximum separation of $3.93$\,R$_\odot$\ allowed for merger of the dynamically formed NS-NS binary within $\simeq 6$\,Gyr. The lower limit to avoid stable mass transfer is $0.054$\,R$_\odot$, and gives rise to a fraction $f_\mathrm{sep} \simeq 0.511$ of tidally captured NS-WD binaries with separations in this allowed range, and with corresponding mean separation of $\overline{a} \simeq 0.093$\,R$_\odot$. Finally, we divide by the $\simeq 2.86 \times 10^3$ stellar systems in the core to arrive at a fraction $f_\mathrm{NS-WD,tid} \simeq 1.72 \times 10^{-3}$ of tidally captured NS-WD binaries.
The calculation for the subsequent exchange interaction with a NS is the same as above in Section~\ref{sec:fromprimordialbinaries} for the formation of NS-NS binaries from primordial NS-WD binaries, except that in eq.~(\ref{eq:nucoll}) we now have $f_1 = f_\mathrm{NS-WD,tid}$. In this way, we estimate that the rate $\Rdyn$ to turn a tidally captured NS-WD binary dynamically into a NS-NS binary that can merge within the next $\simeq 6$\,Gyr is a factor $\Rdyn/\Rtid \simeq 3.70$ larger than the tidal capture rate. We now turn to the additional dynamical formation of a NS-NS binary from a tidally captured WD-WD binary through two subsequent three-body exchange interactions with a NS. Since the tidal capture rates between two WDs and between a WD and a NS are similar (Table~\ref{tab:rates}), we expect the rate to be significantly less than just estimated for NS-NS binary formation from a tidally captured NS-WD binary, which only needs one subsequent exchange interaction. We assume that both the tidal capture of the two WDs and the first subsequent exchange happen before deepest core collapse. This means we first calculate the fraction of WD-WD binaries formed through tidal capture as function of time, i.e., $f_\mathrm{WD-WD,tid}(t)$. The calculation is the same as for $f_\mathrm{NS-WD,tid}$ above, but with $f_1 = f_2 = f_\mathrm{WD}(t)$, and we integrate the tidal capture rate up to time $t$, followed by division with the number of stellar systems in the core $N_c(t) = (4\pi/3) n_c(t) r_c(t)^3$ at that time. To have the WD-WD binary (pericenter) separations up to $0.092$\,R$_\odot$\ within the allowed range of $[0.043,0.708]$\,R$_\odot$, implies $f_\mathrm{sep} \simeq 0.530$ and a mean separation of $\overline{a} \simeq 0.072$\,R$_\odot$. Next, we substitute $\Rmin = \overline{a} + r_\mathrm{NS} \simeq 0.072$\,R$_\odot$, $f_1 = f_\mathrm{WD-WD,tid}(t)$, $f_2 = f_\mathrm{NS}(t)$ and $m_1 + m_2 \simeq 2 \times 0.5 + 1.4 = 2.4$\,M$_\odot$\ in eq.~(\ref{eq:nucoll}).
Integrating the resulting rate over the full duration of the core collapse phase, yields the expected number ($\simeq 4.08 \times 10^{-3}$) of dynamically formed NS-WD binaries from tidally captured WD-WD binaries. We then divide by the $\simeq 2.86 \times 10^3$ stellar systems in the core at deepest collapse, to arrive at a fraction $f_\mathrm{NS-WD,dyn} \simeq 1.43 \times 10^{-6}$. As above in Section~\ref{sec:fromprimordialbinaries}, we repeat the rate calculation for dynamically forming a NS-NS binary in a second exchange interaction with a NS. In this way, we find that the rate to form a NS-NS binary dynamically from a tidally captured WD-WD binary through two subsequent three-body exchange interactions is smaller than the tidal capture rate by a factor $\Rdyn/\Rtid \simeq 0.555 \times 10^{-2}$. By combining the above two formation channels, we thus expect that the rate to dynamically turn tidally captured binaries into compact NS-NS binaries that can merge in time is larger than the tidal capture rate by a factor $\simeq 3.71$. As expected, this factor is mainly the result of the first formation channel, of which the rate is much larger, by a factor $\simeq 667$, than the second formation channel. Next, we estimate in a similar way also the rates to dynamically form BH-NS binaries from tidally captured NS-WD or BH-WD binaries via a single exchange interaction with respectively a BH or NS, or from tidally captured WD-WD binaries via two subsequent exchange interactions with both a NS and a BH. From the three formation channels together, we predict a formation rate of BH-NS binaries that is larger than the tidal capture rate between a single BH and NS by a factor $\simeq 8.42$. Again, the contribution from the third formation channel that involves two subsequent exchange interactions is much smaller, by factors $\simeq 72.8$ and $\simeq 371$, than the first and second formation channel.
\subsubsection{Merger versus encounter rate} \label{sec:mergervsencounter} The above calculations provide an estimate of the relative frequencies of (binary) mergers and close encounters between two compact objects in the cores of GCs. Combining the results from Sections~\ref{sec:primordialcompactbinaries} -- \ref{sec:fromtidallycapturedbinaries}, we estimate for a compact NS-NS (BH-NS) binary a higher merger than tidal capture rate by a factor $\simeq 3.7$ ($\simeq 8.4$). These estimates can be further improved by allowing a range in WD and MS star masses (instead of adopting mean masses of $0.5$\,M$_\odot$\ and $0.4$\,M$_\odot$, respectively), by sampling the full distribution in the ratio of binary separations before and after a three-body exchange interaction (instead of taking the median value of $a_\mathrm{fin}/a_\mathrm{ini}$), and by including the formation and subsequent merger of compact binaries throughout the lifetime of a GC (instead of concentrating on deepest core collapse with a peak around $z \simeq 0.7$). Also the binaries will interact among themselves, so that four-body interactions could become important, especially since the number of tidally captured binaries is increasing; when for all ten possible binary pairings from a BH, NS, WD and MS, we integrate the corresponding tidal capture rate up to the time of deepest collapse, we find a total of $\simeq 207$ dynamically formed binaries. Given the $\simeq 2.86 \times 10^3$ stellar systems in the core, we obtain a binary fraction of $\simeq 7.23$\,\% compared to a primordial binary fraction of a few per cent. These and other improvements are clearly interesting but beyond the scope of this paper. From the above estimates, we can conclude that in the cores of GCs the rate of close encounters between two single compact objects can become similar to the rate of merging of two compact objects in a binary.
The latter compact binaries most likely are formed through a three-body exchange interaction with a binary which itself originated from a tidal capture between a compact and non-compact (WD) stellar object (Section~\ref{sec:fromtidallycapturedbinaries}). The alternative dynamical formation of compact binaries from primordial binaries (Section~\ref{sec:fromprimordialbinaries}) is less likely by about two orders of magnitude. A main reason is that the fraction of primordial binaries in GCs is at most a few per cent, which in turn also implies that the number of primordial compact binaries in GCs is negligible, while they are thought to be the dominant source of SGRBs in the ``field'' of a galaxy (Section~\ref{sec:primordialcompactbinaries}). While SGRBs are commonly assumed to originate from compact binary mergers, we thus find that close encounters between two compact objects in the core of GCs can also provide a significant contribution. \section{Discussion}\label{sec:disc} Dense stellar systems in some ways are like an ecological network, where feedback is extremely important and apparently isolated events can have far-reaching consequences. In a globular cluster, as we discussed, dynamical interactions between passing stars can form new binaries and modify the properties and even the membership of existing binaries. Motivated by this, we have investigated the production of compact binaries via two and three body encounters. We find that event rates within globular clusters are expected to be significant, and can become similar to the overall production of merging compact binaries. This hints at the underlying possibility that SGRB progenitors may not be entirely restricted to the most widely favored scenario involving the merger of compact binaries in the field.
Much of our effort in this section will therefore be dedicated to determining what are the expected characteristics of SGRBs arising from these encounters and how do they compare to recent observational constraints. \subsection{Prospects for the production of SGRBs}\label{sec:SGRBs} It is clear from the calculations presented in Section~\ref{sec:hydro} that the formation of an accretion disk around the primary is a robust result, regardless of the initial orbital parameters (as long as disruption occurs, of course). The particulars of each case, however, are variable in several aspects, and can lead to interesting diversity. First, and most importantly, the resulting disk mass is not always equal. This is crucial in terms of the available energy in the system, as it sets the overall scale for an accretion powered event that could produce a SGRB. We find that even for the relatively small variation in angular momentum (or equivalently, impact parameter) for black hole--neutron star encounters the resulting disk mass, $M_{\rm disk}$, varies almost by a factor of three. We have previously estimated \citep{lrrg05,lrr07} that the energy that can be potentially extracted from such disks to produce a GRB scales as $M_{\rm disk}^{2}$ for neutrino powered events, and as $M_{\rm disk}$ for magnetically dominated bursts. The range in mass thus possibly spans an order of magnitude when converted to total energy release. Second, the nature of the primary itself can produce a different outcome. The ``cleanest'' scenario involving a black hole leads to accretion in the hypercritical regime discussed above. However, for double neutron star collisions, a range of possibilities remains. For the one calculation of this type we have performed, at the end of the simulation the central core is rapidly rotating and is surrounded by a massive envelope. 
If the core can avoid collapse, it is possible that the rapid rotation will wind up the magnetic field to large magnetar-like values \citep{price06} and allow for repeated episodes of energy release \citep{usov92,kluzniak98}. Third, the presence of large tidal tails in which material will fall back onto the central object at a later time is a generic feature of the present set of calculations. The mass involved in these structures is considerably larger than for binary mergers for the reasons already mentioned in Section~\ref{sec:tails}. As the impact parameter in the black hole neutron star collisions increases, the mass in the tail can become even larger than that in the disk. Thus the properties of the fall back material will dominate the behavior at late times \citep{lrrlc09}. \begin{figure} \begin{center} \includegraphics[width=0.4\textwidth]{fig17a.eps} \hfill \includegraphics[width=0.4\textwidth]{fig17b.eps} \hfill \includegraphics[width=0.4\textwidth]{fig17c.eps} \end{center} \caption{Differential distribution of mass with specific energy for the fluid at the end of runs L$_{0}$, L$_{0}\Gamma_{5/3}$, L$_{1}$, L$_{2}$ and L$_{2}\Gamma_{5/3}$. The thick (thin) lines are for $\Gamma=2$ ($\Gamma=5/3$). The vertical line marks the morphological division between matter that lies in the torus around the black hole and that contained in the tidal tails. A fraction of the latter has positive energy and will escape the system.} \label{fig:dmdeL} \end{figure} The differential energy distribution in the tails is shown in Figure~\ref{fig:dmdeL}. Material with negative energy is either in the torus surrounding the black hole, or in a portion of the tail that is bound to it. Fluid with positive energy will eventually leave the system. The thick black vertical line in each plot separates the torus and the tails morphologically, making it clear that a substantial portion of the tails is bound and will fall back. 
In the simple analytical estimates performed initially for stellar interactions with supermassive black holes, the distribution of mass with specific energy of the material from the disrupted star was constant, thus giving rise to a fall back accretion rate $\dot{M}_{\rm fb} \propto t^{-5/3}$, computed assuming ballistic trajectories \citep{rees88}. We have computed the corresponding accretion rate here as well, also by assuming that the fluid in the tails is on ballistic trajectories in the potential well of the central mass (allowing us to follow it for a much longer time). The more complex interaction we have outlined for the case of comparable masses produces a different decay law, closer to $\dot{M}_{\rm fb} \propto t^{-4/3}$, see Figure~\ref{fig:mdotfall}. There is also variability on shorter time scales superposed on this decay, due to inhomogeneities in the tidal tails. Characterizing this requires a full hydrodynamical and thermodynamical treatment of the motion in the tails at late times \citep{rosswog07,lrrlc09,metzger09}. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig18.eps} \caption{The fall back accretion rate onto the central object from tidal tails (in solar masses per second) is shown for runs L$_{0}$, L$_{0}\Gamma_{5/3}$, L$_{1}$, L$_{2}$ and L$_{2}\Gamma_{5/3}$, along with a reference power law with decay index $-4/3$. The thick (thin) lines are for $\Gamma=2$ ($\Gamma=5/3$).} \label{fig:mdotfall} \end{figure} Essentially, the bulk of the material in the tail will return to the vicinity of the compact object within a few seconds. As it has finite angular momentum, it will not directly impact the primary but describe an eccentric orbit around it. Moreover, since it is not composed of test particles but is a fluid stream, dissipation close to periastron will circularize the orbit at a radius roughly equal to the distance from which it was ejected in the first place. 
If angular momentum transport processes are present and the gas is able to cool with some efficiency, it will then form an accretion disk and feed the primary with mass and energy. The viscous time scale will be much longer than the dynamical re-injection time scale, and thus for transport purposes it will be as if a disk had been formed practically impulsively around the black hole (or neutron star), subsequently evolving on a secular time scale. It is thus possible, in principle, to account generically in this scenario for a prompt episode of energy release as well as for activity many dynamical time scales later. Finally, we have shown also that the nature of the secondary (neutron star vs. white dwarf) will lead to a substantially different final configuration. In the white dwarf case the accretion disk is much larger, and thus the densities are substantially lower (the total mass being comparable). The key question in this case is whether the gas can find an efficient mechanism to cool, and thus accrete. Otherwise the material will only be blown to large radii and not release enough gravitational binding energy to account for the energy budget of a GRB. \subsection{Time delay} \label{sec:timedelay} The observed event rate of SGRBs as function of redshift can provide constraints on a time delay between the formation of the progenitors and the explosion \citep{guetta05, guetta06, nakar06, bloomprochaska06}. SGRBs exhibit great diversity in terms of their host galaxies, and a cursory comparison of the redshift distribution of SGRBs with the universal star formation rate (SFR) reveals what appears to be a significant time delay of a few Gyr. A large progenitor lifetime would help explain the apparent high incidence of galaxy cluster membership \citep{Ped05,bloom06,berger07b}, while on the other hand, shorter lifetimes are required to explain the population of SGRBs at moderately high redshift \citep{berger07a,graham09}. 
Short delays have been pointed out as a possibility for merging binaries directly from population synthesis calculations \citep{belczynski02,belczynski06,osh08}, and could be responsible for a significant fraction of the observed event rate. An alternative approach for constraining the distribution of time delays may be to use the event rates of SGRBs in different types of galaxies \citep{gal-yam08,zheng07}. On average, early-type galaxies have their stars formed earlier than late-type galaxies, and this difference, together with the time delay between progenitor formation and SGRB outburst, inevitably leads to different burst rates in the two types of galaxies. For instance, the morphological types for SGRBs reflect a higher incidence of early-type galaxies than Type Ia supernovae and this would suggest associated progenitor lifetimes significantly exceeding a few Gyrs \citep{zheng07}. Making more quantitative statements about the ages of the progenitor systems is not only hampered by small number statistics but also by the lack of robust predictions of the distribution of merger sites as a function of time. \begin{figure} \includegraphics[width=\columnwidth,angle=0,scale=1.]{fig19.ps} \caption{The predicted cumulative redshift distribution of SGRBs under the assumption that they are produced by tidal capture of two neutron stars in the cores of globular clusters. The meaning of different line styles is the same as in Fig.~\ref{fig:m15_nuencevo}.} \label{fig:m15_nuenccumul} \end{figure} Observational evidence for a significant time delay between tracers of star formation and SGRB outburst excludes that the production of SGRBs is exclusively related to the short time-scale for evolution and death of massive stars, which are believed to be the progenitors for long GRBs. 
However, significant time lags between the cosmic star formation rate and the SGRB redshift distribution occur naturally if the progenitor is a compact binary in the field that merges \citep{cenko08,salvaterra08,hopman06}, as well as in the case of a close encounter between two compact stellar objects in a dense stellar environment. For a merger, the time delay reflects the time for the two compact objects to merge by emission of gravitational waves, while for close encounters a significant rate is only achieved when the stellar density rises significantly through the core collapse of a stellar cluster. Figure~\ref{fig:m15_nuenccumul} shows our predicted cumulative redshift distribution for the progenitors of SGRBs assuming they are produced by tidal capture of two neutron stars in GCs. Further redshift determinations are required to help differentiate between various ways of forming a short GRB, although, as we argue next in Section~\ref{sec:galacticenvironment}, detailed observations of the astrophysics of individual GRB host galaxies may be essential before stringent constraints on the lifetime of short GRB progenitors can be placed. \subsection{Galactic environment} \label{sec:galacticenvironment} The observed offsets from what has been argued are the plausible hosts, if true, also holds important ramifications for the sort of viable progenitors \citep[see][]{belczynski06,zemp09}. Very large offsets seen from early-type hosts would seem to be at odds with progenitor systems with small systematic kicks such as those occurring in globular clusters, although with such large physical offsets the possibility remains that the association with the putative host is coincidental. On the other hand, based on the small offsets from some low-mass galaxy hosts \citep{prochaska06,soderberg06,berger09,bloomprochaska06}, SGRB progenitors cannot all have large systematic kicks at birth and inherently large delay times from formation. 
Compact binaries in the field are expected to experience a kick, leading to mergers away from their point of origin. The displacement depends on the distribution of kick velocities, merger times, and host masses, with predicted values in the range 10--100\,kpc \citep{fryer99, belczynski06,zemp09}, depending on the formation channel, host galaxy type and mass. By contrast, GCs are expected to show on average moderate displacements: the spatial distribution of GCs peaks around the half-light radius ---typically a few kpc--- of the host galaxy. What is more, due to the large red giant star density in the GC core, there is the possibility for the interaction of the external shock with a denser external medium than that of the IGM \citep{prrl09}. If a significant fraction of SGRBs is indeed produced in GCs we also expect to see a strong preference for this scenario occurring in luminous host galaxies, because the number of GCs increases steeply with the host galaxy luminosity as $\propto L_V^{>1.5}$ \citep{mclaughlin99}, whereas the number of primordial binaries increases only as $\propto L_V$. Red, early-type galaxies are on average more luminous than blue, late-type galaxies, which would lead to a higher incidence of this kind of SGRB in ellipticals when compared to spiral galaxies. The current limited sample indicates that, if unidentified hosts are in fact ellipticals, SGRBs would be approximately evenly distributed between early and late-type hosts \citep{berger09}. However, the morphology of galaxies changes with time due to internal (secular) evolution, and in particular when they merge. As a result, the fraction of late-type to early-type galaxies increases toward higher redshift, and hence SGRBs in GCs might occur also more frequently in spiral galaxies. 
In other words, if (old) GCs are formed in the highest density peaks before reionization \citep[e.g.][]{moore06}, and subsequently were accreted in galaxies through mergers, SGRBs or any other transient connected with GCs would provide a unique tracer of the hierarchical build-up of galaxies. This look-back on galaxy evolution is of course limited by the observability of such transients, both due to their intrinsic brightness and the duration of the event, as well as due to the time delay discussed in Section~\ref{sec:timedelay}. The latter time delay is probably also what limits the possible contribution from intermediate-age GCs which are thought to form in gas-rich galaxy mergers. The formation time of these intermediate-age GCs plus the time for their cores to collapse will significantly shift the potential production of SGRBs towards lower redshift with respect to old GCs. Up to this point, we have argued for the production of SGRBs in GCs as opposed to originating from primordial compact binaries within the host galaxy field, but we have not yet distinguished between mergers or close encounters between compact stellar objects in GCs. Like intermediate-age versus old GCs, we also expect the redshift distribution of SGRBs in the merger scenario to be skewed toward lower redshift than in the encounter scenario. From the global encounter rate in Figure~\ref{fig:m15_nuencspace}, we expect a significant SGRB event rate only below $z \sim 1$. This is still consistent with SGRBs found recently around $z \sim 1$ \citep{berger07a}, but is very challenging or perhaps impossible in the case of the additional time delay for merging compact binaries. \subsection{Nuclear clusters} \label{sec:nuclearclusters} The high stellar densities in which both close encounters and three-body interactions become significant may, besides in the collapsed cores of GCs, also be reached in the nuclei of galaxies. 
Of particular interest are the nuclear (stellar) clusters (NCs) as they might also experience core collapse. NCs have been found in 50\%--75\% of both late-type and early-type galaxies, but they are absent in the elliptical galaxies brighter than absolute magnitude $M_B \sim -19$ (although this could in part be an observational bias due to the presence of steep central cusps), and the frequency of nucleation also falls to zero in dwarf galaxies fainter than $M_B \sim -12$ \citep[see the review by][]{boker08}. NCs have typical half-light radii \citep[$\sim 3.5$\,pc;][]{boker02} similar to GCs, but their typical masses \citep[$\sim 3 \times 10^6$\,M$_\odot$;][]{walcher05} are about an order of magnitude higher, and hence their average density is also higher than that of GCs by the same factor. On the other hand, the half-mass relaxation time of NCs is about an order of magnitude longer than those in GCs, both due to their higher typical mass as well as due to the higher velocity dispersion of the surrounding galaxy. The latter results in a flow of energy from the galaxy to the NC, which opposes core collapse and might even lead to core expansion \citep{dokuchaev85, kandrup90}. The result is a minimum compactness of NCs in order to resist expansion \citep{quinlan96, merritt09}, which is close to their observed sizes. This, together with their longer half-mass relaxation times when compared with GCs, argues against NCs experiencing core collapse and thus providing a fertile ground for the production of SGRBs via close encounters between compact stellar objects. In addition, the presence of a possible (super or intermediate) massive black hole in NCs \citep{seth08} inhibits core collapse, and NCs with black holes always expand \citep{merritt09}. Still, it is possible that NCs were formed with a high initial concentration (and without an as yet significantly massive black hole) at the highest density peaks so that their cores collapsed early on. 
Later on the core and NC as a whole expanded, partly at least due to growing heat input from the surrounding galaxy of which the velocity dispersion increases as a result of its (secular and/or hierarchical) evolution. An additional and likely even stronger expansion of a NC is expected in case the host (dwarf) galaxy is stripped away when it is accreted by a larger galaxy; a formation history that is believed to be applicable also to the most massive GCs in the Milky Way \citep[e.g.][]{georgiev09}. In both cases, the currently observed larger size and longer half-mass relaxation time would not be an indication of a possible core collapse in the past. If NCs indeed experienced such a core collapse significantly earlier than GCs, we expect from comparing the earlier collapse and fiducial model in Figure~\ref{fig:m15_nuencspace} that the SGRBs potentially produced through close encounters of compact objects in NCs will be skewed to higher redshift $z>0.7$. At the same time, we expect the SGRB to be located in the center of typically a faint (nucleated) dwarf galaxy. Also, the multiple stellar populations observed in local nuclear clusters \citep[e.g.][]{walcher06} very likely imply on-going star formation at higher redshift. This is all in line with the recently observed high-redshift SGRBs and their host galaxy properties \citep{berger09}, but clearly requires further study of NCs and in particular evidence for past core collapse. We finally note that even though the presence of a central black hole inhibits core collapse and hence prevents short (two-body) relaxation times, secular resonant relaxation can operate on a much smaller time scale. This alternative relaxation process can increase the ellipticity of stellar objects on (near) Keplerian orbits, and hence not only bring them closer to the black hole \citep[e.g.][]{rauch96}, but also to each other. 
This increases again the chance of close encounters, and, if it involves two compact objects, the potential to produce SGRBs in the vicinity of massive black holes. \section{Summary and conclusions}\label{sec:ccl} It is evident from the work presented here that tidal capture in close encounters and collisions between compact stellar objects in the cores of GCs can provide a viable channel for the production of SGRBs at rates which are significant when compared to those from the mergers of primordial compact binaries, in both GCs and the field. For GCs with small primordial binary populations, we have argued that the formation of coalescing compact binaries which are capable of powering a SGRB is dominated by dynamical exchanges of NSs (BHs) with tidally formed WD-NS(BH) binaries and by the close encounters of NSs with NS/BHs. Through a realistic though conservative calculation, we predict an event rate for the tidal capture and collision of two neutron stars in the collapsed core of a GC that steeply increases to $\sim 50$\,yr\,$^{-1}$\,Gpc$^{-3}$\ around $z \sim 0.7$, and is followed by a gradual decline to $\sim 30$\,yr\,$^{-1}$\,Gpc$^{-3}$\ at $z = 0$. This is consistent with the currently observed event rate and redshift distribution of SGRBs. Furthermore, since the number of GCs both steeply increases with galaxy luminosity and peaks at the half-light radius of the host galaxy, we expect SGRBs to appear in both late and early-type galaxies and to be displaced from the galaxy center, in line with the cursory identification of and location in host galaxies. Using detailed hydrodynamics simulations we have explored the collision of NSs with WD/NS/BHs for a range of system parameters, to complement previous knowledge on binary mergers. 
We have shown that close encounters involving neutron stars and/or black holes can account for both a prompt episode of energy release, as well as for activity many dynamical times later through an accretion disk formed from the re-infall of disrupted material expelled in a bound tail. Relatively small changes in the impact parameter of the close encounter, as well as the nature of the secondary clearly result in a significant diversity in the final configuration. The significant amount of material that is dynamically ejected comes from the multiple periastron passages experienced before full tidal disruption takes place, each one carrying a distinctive signature of its thermodynamical history. Thus, while it could contribute significantly to the observed abundance of r-process elements \citep{ls74,ls76,symbalisty82,frei99}, not all of it need do so in principle. \acknowledgments We thank J. Bloom, C. Fryer, J. Guillochon, D. Heggie, P. Hut, V. Kalogera, R. O'Shaughnessy, X. Prochaska, F. Rasio and S. Rosswog for useful discussions and comments on the draft. We thank both referees for constructive comments on this work, which helped improve the final version. Part of this work was carried out during visits to the Institute for Advanced Study in Princeton, the University of California in Santa Cruz, and the Instituto de Astronom\'{\i}a at UNAM, whose hospitality is gratefully acknowledged. This work was supported in part by CONACyT-83254 and DGAPA-UNAM-IN-113007 (WL), NASA NNX08AN88G and the David and Lucile Packard Foundation (ER), UCMEXUS (ER and WL) and NASA through Hubble Fellowship grant HST-HF-01202.01-A, awarded by the Space Telescope Science Institute, which is operated by the Association of Universities for Research in Astronomy, Inc., for NASA, under contract NAS 5-26555 (GvdV).
1,108,101,565,909
arxiv
\section{Introduction} \label{intro} The study of the star formation process and the origin of the stellar initial mass function (IMF), defined as the distribution of stellar masses at the time of birth, are key issues in astrophysics. Since the majority of stars tend to form in clusters or groups, young star clusters are considered to be the fundamental units of star formation (Lada \& Lada 2003). Young star clusters are a useful tool to study the IMF as they contain a statistically significant number of young stars of rather similar age spanning a wide range of masses. Since these objects are not affected by the dynamical evolution as the ages of these objects are significantly less in comparison to their dynamical evolution time, the present day mass function (MF) of these objects can be considered as the IMF. However, a recent study by Kroupa (2008) argues that even in the youngest clusters, it is difficult to trace the IMF, as clusters evolve rapidly and therefore eject a fraction of their members even at a very young age. In the last decade, there have been a large number of studies in great detail in several young clusters within 2 kpc of the Sun investigating these issues (e.g., Lada \& Lada 2003, Pandey et al. 2008, Jose et al. 2008). Although the theoretical expectation is that the IMF of a cluster should depend on the location, size, metallicity, density of the star forming environment and other conditions such as temperature or pressure (Zinnecker 1986; Larson 1992; Price \& Podsiadlowski 1995), for clusters located within 2 kpc, there is no compelling evidence for variation in the stellar IMF above the solar mass (e.g. Meyer et al. 2000; Kroupa 2002; Chabrier 2005). 
With the aim of understanding the star formation process and IMF in/around young star clusters, we selected a young cluster NGC 1624 ($\alpha_{2000}$ = $04^{h}40^{m}38^{s}.2$; $\delta_{2000}$ = $+50^{\circ}27^{\prime}36^{\prime\prime}$; l=155.36; b=+2.62) associated with the bright optical \mbox{H~{\sc ii}~} region Sh2-212 (Sharpless 1959). A colour composite image using the bands $B$, blue; \mbox{[O~{\sc iii}]}~, green; and \mbox{[S~{\sc ii}]}~, red for an area $ \sim 10\times10$ arcmin$^2$ centered at NGC 1624 is shown in Fig. \ref{cfht} (left panel), where the cluster seems to be embedded in the \mbox{H~{\sc ii}~} region. The cluster is located significantly above the formal galactic plane ({\it Z} $\sim$ 250 pc) for an estimated distance of 6.0 kpc (cf. Sect. \ref{distance}). The kinematic and spectrophotometric distances to NGC 1624 vary from 4.4 kpc (Georgelin \& Georgelin 1970) to 10.3 kpc (Chini \& Wink 1984). An IRAS point source (IRAS 04366+5022) with colours similar to that of the ultra-compact \mbox{H~{\sc ii}~} (UC\mbox{H~{\sc ii}~}) region (Wood \& Churchwell 1989) is located at the periphery of Sh2-212. The molecular gas distribution of this region was mapped by CO observations (Blitz et al. 1982; Leisawitz et al. 1989; Deharveng et al. 2008). Particularly, Deharveng et al. (2008) studied the region using $J = 2-1$ lines of $^{12}$CO and $^{13}$CO and reported a bright and thin semi-circular structure of molecular gas (in the velocity range -34.0 kms$^{-1}$ to -32.7 kms$^{-1}$) in $^{13}$CO at the rear side of Sh2-212 along with a filamentary structure (-36.8 kms$^{-1}$ to -35.9 kms$^{-1}$) extending from southeast to northwest. The semi-circular ring itself contains several molecular clumps, the most massive of which (-36.1 kms$^{-1}$ to -35.1 kms$^{-1}$) contains a massive young stellar object (YSO) which is the exciting source of the associated UC\mbox{H~{\sc ii}~} region (see Fig. 1). 
They concluded that Sh2-212 is a good example of massive-star formation triggered via the collect and collapse process. They also reported the flow of ionized gas and suggested that this may be the indication of `Champagne flow' towards the north of Sh2-212. A careful view of Fig. 1 (right panel) reveals that the central region of NGC 1624 is relatively devoid of gas and dust, whereas the outer regions, particularly east, south-east and west seem to be obscured by molecular gas. However, it is to be noted that the semi-circular structure containing clumps is located at the rear side of the cluster. The present study is an attempt to understand the stellar content, young stellar population and the form of IMF/ $K$-band luminosity function (KLF) of the cluster NGC 1624 associated with Sh2-212 using our optical and radio continuum observations along with the near-infrared (NIR) archival data. In Sections 2 and 3, we describe the observations, data reductions and archival data used in the present work. Sections 4 to 8 describe various cluster parameters and young stellar properties derived using optical, NIR and radio continuum data. Sections 9 and 10 describe the IMF and KLF of the region and in section 11 we have summarized the results. \section{OBSERVATIONS AND DATA REDUCTIONS} In the following sections we describe the observations and data reductions carried out in order to have a detailed study of NGC 1624. \subsection{Optical CCD Photometry} \label{obs} The CCD $UBVRI$ observations of NGC 1624 were carried out using Hanle Faint Object Spectrograph and Camera (HFOSC) of the 2-m Himalayan Chandra Telescope (HCT) of Indian Astronomical Observatory (IAO), Hanle, India on 2004 November 3. The 2048 $\times$ 2048 CCD with a plate scale of 0.296 arcsec pixel$^{-1}$ covers an area of $\sim$ 10$\times$10 arcmin$^2$ on the sky. We took short and long exposures in all filters to avoid saturation of bright stars. 
PG 0231 field from Landolt (1992) was observed to determine atmospheric extinction as well as to photometrically calibrate the CCD frames on the same night. The log of observations is tabulated in Table \ref{obslog}. The CCD frames were bias-subtracted and flat-field corrected in the standard manner using various tasks available under IRAF\footnote{IRAF is distributed by National Optical Astronomy Observatories, USA}. Aperture photometry was done for the standard stars of PG 0231 field and the following calibration equations were derived using a least-squares linear regression:\\ \noindent $(U-B) = (1.269\pm 0.020) (u-b) - (2.617\pm 0.026)$,\\ \noindent $(B-V)=(0.915\pm 0.016) (b-v) - (0.284\pm0.012)$,\\ \noindent $(V-R) = (1.056\pm 0.013) (v-r) - (0.011\pm0.010)$,\\ \noindent $(V-I) = (1.022\pm 0.009) (v-i) + (0.188\pm0.008)$,\\ \noindent $V = v+(0.024\pm 0.011) (V-I) - (0.495\pm0.013)$,\\ where, $u,b,v,r,i$ are the instrumental magnitudes corrected for the atmospheric extinctions and $U,B,V,R,I$ are the standard magnitudes. The standard deviations of the residuals, $\Delta$, between standard and transformed $V$ magnitudes, $(U-B)$, $(B-V)$, $(V-R)$ and $(V-I)$ colours of standard stars were 0.020, 0.045, 0.018, 0.014 and 0.021 mag, respectively. Different frames of the cluster region having same exposure time and observed with the same filters were averaged. Photometry of cleaned frames was carried out using the DAOPHOT-II (Stetson 1987) profile-fitting software. We repeated the observations of NGC 1624 in $V$ and $I_c$ filters to get deeper photometry on 2006 December 12 using the 104-cm Sampurnanand Telescope (ST) of Aryabhatta Research Institute of observational sciencES (ARIES), Naini Tal, India. Log of the observations is given in Table \ref{obslog}. The 2048 $\times$ 2048 CCD with a plate scale of 0.37 arcsec pixel$^{-1}$ covers a field of $\sim 13\times13$ arcmin$^2$ on the sky. 
To improve the signal to noise ratio (S/N), the observations were carried out in binning mode of $2\times2$ pixel. Secondary standards from the HCT observations were used to calibrate the data taken with ST. A combined photometry catalog is made using these two observations and this catalog has typical photometric errors of the order of $\sim$ 0.01 mag at brighter end ($V\sim$ 15), whereas the errors increase towards the fainter end ($\sim$ 0.04 at $V$ $\sim$ 21). The catalog is available in electronic form and a sample table is given in Table \ref{optdata}. In order to check the accuracy of the present photometry, we compared our photometry with the $UBV$ photometry of 14 stars carried out by Moffat et al. (1979). The mean and standard deviation of the difference between Moffat's and our photometry in $V$, $U-B$ and $B-V$ are $0.008 \pm 0.006$, $0.005 \pm 0.015$ and $0.004 \pm 0.006$, respectively, suggesting that the two photometries are in good agreement. To study the luminosity function (LF)/MF, it is necessary to take into account the incompleteness of the present data that could occur due to various factors (e.g., crowding of the stars). We used ADDSTAR routine of DAOPHOT-II to determine the completeness factor (CF). The procedure has been outlined in detail in our earlier work (see e.g., Pandey et al. 2001). Briefly, we randomly added artificial stars to both $V$ and $I$ images taken with ST in such a way that they have similar geometrical locations but differ in $I$ brightness according to mean $(V-I)$ colour ($\sim 1.5$ mag) of the data sample. The luminosity distribution of artificial stars was chosen in such a way that more stars were inserted towards the fainter magnitude bins. The frames were reduced using the same procedure used for the original frames. The ratio of the number of stars recovered to those added in each magnitude interval gives the CF as a function of magnitude. 
Minimum value of the CF of the pair (i.e., $V$- and $I$-bands ) for the cluster region and field region (outside the cluster region), given in Table \ref{cf_opt}, is used to correct the data incompleteness. \subsection {Spectroscopic observations} Low resolution optical spectroscopic observations of 4 optically bright sources of NGC 1624 were made using HFOSC of HCT. The log of observations is given in Table \ref{obslog}. The spectra in the wavelength range 3800-6840 $\AA$ with a dispersion of 1.45 $\AA$ pixel$^{-1}$ were obtained using low resolution grism 7 with a slit having width 2$^{\prime\prime}$. One-dimensional spectra were extracted from the bias-subtracted and flat-field corrected images using the optimal extraction method in IRAF. Wavelength calibration of the spectra were done using FeAr and FeNe lamp sources. Spectrophotometric standard (Feige 110) was observed on 2006 September 08 and flux calibration was applied to the star observed on the same night. \subsection{Radio Continuum Observations} Radio continuum observations at 1280 MHz were carried out on 2007 July 17 using the Giant Metrewave Radio Telescope (GMRT), India. GMRT has a `Y' shaped hybrid configuration of 30 antennae, each of 45 m diameter. Details of the GMRT antennae and their configurations can be found in Swarup et al. (1991). For the observations, the primary flux density calibrators used were 3C48 and 3C286. NRAO Astronomical Image Processing System (AIPS) was used for the data reduction. The data were carefully checked for radio frequency interference or other problems and suitably edited. Self calibration was carried out to remove the residual effects of atmospheric and ionospheric phase corruptions and to obtain the improved maps. \section {Archival data} \subsection {Near-infrared data from 2MASS} NIR $JHK_s$ data for point sources within a radius of 10 arcmin around NGC 1624 have been obtained from Two Micron All Sky Survey (2MASS) Point Source Catalog (PSC) (Cutri et al. 2003). 
To improve photometric accuracy, we used photometric quality flag (ph$\_$qual = AAA) which gives a S/N $\ge$ 10 and a photometric uncertainty $ <$ 0.10 mag. This selection criterion ensures best quality detection in terms of photometry and astrometry as given on the 2MASS website\footnote {http://www.ipac.caltech.edu/2mass/releases/allsky/doc/}. The $JHK_s$ data were transformed from 2MASS system to the California Institute of Technology (CIT) system using the relations given by Carpenter (2001). We used this data set to calibrate the NIR archival data from Canada-France-Hawaii Telescope (CFHT) (see Sect. \ref{cfhtdata}) and also to produce the radial density profile of NGC 1624 (see Sect. \ref{rd}). \subsection {Near-infrared data from CFHT} \label{cfhtdata} NIR data for the region were obtained from the Canadian Astrophysical Data Centre's (CADC) archive program. The NIR observations of the region were taken on 2002 October 20 (PI: L. Deharveng) using the instrument CFHT-IR at the 3.56-m CFHT. The 1024 $\times$ 1024 pixel HgCdTe detector with a plate scale of 0.211 arcsec/pixel was used for the observations. The catalog by Deharveng et al. (2008) lists a total of 891 sources in $JHK$ bands. Since our aim was to study the KLF of the region, where the estimation of the completeness of the photometry (ref. Sect. \ref {obs}) was necessary, we re-reduced the CFHT observations. We used dithered images at 9 different locations having 10 frames at each position around the UC\mbox{H~{\sc ii}~} region of this field. Flat frames and sky frames were made from the median combined object frames. The sky subtracted and flat field corrected dithered images in each band were aligned and then combined to achieve a higher S/N. The final mosaic image covers an area of $5^{\prime}.2 \times 5^{\prime}.2$ with the UC\mbox{H~{\sc ii}~} region at the centre and is shown in Fig. \ref{cfht}. Photometry of the processed images were obtained using the DAOPHOT-II package in IRAF. 
Since the region was crowded, we performed PSF photometry on the images. The 2MASS counterparts of the CFHT sources were searched within a match radius of 1 arcsec. The CFHT instrumental magnitudes were compared to the selected 2MASS magnitudes to define a slope and zero point for the photometric calibration. The rms scatter between the calibrated CFHT and 2MASS data (i.e., $2MASS - CFHT$ data) for the $J, H$ and $K$-bands were 0.07, 0.08 and 0.06, respectively. In order to check the photometric accuracy, we compared our photometry with the photometry reported by Deharveng et al. (2008). The average dispersion between these two samples was $\sim$ 0.1 mag in $JHK$ bands with absolutely no shift, which shows that the present photometry is in agreement with the previous study. To ensure good photometric accuracy, we limited our sample with those stars having error $<$ 0.15 mag in all three bands and thus we obtained photometry for 951 sources in $J, H$ and $K$-bands. Additional 31 sources detected only in the $H$ and $K$ bands ($J$ drop out sources) having error $<$ 0.15 mag are also included in our analysis. Data of three saturated sources have been taken from the 2MASS catalog. The detection limits were 19.0, 18.4 and 18.0 mag for $J$, $H$ and $K$-bands, respectively. We combined the optical and NIR catalog within a match radius of 1 arcsec and the final catalog used in the present analysis is available in electronic form and a sample table is shown in Table \ref{optdata}. We estimated the completeness limit of the data using the ADDSTAR routine of DAOPHOT-II. The procedure was the same as mentioned for the optical images (see Sect. \ref{obs}). Completeness was greater than 90$\%$ for magnitudes brighter than 17.0 and reduced to 80 $\%$ for the magnitude range 17.0 - 17.5 in $K$-band. 
We did not find any significant spatial variation of the completeness factor within the entire area of $5^{\prime}.2 \times 5^{\prime}.2$ and hence we used an average completeness factor of the region for our analysis. \section{Structure of the cluster} \subsection{Two dimensional surface density distribution} The initial stellar distribution in star clusters may be governed by the structure of parental molecular cloud and also how star formation proceeds in the cloud (Chen et al. 2004, Sharma et al. 2006). Later evolution of the cluster may then be governed by internal gravitational interaction among member stars and external tidal forces due to the Galactic disk or giant molecular clouds. To study the morphology of the cluster, we generated isodensity contours for stars in $K$-band from CFHT data and is shown in Fig. \ref{ssnd}. The contours are plotted above 3-sigma value of the background level as estimated from the control field. The star mark in Fig. \ref{ssnd} represents the location of the cluster centre (Sect. \ref{rd}). The surface density distribution of the CFHT data reveals prominent sub-structures which seem to be distributed symmetrically around the cluster centre at a radial distance of $\sim$ 35 arcsec. Interestingly, these sub-structures are lying just inside the thin molecular layer shown in Fig. \ref{cfht}. \subsection{Radial stellar surface density and cluster size}\label{rd} The radial extent of a cluster is one of the important parameters used to study the dynamical state of the cluster. We used the star count technique to study the surface density distribution of stars in the cluster region and to derive the radius of the cluster. To determine the cluster centre, we used the stellar density distribution of stars in a $\pm$ 30 pixel wide strip along both X and Y directions around an eye estimated centre. The point of maximum density obtained by fitting a Gaussian curve was considered as the centre of the cluster. 
The coordinates of the cluster centre were found to be $\alpha_{2000}$ = $04^{h}40^{m}38^{s}.2 \pm 1^{s}.0$; $\delta_{2000}$ = $+50^{\circ}27^{\prime}36^{\prime\prime} \pm 15^{\prime\prime}$. To investigate the radial structure of the cluster, we derived the radial density profile (RDP) using the ST observations for $V \le $ 20 mag and 2MASS $K_s$-band data ($K_s \le$ 14.3 mag). Sources were counted in concentric annular rings of 30 arcsec width around the cluster centre and the counts were normalized by the area of each annulus. The densities thus obtained are plotted as a function of radius in Fig. \ref{rad}, where, one arcmin at the distance of the cluster (6.0 kpc, cf. Sect. \ref{distance}) corresponds to $\sim$ 1.8 pc. The upper and lower panels show the RDPs obtained from optical and 2MASS $K_s$-band data, respectively. The error bars are derived assuming that the number of stars in each annulus follows Poisson statistics. Radius of the cluster $(r_{cl})$ is defined as the point where the cluster stellar density merges with the field stellar density. The horizontal dashed line in Fig. \ref{rad} shows the field star density. For the optical RDP, the field star density is determined from the corner of our optical CCD image, whereas for the NIR RDP, the field star density is determined from an area which is 10 arcmin away from the cluster centre. The error limits in the field density distribution are shown using dotted lines. To parametrize the RDP, we fitted the observed RDP with the empirical model of King (1962) which is given by \begin{equation} \hspace{20mm}{\rho (r) = {{\rho_0} \over \displaystyle {1+\left({r\over r_c}\right)^2}}} \end{equation} where $r_c$ is the core radius at which the surface density $\rho(r)$ becomes half of the central density, $\rho_0$. The best fit to the observed RDPs obtained by a $\chi^2$ minimization technique is shown in Fig. \ref{rad}. 
The core radii thus estimated from optical and NIR RDPs are 0.50 $\pm$ 0.06 and 0.48 $\pm$ 0.05 arcmin, respectively. Within errors, the King's profile (Fig. \ref{rad}, solid curve) seems to be merging with the background field at $\sim$ 2.0 arcmin both for the optical and 2MASS data. Hence, we assign a radius of 2.0 arcmin for NGC 1624. Here we would like to point out that the core radius and boundary of the cluster are estimated assuming a spherically symmetric distribution of stars within the cluster. This approach is frequently used to estimate the extent of a cluster. \section{Analysis of optical data} \subsection{Reddening in the cluster} \label{reddening} To study the nature of the extinction law towards NGC 1624, we used two-colour diagrams (TCDs) as described by Pandey et al. (2003). The TCDs of the form of ($V-\lambda$) versus ($B-V$), where $\lambda$ is one of the broad-band filters ($R,I,J,H,K,L$), provide an effective method for separating the influence of normal extinction produced by the diffuse interstellar medium from that of the abnormal extinction arising within regions having a peculiar distribution of dust sizes (cf. Chini \& Wargau 1990; Pandey et al. 2000). The $\frac{E(V-\lambda)}{E(B-V)}$ values in NGC 1624 are estimated using the procedure as described in Pandey et al. (2003). The slopes of the distributions $m_{cluster}$ are found to be identical to the normal values as given in Pandey et al. (2003). Thus we adopt a normal reddening law ($R_V=3.1$) for NGC 1624. In the absence of spectroscopic observations, the interstellar extinction $E(B-V)$ towards the cluster region can be estimated using the $(U-B)/(B-V)$ colour-colour (CC) diagram. The CC diagram of NGC 1624 ($r \le 2^\prime$) is presented in Fig. \ref{ubbv}, where continuous curves represent the empirical zero-age-main-sequence (ZAMS) locus by Girardi et al. (2002). 
The ZAMS locus is reddened by $E(B-V)$ = 0.76 and 1.00 mag along the normal reddening vector (i.e., $E(U - B) /E(B - V )$ = 0.72). Fig. \ref{ubbv} indicates that majority of the $O-A$ type stars have $E(B - V)$ in the range of 0.76 - 1.00 mag. The stars lying within the reddened ZAMS may be probable members of NGC 1624. Using $K/ (J-K)$ colour-magnitude diagram (CMD), Deharveng et al. (2008) have also reported $A_V \sim 3$ mag for the whole region. A careful inspection of the CC diagram indicates the presence of further reddened population which could be the probable background population of the region. The theoretical ZAMS, shown by dashed line, is further shifted to match the reddened sequence. The $E(B - V)$ value for the background population comes out to be $\sim$ 1.15 mag. Reddening of individual stars having spectral types earlier than A0 have also been computed by means of the reddening free index $Q$ (Johnson $\&$ Morgan 1953). Assuming a normal reddening law we can construct a reddening-free parameter index $Q = (U-B) - 0.72\times (B-V)$. For stars earlier than A0, value of $Q$ will be $<$ 0. For main-sequence (MS) stars, the intrinsic $(B-V)_0$ colour and colour-excess can be obtained from the relation $(B-V)_0 = 0.332\times Q$ (Johnson 1966; Hillenbrand et al. 1993) and $E(B-V) = (B-V) - (B-V)_0$, respectively. The individual reddening of the massive stars down to A0 spectral class within NGC 1624 ($r \le 2^\prime$) are found to vary in the range $E(B-V)$ $\simeq$ 0.76 - 1.05 mag implying the presence of differential reddening within the cluster. The $A_V$ values thus calculated for stars up to A0 spectral class have been given in Table \ref{optdata}. Assuming the standard deviation of the residuals (cf. Sect. \ref{obs}) as typical errors in photometry, we estimate a typical error in estimation of $E(B-V)$ as $\sim$ 0.05 mag. 
\subsection {Spectral classification of the bright sources in NGC 1624} \label{slitspec} We carried out low resolution spectroscopy of four optically bright sources within 2 arcmin radius of NGC 1624. These sources are referred as M2, M4, M9 and M8 (see Fig. 6 of Deharveng et al. 2008). The brightest source M2 is the probable ionizing source of Sh2-212 (Moffat et al. 1979). This star was identified as an emission line star of class O5e by Hubble (1922). Moffat et al. (1979) classified this object as O5.5V star, whereas Chini \& Wink (1984) classified it as O6I type star. To determine the spectral type of this star, we extracted low-resolution, one dimensional spectrum. In the top panel of Fig. \ref{spec}, we show the flux calibrated, normalized spectrum of the ionizing source M2 with important lines identified and labeled. Among the Balmer lines, $H{\alpha}$ and $H_{\beta}$ are relatively strong in emission compared to $H{\gamma}$, which is weak in emission. The $H{\delta}$ and $H{\epsilon}$ are in absorption. The other lines found in emission are \mbox{He~{\sc ii}~} $\lambda$ 4686 and \mbox{C~{\sc iii}}~ $\lambda\lambda$ 4647-50. In the case of early type stars, the ratio of \mbox{He~{\sc i}~} $\lambda$ 4471/\mbox{He~{\sc ii}~} $\lambda$ 4542 is a primary indicator of the spectral type. This ratio is found to vary from less than 1 to 1 and greater than 1 as we move from O5 to O7 and later types. The presence of strong \mbox{He~{\sc ii}~} $\lambda$ 4542 in absorption which is often accompanied by weak \mbox{N~{\sc iii}}~ $\lambda\lambda$ 4634-42 emission indicate a MS luminosity class denoted by ((f)). The absorption strength of \mbox{He~{\sc ii}~} $\lambda$ 4686 weakens while \mbox{N~{\sc iii}}~ emission strength increases in intermediate luminosity classes, denoted by (f) category. Finally, the Of super giants show both \mbox{He~{\sc ii}~} and \mbox{N~{\sc iii}}~ in strong emission (Walborn \& Fitzpatrick 1990). 
The ratio of \mbox{He~{\sc i}~} $\lambda$ 4471/\mbox{He~{\sc ii}~} $\lambda$ 4542 for M2 (i.e., Log EW = Log (EW(\mbox{He~{\sc i}~} $\lambda$ 4471)/EW(\mbox{He~{\sc ii}~} $\lambda$ 4542))) is found to be -0.15, implying that this star is likely to be of spectral type earlier than O7. Following Conti \& Alschuler (1971) we assign O6.5 $\pm$ 0.5 spectral type to this star. The weak nature of \mbox{N~{\sc iii}}~ $\lambda\lambda$ 4634-42 indicates that this star is likely to be in the MS. Thus we assign a spectral class of O6.5 $\pm$ 0.5 V for the ionizing source of Sh2-212. The bottom panel of Fig. \ref{spec} shows the low resolution spectrum for the star M4. The absence of \mbox{He~{\sc ii}~} $\lambda$ 4200, \mbox{He~{\sc ii}~} $\lambda$ 4686 and \mbox{Mg~{\sc ii}~} $\lambda$ 4481 indicates that the spectral class of M4 is between B1-B2 (Walborn \& Fitzpatrick 1990). The lack of spectral lines \mbox{Mg~{\sc ii}~} $\lambda$ 4481 and \mbox{Si~{\sc iii}~} $\lambda$ 4552 rules out the possibility of it being an evolved star. A comparison with the low resolution stellar spectra of Jacoby et al. (1984) and Walborn \& Fitzpatrick (1990) suggests a spectral class of B1.5 $\pm$ 0.5 V for this star. The reddening slope E(B-V)/E(U-B) has also been obtained using the spectral types of the M2 (O6.5V) and M4 (B1.5V) stars. The value of the slope using the intrinsic values from Koorneef (1984) / Johnson (1966) comes out to be 0.86 / 0.83 and 0.75 / 0.73 for M2 and M4, respectively. The reddening slope for the B type star agrees well with the value obtained in Sect. \ref{reddening}. We adopt a normal reddening law in the region as mentioned in Sect. \ref{reddening} for further analysis of the data. We also extracted the low resolution spectra (not shown here) for the stars M8 and M9. 
Presence of the spectral lines \mbox{Na~{\sc i}~} $\lambda$ 5893, \mbox{Ca~{\sc i}~} $\lambda$$\lambda$ 6122, 6162, \mbox{Fe~{\sc ii}~} $\lambda$ 6456 and the line strength of \mbox{Fe~{\sc i}~}, \mbox{Ca~{\sc i}~} $\lambda$ 6497 put these two stars in the mid F giant category based on the spectral atlas given by Torres-Dodgen \& Weaver (1993) and Jacoby et al. (1984). \subsection{Optical colour-magnitude diagrams : Distance and age} \label{distance} The optical colour-magnitude diagrams (CMDs) are useful to derive the cluster fundamental parameters such as age, distance etc. Fig. \ref{q} shows dereddened $V_0/(B-V)_0$ CMD for probable cluster members (Sect. \ref{reddening}) lying within $r \le 2^{\prime}$ of NGC 1624. The stars having spectral type earlier than A0 were dereddened individually using $Q$ method as discussed in Sect. \ref{reddening}. The stars labeled as M2, M4, M8 and M9 (following the nomenclature by Deharveng et al. 2008) have spectroscopic observations as discussed in Sect. \ref{slitspec}. The spectral class of the ionizing source (M2; see Sect. \ref{slitspec}) yields intrinsic distance modulus of 14.05 which corresponds to a distance of 6.5 kpc, whereas the spectral class of M4 yields intrinsic distance modulus of 13.8 which corresponds to a distance of 5.8 kpc. The average distance from these two spectroscopically identified cluster members comes out to be 6.15 kpc. We also calculated the individual distance modulus of the remaining 12 probable MS stars (shown as filled circles in Fig. \ref{q}). The intrinsic colours for each star were estimated using the $Q$ method as discussed in Sect. \ref{reddening}. Corresponding $M_V$ values have been estimated using the ZAMS by Girardi et al. (2002). The average value of the intrinsic distance modulus obtained from the 14 stars (2 from spectroscopy and 12 from photometry) comes out to be 13.9 $\pm$ 0.3 which corresponds to a distance of $6.0 \pm 0.8$ kpc. In Fig. 
\ref{q} we have also plotted the theoretical isochrone of 2 Myr ($Z=0.02$; log age = 6.3) by Girardi et al. (2002), shifted for the distance modulus of $(m-M_V)_0$ = 13.90 $\pm$ 0.3, which seems to be matching well with the distribution of the probable MS members of the cluster. Present distance estimate is in agreement with that obtained by Moffat et al. (1979; 6.0 $\pm$ 0.5 kpc), whereas Chini \& Wink (1984) have reported a distance of 10.4 kpc to NGC 1624. The distance estimates by Moffat et al. (1979) and Chini \& Wink (1984) were based on the assumed spectral class of the ionizing source M2 (i.e., O5.5V and O6I, respectively). Here, it is worthwhile to mention that the $M_V$ value for an O6V star in the literature varies significantly; e.g., $M_V$ = -5.5 (Schmidt-Kaler 1982) to -4.9 (Martins et al. 2005). Hence, the distance estimation based on the O-type star alone may not be reliable. However, the present distance estimation is carried out using the O-type star as well as all the probable members earlier to A0 spectral type. The kinematic distance (6.07 kpc) to the region derived by Caplan et al. (2000) is in agreement with the present distance estimation. Since this cluster is located in the outer galactic disk, the possibility of a low metallicity for the region cannot be ruled out, which would imply bluer intrinsic colour for the members and hence a closer distance of NGC 1624. However, in the absence of any metallicity measurements towards this region, we have considered solar metallicity for the region and the distance of NGC 1624 is taken as 6.0 kpc for the present study. The ages of young clusters are typically derived from the post-main-sequence evolutionary tracks for the earliest members if significant evolution has occurred and/or by fitting the low-mass contracting population with theoretical PMS isochrones. 
Since the most massive member of NGC 1624 seems to be an O6.5 MS star, the maximum age of the cluster should be of the order of the MS life time of the massive star, i.e., $\sim$ 4.4 Myr (Meynet et al. 1994). In Fig. \ref{q} we have also shown the isochrone of 4 Myr age by Girardi et al. (2002), which suggests that the maximum post-main-sequence age of the cluster could be $\sim$ 4 Myr. Stars which deviate significantly from the isochrone are likely field stars and are shown by open circles in Fig. \ref{q}, which include stars M8 and M9. Spectroscopic observations of these two stars indicate that they are of mid F giant spectral category (see Sect. \ref{slitspec}) and hence cannot be the cluster members at this assumed distance and age. $V/(V - I)$ CMD for the stars lying within the core of the cluster ($r \le 0^\prime.5$) is shown in Fig. \ref{cmd}a and CMD for the stars outside the core ($0^\prime.5 \le r \le 2^\prime$) is shown in Fig. \ref{cmd}b. In order to find out the field star contamination in the cluster region, we selected a control field having the same area as that of the cluster from the corner of our CCD image. $V/(V - I)$ CMD for the control field is shown in Fig. \ref{cmd}c. Assuming $E(B-V)_{min} =0.76$ mag, $E(B-V)_{max}$ =1.0 mag and using the relations $A_{V}=3.1\times E(B-V)$; $E(V-I)=1.25\times E(B-V)$, we have plotted theoretical isochrone of 2 Myr by Girardi et al. (2002) and pre-main-sequence (PMS) isochrone of 0.5 and 5 Myr (Siess et al. 2000) in Fig. \ref{cmd}. It is evident from this figure that the MS ($V \le $ 16.5) is rather free from field star contamination. Although the CMDs of the cluster region show a significant number of stars towards the right of the 2 Myr isochrone at $(V-I) >2.5 $ and $ V >18 $ mag, a comparison between the cluster and field regions clearly reveals the contamination due to field star population in the CMD of the cluster region. However, the $V/(V - I)$ CMD of the core (Fig. 
\ref{cmd}a) reveals uncontaminated population of PMS stars having ages 0.5 - 5 Myr. As discussed in Sect. \ref{reddening}, there is indication for a population in the background of the cluster which is apparent in Figs. \ref{cmd}b and \ref{cmd}c. Assuming the average $E(B-V)$ = 1.15 mag, we estimate that the distance of the background population is $\sim$ 8 kpc. The study by Pandey et al. (2006) also indicates a background population at a distance of $\sim$ 8 kpc in the second galactic quadrant. \subsection {Emission from ionized gas} \label{ionized gas} Fig. \ref{1280} shows GMRT radio continuum map of Sh2-212 at 1280 MHz made with a resolution of $\sim$ 4$^{\prime\prime}$.9 $\times$ 3$^{\prime\prime}$.2. In the high resolution map, most of the extended diffuse emission associated with the region appears quite faint. However, a compact intense emission can be seen at the position of UC\mbox{H~{\sc ii}~} region (04$^{\rm h}$40$^{\rm m}$27$^{\rm s}$.5, +50$^\circ$28$^\prime$28$^{\prime\prime}$) located at the periphery of Sh2-212 and is marked using an arrow. The UC\mbox{H~{\sc ii}~} region is associated with the IRAS point source IRAS 04366+5022. The overall morphology of the map agrees well with that of our optical colour composite image shown in Fig. \ref{cfht}. Fig. \ref{610} shows an enlarged version of the UC\mbox{H~{\sc ii}~} region at 1280 MHz. The integrated flux densities from the radio continuum contour maps for the evolved \mbox{H~{\sc ii}~} region (i.e., Sh2-212) and UC\mbox{H~{\sc ii}~} region are estimated to be 3.6 $\pm$ 0.4 Jy and 16.5 $\pm$ 0.5 mJy, respectively. Assuming the ionized regions to be spherically symmetric and neglecting absorption of ultraviolet radiation by dust inside the \mbox{H~{\sc ii}~} region, the above flux densities together with assumed distance, allow us to estimate the number of Lyman continuum photons (N$_{Lyc}$) emitted per second, and hence the spectral type of the exciting stars. 
Using the relation given by Mart\'{i}n-Hern\'{a}ndez et al. (2003) for an electron temperature of 10000 K, we estimated log N$_{Lyc}$ = 48.29 and log N$_{Lyc}$ = 45.96 for the evolved \mbox{H~{\sc ii}~} and UC\mbox{H~{\sc ii}~} region, respectively, which corresponds to MS spectral types of $\sim$ O7 and $\sim$ B0.5, respectively (Vacca et al. 1996). On the basis of optical spectroscopy, we estimated spectral type of the ionizing source of Sh2-212 as O6.5V (see Sect. \ref{slitspec}) which is in fair agreement with the above spectral type estimation from integrated radio continuum flux. Using the spectral energy distribution, Deharveng et al. (2008) have found that the source associated with the UC\mbox{H~{\sc ii}~} region is a massive YSO of $\sim$ B0 type ($\sim$ 14 $M_\odot$), which is in agreement with the spectral type $\sim$ B0 obtained in the present work. \section{Analysis of near-infrared data} \label{nir} NIR data are very useful tools to study the nature of young stellar population within the star forming regions (SFRs). Discriminating young stars in clusters from field stars is difficult. Young stars with strong infrared (IR) excess from disks and envelopes can be identified using the NIR and mid-IR (MIR) observations. We used the CFHT deep NIR photometry to study the PMS contents and KLF of NGC 1624. The CFHT $K$-band mosaic image centered on the UC\mbox{H~{\sc ii}~} region covering an area of $5^{\prime}.2 \times 5^{\prime}.2$ is shown in Fig. \ref{cfht} (right panel), where the ionizing source is marked with a white circle. A very rich cluster is apparent around the ionizing source. Since the centre of NGC 1624 is located towards the eastern edge of the CFHT frame, eastern half of the cluster is covered partially. The observations covered an area $\sim$ 9.6 arcmin$^2$ of NGC 1624 and is shown using a partial circle in Fig. \ref{cfht}. A region covering an area $\sim$ 3.1 arcmin$^2$ towards north of the cluster shown by a box in Fig. 
\ref{cfht}, is considered as the control field. In the following sections, we discuss the NIR CC diagram and CMDs. \subsection {Colour-Colour Diagrams} \label{nircc} NIR and MIR photometry are useful tools to investigate the fraction of YSOs in a SFR. In the absence of ground based $L$-band observations or {\it Spitzer} based MIR observations, we used $(J-H)$/$(H-K)$ CC diagram to identify the young stellar population in NGC 1624 (Hunter et al. 1995; Haisch et al. 2000; 2001; Sugitani et al. 2002; Devine et al. 2008; Chavarr\'{i}a et al. 2010). The $(J-H)$/$(H-K)$ CC diagrams for the cluster region (area $\sim$ 9.6 arcmin$^2$) and the control field (area $\sim$ 3.1 arcmin$^2$) are shown in Fig. \ref{jhhk}. The thin and thick solid curves are the locations of unreddened MS and giant stars (Bessell $\&$ Brett 1988), respectively. The dotted and dotted-dashed lines represent the locus of unreddened and reddened ($A_V$ = 4.0 mag) classical T Tauri stars (CTTSs; Meyer et al. 1997). The two long parallel dashed lines are the reddening vectors for the early MS and giant type stars (drawn from the base and tip of the two branches). One more reddening vector is plotted from the tip of the unreddened CTTS locus. The crosses on the reddening vectors are separated by an $A_{V}$ value of 5 mag. The extinction ratios, $A_J/A_V = 0.265, A_H/A_V = 0.155$ and $A_K/A_V=0.090$, are adopted from Cohen et al. (1981). The magnitudes, colours and the curves are in CIT system. Presently YSOs are classified as an evolutionary sequence spanning a few million years as: Class 0/Class I - the youngest embedded protostars surrounded by infalling envelopes and growing accretion disks; Class II - PMS stars with less active accretion disks and Class III - PMS stars with no disks or optically thin remnant disk (Adams et al. 1987). Following Ojha et al. (2004a), we classified sources according to their locations in $(J-H)/(H-K)$ CC diagrams. 
The `F' sources are those located between the reddening vectors projected from the intrinsic colours of MS and giant stars. These sources are reddened field stars (MS and giants) or Class III/Class II sources with little or no NIR excess (viz., weak-lined T Tauri sources (WTTSs) but some CTTSs may also be included). The sources located redward of region `F' are considered to have NIR excess. Among these, the `T' sources are located redward of `F' but blueward of the reddening line projected from the red end of the CTTS locus. These sources are considered to be mostly CTTSs (Class II objects) with large NIR excesses (Lada \& Adams 1992). There may be an overlap in NIR colours of Herbig Ae/Be stars and T Tauri stars in the `T' region (Hillenbrand et al. 1992). The `P' sources are those located in the region redward of region `T' and are most likely Class I objects (protostellar-like) showing large amount of NIR excess. Here it is worthwhile to mention that Robitaille et al. (2006) have shown that there is a significant overlap between protostellar-like objects and CTTSs in the CC diagram. A comparison of the colour distribution of the sources in the cluster and control field (Fig. \ref{jhhk}) suggests that there is an appreciable difference between them. Significant fraction of sources in the cluster region are concentrated between the unreddened and reddened CTTS locus, whereas majority of sources in the control field are mainly concentrated in the `F' region. Statistically, we can safely assume that majority of sources of the cluster region located between the unreddened and reddened CTTS locus are most likely to be cluster members. The comparison also indicates that the sources located in the `F' region could be the reddened field stars but a majority of them are likely candidate WTTSs or CTTSs with little or no NIR excess. 
The sources lying towards the right side of the reddening vector at the boundary of `F' and `T' regions and above the unreddened CTTS locus can be safely considered as YSO/NIR excess sources. A total of 120 such sources have been detected within a $5^{\prime}.2 \times 5^{\prime}.2$ region which fall in the `T' region and above the unreddened CTTS locus. However, this number is certainly a lower limit for the population of YSOs, as several of the cluster members detected in the $H$ and $K$ bands have not been detected in the $J$-band. Moreover, $L$-band or MIR observations would further increase the detection of YSOs in the region. Hence the present $JHK$ photometry provides only a lower limit to the population of YSOs in NGC 1624. The distribution of YSOs in Fig. \ref{jhhk} manifests that majority of them have $A_V$ $\le$ 4 mag. Some of the sources in `F' and `T' regions, which might be the candidate WTTSs/CTTSs, show $A_V$ values higher than 4 mag. The $A_V$ for each star lying in `T' region has been estimated by tracing back to the intrinsic CTTS locus along the reddening vector. The $A_V$ for stars within the cluster region (area $\sim$ 9.6 arcmin$^2$) and located in the `F' region is estimated by tracing them back to the extension of the intrinsic CTTS locus (see Ogura et al. 2007; Chauhan et al. 2009 for details). The $A_V$ values thus calculated for the sources in `F' and `T' regions are given in Table \ref{optdata}. Twenty one sources are found to have $A_V$ $\ge$ 6.0 mag, indicating that significant number of cluster members in the region may still be embedded. \subsection{The colour-magnitude diagram} Fig. \ref{jhj} shows $J/(J-H)$ distribution of sources within $\sim$ 9.6 arcmin$^2$ area of NGC 1624. The encircled are the NIR excess sources in this region. The thick solid curve denotes the locus of 2 Myr PMS isochrone from Siess et al. (2000), which is the average age of NIR excess sources (see Sect. \ref{pms}, Fig. 
\ref{yso}) and the thin curve is the 2 Myr isochrone from Girardi et al. (2002). Both the isochrones are shifted for the cluster distance and reddening. The continuous oblique lines denote the reddening trajectories up to $A_V$ = 10 mag for PMS stars of 2 Myr age having masses 0.1, 2.0 and 3.0 $M_\odot$, respectively. For the assumed age $\sim$ 2 Myr, reddening $A_V$ = 2.5 mag and distance = 6.0 kpc, the $J$-band detection limit of present observations corresponds to $M$ $\sim$ 0.1 $M_\odot$. In Fig. \ref{jhj} majority of NIR excess sources ($\sim$ 98 \%) are seen to have masses in the range 0.1 to 3.0 $M_\odot$. The CMD indicates that the stellar population in NGC 1624 significantly comprises of low mass PMS stars similar to other SFRs studied by Ojha et al. (2004a), Sharma et al. (2007), Pandey et al. (2008) and Jose et al. (2008). These results further support the scenario that the high mass star forming regions are not devoid of low mass stars (e.g., Lada \& Lada 1991; Zinnecker et al. 1993; Tapia et al. 1997; Ojha et al. 2004a). The distribution of stars located below the CTTS locus (cf. Fig. \ref{jhhk}) is shown by crosses in Fig. \ref{jhj} which indicates that a majority of these sources are likely to be field stars. The brightest NIR excess source marked as a star symbol in Fig. \ref{jhj} is the candidate ionizing source of the UC\mbox{H~{\sc ii}~} region. The extinction to this star is estimated by tracing it back to the ZAMS along the reddening vector and found to be $A_V$ $\sim$ 10.6 mag. This extinction should be considered as an upper limit, as the star shows NIR excess, therefore, $J$ and $H$ magnitudes might have been affected by the NIR excess emission. The photometric spectral type of this star comes out to be $\sim$ B0 which is in agreement with the spectral type estimation based on our radio continuum observations (see Sect. \ref{ionized gas}). 
\section{ Field star decontamination} \label{field} Distinguishing cluster members from field stars is a significant challenge for photometric surveys of clusters. To study the LF/MF, it is necessary to remove field star contamination from the cluster region. Membership determination is also crucial for assessing the presence of PMS stars because both PMS and dwarf foreground stars occupy similar positions above the ZAMS in the CMDs. As discussed in Sect. \ref{nir}, some of the YSOs can be identified with the help of NIR excess, however this is not true for the diskless YSOs. An alternative is to study the statistical distribution of stars in the cluster and field regions. Because proper motion studies are not available for the stars in the cluster region, we used following statistical criteria to estimate the number of probable members of NGC 1624. To remove contamination due to field stars from the MS and PMS sample, we statistically subtracted the contribution of field stars from the observed CMD of the cluster region using the following procedure. For any star in the $V/(V-I)$ CMD of the control field (Fig. \ref{cmd}c), the nearest star in the cluster's $V/(V-I)$ CMD (Figs. \ref{cmd}a and b) within $V$ $\pm$ 0.125 and $(V-I)$ $ \pm$ 0.065 was removed. The statistically cleaned $V/(V-I)$ CMD (SCMD) of the cluster region is shown in Fig. \ref{calone}, which clearly shows a sequence towards red side of the MS. PMS isochrones by Siess et al. (2000) for ages 0.5 and 5 Myr (dashed lines) and 2 Myr isochrone by Girardi et al. (2002) (continuous line) are shown in Fig. \ref{calone}. The evolutionary tracks by Siess et al. (2000) for different masses are also shown which are used to determine the masses of PMS cluster members. Here we would like to remind the readers that the points shown by filled circles in Fig. \ref{calone} may not represent the actual members of the clusters. 
However, the filled circles should represent the statistics of PMS stars in the region and the statistics has been used to study the MF of the cluster region (cf. Sect. \ref{imf}). We followed the above technique for the field star decontamination of the NIR data as well. Since the area of the selected field region is smaller in comparison to the cluster region, we subdivided the cluster region in to three sub regions having area equal to the field region. The field star contamination from $J/ (J-H)$ CMD of the cluster sub regions was subtracted using the $J/ (J-H)$ CMD of the field region in a similar manner as in the case of $V/(V-I)$ CMD. \subsection{Young stellar population in NGC 1624} \label{pms} It is found that nineteen percent of the candidate PMS stars located above the intrinsic CTTS locus (cf. Fig. \ref{jhhk}) have optical counterparts in $V$-band within 9.6 arcmin$^2$ area. The $V/(V-I)$ CMD for these sources is shown in Fig. \ref{yso}. The encircled are the NIR excess sources which are the likely candidate YSOs (see Sect. \ref{nircc}). PMS isochrones by Siess et al. (2000) for 0.5, 2, 5 Myr (dashed curves) and isochrone for 2 Myr by Girardi et al. (2002; continuous curve) corrected for cluster distance and reddening are also shown. Fig. \ref{yso} reveals that majority of the sources have ages $\le$ 5 Myr with a possible age spread of $\sim 0.5 - 5$ Myr and $\sim$ 75$\%$ of the NIR excess sources show ages $\le$ 2 Myr. Since the reddening vector in $V/(V-I)$ CMD (see Fig. \ref{yso}) is nearly parallel to the PMS isochrone, the presence of variable extinction in the region will not affect the age estimation significantly. Therefore the age spread indicates a possible non-coeval star formation in this region. The membership of the YSOs shown in Fig. \ref{yso} is calculated using the following procedure. Each YSO is corrected for its reddening calculated in the Sect. \ref{nircc}. 
The intrinsic $(V-I)$ colour thus obtained is then compared with the PMS isochrones of varying ages from 5 Myr to 0.1 Myr. The $M_V$ value of each YSO is obtained from the best matching isochrone and hence the distance modulus. The sources lying within $3\sigma$ of the distance modulus obtained in Sect. \ref{distance} are considered as the probable cluster members. It is found that three sources do not satisfy the above criteria and have been considered as non-members. These three sources are marked using boxes in Fig. \ref{yso}. A comparison of Fig. \ref{yso} with the field star decontaminated CMD shown in Fig. \ref{calone} reveals a nice resemblance, suggesting that the statistics of PMS sources selected on the basis of SCMD can be used to study the IMF of PMS population of NGC 1624. As most of the sources in Fig. \ref{yso} are located in the PMS region, it can be safely assumed that the sources lying above the unreddened CTTS locus of Fig. \ref{jhhk} are likely cluster members. Thus sources falling in the `F' region (see Fig. \ref{jhhk}) are likely to be WTTSs or CTTSs with little or no NIR excess and those in the `T' region are the candidate CTTSs with NIR excess. However, Fig. \ref{yso} does not show any trend in age distribution between these sources. A comparison of Figs. \ref{jhj} and \ref{calone} confirms that most of the YSOs have masses $\le$ 3.0 $M_\odot$. The fraction of NIR excess sources in a cluster is also an age indicator because the disks/envelopes become optically thin with age (Haisch et al. 2001; Carpenter et al. 2006; Hern\'{a}ndez et al. 2007). For young embedded clusters having age $\le$ $ 1 \times 10^6$ yr, the disk fraction obtained from $JHK$ photometry is $\sim$ 50\% (Lada et al. 2000; Haisch et al. 2000), whereas the fraction reduces to $\sim$ 20\% for the clusters with age $\sim$ 2 - 3 $\times$ $10^6$ yr (Lada \& Lada 1995; Haisch et al. 2001; Teixeira et al. 2004; Oliveira et al. 2005). 
After correcting for the field star contamination and photometric incompleteness, the fraction of NIR excess sources in an area $\sim$ 9.6 arcmin$^2$ of NGC 1624 is estimated to be $\sim$ 20\%. There are 31 $J$ drop-out sources falling within our error criteria. Based on the colour and spatial distribution of these $J$ drop-out sources (see Sect. \ref{distribution}), we presume that they can be included in the list of candidate YSOs and hence the NIR excess fraction increases to $\sim$ 25\%. This suggests an age of $\sim$ 2 - 3 $\times$ $10^6$ yr for this cluster which is in agreement with the age estimation derived using the PMS evolutionary tracks in the optical CMD (cf. Fig. \ref{yso}). This NIR excess fraction is to be considered as a lower limit to the actual YSO fraction of the cluster as we do not have $L$-band observations for this cluster. However, Yasui et al. (2009) point out that the disk fraction from only $JHK$ data are about 0.6 of those from $JHKL$ data and the lifetime estimation from $JHK$ data is basically identical to that from $JHKL$ data. Therefore, despite a little larger uncertainty, the disk fraction from $JHK$ data alone should still be effective even without $L$-band data. Here it is worthwhile to point out that in the case of Cep OB3B, Getman et al. (2009) have shown that the disk frequency depends on the distance from the exciting stars, as massive stars can photo-evaporate the disk around young stars. Also, Carpenter et al. (2006) have found evidence for mass dependent circumstellar disk evolution in the sense that the mechanism for disk dispersal operates less efficiently for low mass stars. Hence, keeping in mind the uncertainties mentioned above, the age estimation based on the disk frequency must be considered as an approximate estimation. In order to check if there is any mass dependence of the NIR excess fraction, we divided the optically identified PMS members (shown in Fig. 
\ref{yso}) in to three mass bins i.e., 2.5 - 1.5 $M_{\odot}$, 1.5 - 1.0 $M_{\odot}$ and 1.0 - 0.6 $M_{\odot}$ using the evolutionary tracks by Siess et al. (2000). After applying the completeness correction in each magnitude bin, we obtained the NIR excess fraction as 23\%, 24\% and 37\%, respectively for the above mentioned mass bins. Hence, there is an evidence of mass dependent evolution of circumstellar disk as explained by Carpenter et al. (2006). However, this estimation has to be considered as a lower limit, as only 19\% of the identified NIR PMS stars have the optical counterparts. Deharveng et al. (2005; 2008) have identified signs of recent star formation in Sh2-212. They estimated the age of the massive star associated with the UC\mbox{H~{\sc ii}~} region located at the periphery of Sh2-212 as $\sim$ 0.14 Myr on the basis of dynamical size of the UC\mbox{H~{\sc ii}~} region. This indicates that the UC\mbox{H~{\sc ii}~} region is relatively young as compared to the YSOs within the cluster region. The bright rim feature at one end of the UC\mbox{H~{\sc ii}~} region (see Fig. 2 of Deharveng et al. 2008) also suggests that the UC\mbox{H~{\sc ii}~} region might have formed at a later evolutionary stage of the \mbox{H~{\sc ii}~} region as a second generation object. \section {Spatial distribution of YSOs} \label{distribution} Fig. \ref{co} displays the spatial distribution of YSOs (blue circles; likely Class II sources) identified on the basis of NIR excess characteristics (cf. Fig. \ref{jhhk}) along with the CO emission contour map from Deharveng et al. (2008) for four condensations and filament. The $J$ drop-out sources are shown using red triangles. The molecular condensations make a semi-circular ring towards the southern side of Sh2-212. Fig. \ref{co} reveals that majority of YSOs are located close to the cluster centre within a radius of 0.5 arcmin (i.e., within the cluster core radius of $\sim$ 0.9 pc; cf. Sect. 
\ref{rd}), however, several other YSOs are found to be distributed outside of this radius along the thin semi-circular ring and filamentary structure. Interestingly, there is an apparent concentration of YSOs just at the boundary of the clump C2. In Fig. 15 we have shown the $ K/(H-K) $ CMD for all the sources detected in this region. The encircled are the YSOs and the red triangles are the $J$ drop-out sources. It is evident from the CMD that majority of YSOs have $(H-K)$ colour in the range $\sim$ 0.6 - 0.8 mag. However a significant number of sources appear to be redder ($H-K$ $ \ge 1.0$ mag). The spatial distribution of sources having $(H-K) \ge 1.0$ mag has been shown in Fig. \ref{co} with filled circles (i.e., YSOs) and triangles ($J$ drop-out sources), respectively and this figure reveals a higher density of reddened sources near the clump C2. The larger value of $(H-K)$ ($\ge$ 1.0 mag) could be either due to higher extinction, as most of these sources are lying within/very close to the CO distribution, or could be their intrinsic colour due to large NIR excess. If the origin of this colour excess is merely from the interstellar extinction, then one must expect an increment in the value of $A_V$ by $\sim$ 12 mag as compared to the sources located close to the cluster center. In order to investigate the spatial distribution of extinction in the region, we plot radial variation of $A_V$ in Fig. \ref{radial} (left panel). It is evident from the Fig. \ref{radial} that $A_V$ is almost constant within an 80 arcsec cluster radius. Hence, we can presume that the origin of colour excess could be intrinsic in nature. This fact indicates an age sequence in the sense that YSOs located/projected over the semi-circular ring of molecular condensations are younger than those lying within the core of the cluster. 
To further elucidate the youth of the YSOs located/projected over the semi-circular ring of molecular condensations, we plot the radial variation of NIR excess, $\Delta(H-K)$, defined as the horizontal displacement from the reddening vector at the boundary of `F' and `T' regions (see Fig. \ref{jhhk}). NIR excess is considered to be a function of age. An enhancement in the mean value of $\Delta(H-K)$ at $\sim$ 45 arcsec, i.e., near the periphery of the semi-circular ring is apparent in Fig. \ref{radial} (middle panel). In the right panel we plot the radial variation of $(H - K)$ colour of YSOs and $J$ drop-out sources using dashed and solid histogram, respectively. The enhancement in the mean $(H - K)$ value at the same location is apparent in this figure as well. However, we have to keep in mind the possibility of photo-evaporation of the disk around YSOs lying within the core of the cluster due to stellar radiation of the massive star at the centre of the cluster. The above facts indicate that the sources near the molecular material are intrinsically redder and support the scenario of possible sequential star formation towards the direction of molecular clumps. It is interesting to mention that the distribution of YSOs in the NGC 1624 region is rather similar to the distribution of Class II sources in other star forming regions, e.g., RCW 82 (Pomar\`{e}s et al. 2009), RCW 120 (Zavagno et al. 2007) and Sh2-284 (Puga et al. 2009). The majority of Class I sources in the case of RCW 82 and RCW 120 are found to be associated with the molecular material at their periphery and none are found around the ionizing source. The association of Class I sources with the molecular material manifests the recent star formation at their periphery. If star formation in Sh2-212 region is similar in nature to RCW 82 and RCW 120, one would expect a significant number of Class I sources in the surrounding molecular material. 
Unfortunately, the absence of MIR observations hampers the detailed study of the probable young sources lying towards the collected molecular material. However, the YSOs having $(H-K) \ge 1.0$ mag, which are expected to be the youngest sources of the region, are found to be distributed around the molecular clumps detected by Deharveng et al. (2008). It is interesting to mention that in the case of RCW 82, the YSOs having $(H-K) \ge 1.0$ mag are found to be associated with the molecular emission surrounding the \mbox{H~{\sc ii}~} region. Many of these sources are not observed in the direction of molecular emission peaks, but are located on the borders of the condensations (Pomar\`{e}s et al. 2009). A similar distribution of YSOs (having $H-K \ge 1.0$) can be seen in the present study at the border of the clump C2. According to Deharveng et al. (2008), the massive YSO associated with the UC\mbox{H~{\sc ii}~} region (clump C1) might have formed as a result of the collect and collapse process due to the expansion of the \mbox{H~{\sc ii}~} region. If the sources lying towards the molecular clump C2 and along the filament are formed as a result of the collect and collapse process, these sources must be younger than the ionization source by about 2 - 3 Myr as the model calculation by Deharveng et al. (2008) predicts the fragmentation of the collected layer 2.2 - 2.8 Myr after the formation of the massive star in Sh2-212. Since the ionization source is an O6.5 $\pm$ 0.5 MS star, the maximum age of the ionization source should be of the order of its MS life time, i.e., $\sim$ 4.4 Myr (cf. Sect. \ref{distance}). On the basis of the present analysis we can indicate that the sources with $(H-K) \ge 1.0$ seem to have a correlation with the semi-circular ring of molecular condensations and should be younger than the age of the ionization source of the region. However, in the absence of optical photometry, the reliable age estimation of these YSOs is not possible. 
Since the distribution of the youngest YSOs on the border of clump C2 has a resemblance to the distribution of Class I/ II YSOs in RCW 82, the possibility that the formation of these YSOs is the result of small-scale Jeans gravitational instabilities in the collected layer, or of interactions of the ionization front with the pre-existing condensations, as suggested by Pomar\`{e}s et al. (2009), cannot be ignored. \section{Initial Mass Function} \label{imf} The distribution of stellar masses that form in a star formation event in a given volume of space is called IMF and together with star formation rate, the IMF dictates the evolution and fate of galaxies and star clusters (Kroupa 2002). Young clusters are important tools to study IMF since their MF can be considered as IMF as they are too young to lose a significant number of members either by dynamical or stellar evolution. To study the IMF of NGC 1624 we used the data within $r \le 2^\prime$. The MF is often expressed by the power law, $N (\log m) \propto m^{\Gamma}$ and the slope of the MF is given as $$ \Gamma = d \log N (\log m)/d \log m $$ \noindent where $N (\log m)$ is the number of stars per unit logarithmic mass interval. For the mass range $0.4 < M/M_{\odot} \le 10$, the classical value derived by Salpeter (1955) for the slope of MF is, $\Gamma = -1.35$. Since the NIR data are deeper, we expect to have a better detection of YSOs towards the fainter end in comparison to the optical data. Therefore we estimated the IMF using the optical and NIR data independently. \subsection {IMF from optical data} With the help of SCMD shown in Fig. \ref{calone}, we can derive the MF using theoretical evolutionary models. A mass$-$luminosity relation is needed to convert the derived magnitude for each star to a mass. For the MS stars (see Fig. \ref{q}), LF was converted to MF using the theoretical model by Girardi et al. (2002) for 2 Myr (cf. Pandey et al. 2001; 2005). 
The MF for PMS stars was obtained by counting the number of stars in various mass bins (shown as evolutionary tracks in Fig. \ref{calone}). Necessary corrections for data incompleteness were taken into account for each magnitude bin to calculate the MF. The MF of NGC 1624 is plotted in Fig. \ref{mf}. The slope, $\Gamma$ of the MF in the mass range $1.2 \le M/M_{\odot}<27$ can be represented by a power law. The slope of the MF for the mass range $1.2 \le M/M_{\odot}<27$ comes out to be, $\Gamma$ = $-1.18\pm0.10$, which is slightly shallower than the Salpeter value (-1.35). We conclude that within an acceptable margin, the slope of IMF for the cluster NGC 1624 is comparable to the Salpeter (1955) value. \subsection {IMF from NIR data} We also estimate the IMF using J -band luminosity function (JLF). We preferred $J$-band over $K$-band as the former is least affected by the NIR excess. After removing the field star contamination using the statistical subtraction as explained in Sect. \ref{field}, we applied the completeness correction to the $J$-band data. Assuming an average age of 2 Myr for the PMS stars, distance 6.0 kpc and average reddening $A_V$ = 2.5 mag, the $J$ magnitudes were converted to mass using the 2 Myr PMS isochrone by Siess et al. (2000). For MS stars, the mass-luminosity relation is taken from Girardi et al. (2002). Completeness of the $J$-band data was $\sim$ 90 \% at $J$ = 18 mag ($\sim$ 0.65 $M_\odot$). In Fig. \ref{mf_ir}, we have shown the MF derived for NGC 1624 (within the area of $\sim$ 9.6 arcmin$^2$) in the mass range $0.65 \le M/M_{\odot}<27$. The linear fit gives a slope $\Gamma$ = $-1.31\pm0.15$ which is in agreement with the Salpeter (1955) value. The MF ($\Gamma$ = $-1.18\pm0.10$) derived using optical data is slightly shallower than that of IR data. However both the slopes are within error and can be considered to be in agreement. Here we would like to point out that the estimation of IMF depends on the models used. 
We are pursuing studies of few young clusters, hence a comparative study of IMFs of various young clusters obtained using similar techniques will give useful information about IMFs. Our recent studies on young clusters (age $\sim 2 - 4 $ Myr), viz., NGC 1893 (Sharma et al. 2007), Be 59 (Pandey et al. 2008) and Stock 8 (Jose et al. 2008) have yielded the value of $\Gamma$ for stars more massive than $\sim $ 1 - 2 $M_\odot$ as -1.27 $\pm$ 0.08, -1.01 $\pm$ 0.11 and -1.38 $\pm$ 0.12, respectively. A comparison of the MF in the case of NGC 1624 and the clusters mentioned above indicates that the MF slope towards massive end (i.e., M $\ge 1 M_\odot$) in general, is comparable to the Salpeter value (-1.35). \section {K-band luminosity function} The KLF is frequently used in studies of young clusters as a powerful tool to constrain its age and IMF. Pioneering work on the interpretation of KLF was presented by Zinnecker et al. (1993). During the last decade several studies have been carried out with the aim of determining the KLF of young clusters (e.g., Muench et al. 2000; Lada \& Lada 2003; Ojha et al. 2004b; Sanchawala et al. 2007; Sharma et al. 2007; Pandey et al. 2008; Jose et al. 2008). We have used CFHT $K$-band data to study the KLF of NGC 1624. Because the CFHT observations did not include the entire cluster region, we restricted the KLF study to a region within $\sim$ 9.6 arcmin$^2$ area of NGC 1624 (see Sect. \ref{nir}). In order to convert the observed KLF to the true KLF, it is necessary to correct the data incompleteness and field star contamination. We applied the CF (see Sect. \ref{cfhtdata}) for the data incompleteness. The control field having an area $\sim$ 3.1 arcmin$^2$ shown in Fig. \ref{cfht} has been used to remove the field star contribution. We applied a correction factor to take into account the different areas of cluster and control field regions. 
The field star population towards the direction of NGC 1624 is also estimated by using the Besan\c con Galactic model of stellar population synthesis (Robin et al. 2003) using a similar procedure as described by Ojha et al. (2004b). The star counts were predicted using the Besan\c con model towards the direction of the control field. An advantage of using this model is that we can simulate foreground ($d<6.0$ kpc) and background ($d>6.0$ kpc) field star populations separately. The use of this model allows us to apply the extra cloud extinction to the background stars. The foreground population was simulated using the model with $A_V$ = 2.36 mag ($E(B-V) = 0.76$ mag; ref. Sect. \ref{reddening}) and $d < 6.0$ kpc. The background population ($d>6.0$ kpc) was simulated with an extinction value $A_V$ = 4.0 mag (see Sect. \ref{nircc}). Thus we determined the fraction of the contaminating stars (foreground + background) over the total model counts. The scale factor we obtained to the control field direction was close to 1.0 in all the magnitude bins. This indicates that the moderate extinction of $A_V$ $\sim$ 4.0 mag is unlikely to have any significant effect on the field star distribution at this distance. Hence, we proceeded our analysis of KLF with the field star counts obtained from the observed control field. The completeness corrected and field star subtracted KLF for NGC 1624 is shown in Fig. \ref{klf}. The KLFs of young embedded clusters are known to follow power-law shapes (Lada et al. 1991; 1993) which is expressed as: \begin{center} ${{ \rm {d} N(K) } \over {\rm{d} K }} \propto 10^{\alpha K}$ \end{center} where ${ \rm {d} N(K) } \over {\rm{d} K }$ is the number of stars per 0.5 mag bin and $\alpha$ is the slope of the power law. The KLF for NGC 1624 shown in Fig. 
\ref{klf} (solid line), yields a slope $0.30\pm0.06$ for the range $K$ = 13.5 - 17.5 mag, which is slightly lower than the average value of slopes ($\alpha \sim 0.4$) for young clusters of similar ages (Lada et al. 1991; Lada \& Lada 1995; Lada \& Lada 2003). However, a break in the power law can be noticed at $K$ = 15.75 mag and the KLF seems to be flat in the magnitude range 15.75 - 17.5. The slope of the KLF in the magnitude range 13.5 - 15.75 (dashed line in Fig. \ref{klf}) comes out to be 0.44 $\pm$ 0.11 which is comparable with the average value of slopes for young clusters. A turn off in the KLF has also been observed in a few young clusters, e.g., at $K \sim$ 14.5 mag and $K \sim$ 16.0 mag in the case of Tr 14 (distance $\sim$ 2.5 kpc; Sanchawala et al. 2007) and NGC 7538 (distance $\sim$ 2.8 kpc; Ojha et al. 2004), respectively. KLF slope is an age indicator of young clusters. For clusters up to 10 Myr old, the KLF slope gets steeper as the cluster gets older (Ali \& Depoy 1995; Lada \& Lada 1995). However, there is no precise age - KLF relationship in the literature due to huge uncertainty in their correlation (Devine et al. 2008). There are many studies on KLF of young clusters. The studies by Blum et al. 2000; Figuer\^{e}do et al. 2002; Leistra et al. 2005; 2006; Devine et al. 2008 indicate that the KLF slope varies from 0.2 - 0.4 for clusters younger than 5 Myr. The KLF of NGC 1624 is worth comparing with the recent studies of young clusters, viz., NGC 1893 (Sharma et al. 2007), Be 59 (Pandey et al. 2008) and Stock 8 (Jose et al. 2008), since all the KLFs are obtained using a similar technique. The slope of the KLF ($\alpha = 0.30\pm0.06$) obtained for NGC 1624 in the magnitude range 13.5 - 17.5 is comparable with those obtained for NGC 1893 ($\alpha = 0.34\pm0.07$), Stock 8 ($\alpha = 0.31\pm0.02$) and Be 59 ($\alpha = 0.27\pm0.02$). 
\section{Summary} We have carried out a comprehensive multi-wavelength study of the young cluster NGC 1624 associated with the \mbox{H~{\sc ii}~} region Sh2-212. Sh2-212 is thought to have experienced `Champagne flow' and the molecular clumps along with the UC\mbox{H~{\sc ii}~} region at the periphery are suggested as the possible outcome of the collect and collapse phenomena. In our present study, an attempt has been made to determine the basic properties of NGC 1624 as well as to study the nature of stellar contents in the region using optical $UBVRI$ photometry, optical spectroscopy of four stars, radio continuum observations from GMRT along with NIR $JHK$ archival data from 2MASS and CFHT. From optical observations of massive stars, reddening ($E(B-V)$) in the direction of NGC 1624 is found to vary between 0.76 to 1.00 mag and distance is estimated to be $6.0 \pm 0.8$ kpc. The maximum post-main-sequence age of the cluster is estimated as $\sim$ 4 Myr. Present spectroscopic analysis of the ionizing source indicates a spectral class of O6.5V. We used $JHK$ colour criteria to identify sources with NIR excess and found 120 candidate YSOs in the region. Majority of the YSOs have $A_V \le$ 4.0 mag and masses in the range $\sim$ 0.1 - 3.0 $M_\odot$. Distribution of these YSOs on the CMD indicates an age spread of $\sim$ 0.5 - 5 Myr with an average age of $\sim$ 2-3 Myr, suggesting non-coeval star formation in NGC 1624. The lower limit for the NIR excess fraction on the basis of $JHK$ data is found to be $\sim$ 20\% which indicates an average age $\sim$ 2 - 3 Myr for YSOs in NGC 1624. From the radio continuum flux, spectral type of the ionizing source of the UC\mbox{H~{\sc ii}~} region is estimated to be $\sim$ B0.5V. A significant number of YSOs are located close to the cluster centre and a few YSOs are seen to be located/projected over the molecular clumps detected by Deharveng et al. (2008), as well as farther away from the clumps. 
We detect an enhanced density of reddened YSOs located/projected close to the molecular clump C2. The NIR excess and $(H-K)$ colour distribution of these sources show indication of an age sequence in the sense that the YSOs located/projected near the clump C2 are younger than those located within the cluster core. The slope of the MF, $ \Gamma$, derived from optical data, in the mass range $1.2 \le M/M_{\odot}<27$ can be represented by -1.18 $\pm$ 0.10, whereas the NIR data, in the mass range $0.65 \le M/M_{\odot}<27$, yield $ \Gamma$ = -1.31 $\pm$ 0.15. Thus the MF fairly agrees with the Salpeter value (-1.35). The slope of the KLF for NGC 1624 in the magnitude range 13.5 - 17.5 is found to be 0.30 $\pm$ 0.06 which is smaller than the average value ($\sim$0.4) obtained for young clusters of similar ages (Lada et al. 1991; Lada \& Lada 1995; Lada \& Lada 2003), however, agrees well with the values 0.27 $\pm$ 0.02 for Be 59 (Pandey et al. 2008); 0.34 $\pm$ 0.07 for NGC 1893 (Sharma et al. 2007) and 0.31 $\pm$ 0.02 for Stock 8 (Jose et al. 2008). However, there is a clear indication of a break in the power law at $K$ = 15.75 mag. The KLF slope in the magnitude range 13.5 - 15.75 can be represented by $\alpha = 0.44 \pm 0.11$ and the KLF slope is found to be flat in the magnitude range 15.75 - 17.5. \section{Acknowledgments} The authors are thankful to the referee Dr. Antonio Delgado for his useful comments which have improved the contents and presentation of the paper significantly. We thank the staff of IAO, Hanle and its remote control station at CREST, Hosakote, ARIES, Naini Tal, and GMRT, Pune, India for their assistance during observations. This publication makes use of data from the Two Micron All Sky Survey, which is a joint project of the University of Massachusetts and the Infrared Processing and Analysis Center/California Institute of Technology, funded by the National Aeronautics and Space Administration and the National Science Foundation. 
This research used the facilities of the Canadian Astronomy Data Centre operated by the National Research Council of Canada with the support of the Canadian Space Agency. We thank Annie Robin for letting us use her model of stellar population synthesis. JJ is thankful for the financial support for this study through a stipend from the DST and CSIR, India. \section{REFERENCES} Adams F. C., Lada C. J., Shu F. H. 1987, ApJ, 312, 788\\ Ali, B., Depoy, D. L., 1995, AJ, 109, 709\\ Bessell, M., Brett, J. M., 1988, PASP, 100, 1134\\ Blitz, L., Fich, M., Stark, A. A., 1982, ApJS, 49, 183\\ Blum, R. D., Conti, P. S., Damineli, A., 2000, AJ, 119, 1860\\ Caplan, J., Deharveng, L., Peña, M., Costero, R., Blondel, C., 2000, MNRAS, 311, 317\\ Carpenter, J. M., 2001, AJ, 121, 2851\\ Carpenter, J. M., Mamajek, E. E., Hillenbrand, L. A., Meyer, M. R. 2006, ApJ, 651, L49\\ Chabrier, G., 2005, The Initial Mass Function 50 Years Later, 327, 41\\ Chavarr\'{i}a, L., Mardones, D., Garay, G., Escala, A., Bronfman, L., Lizano, S., 2010, ApJ, 710, 583\\ Chauhan, N., Pandey, A. K., Ogura, K., Ojha, D. K., Bhatt, B. C., Ghosh, S. K., Rawat, P. S., 2009, MNRAS,396,964\\ Chini, R., Wink, J.E., 1984, A\&A, 139, L5\\ Chini, R., Wargau, W. F., 1990, A\&A, 227, 213\\ Cohen, J. G., Persson, S. E., Elias, J. H., Frogel, J. A., 1981, ApJ, 249, 481\\ Conti, P.S., Alschuler, W. R., 1971, ApJ, 170, 325\\ Cutri, R. M., Skrutskie, M. F., van Dyk, S., et al., 2003, The IRSA 2MASS All Sky Point Source Catalog, NASA/IPAC Infrared Science Archive, http://irsa.ipac.caltech.edu/applications/Gator/\\ Deharveng, L., Zavagno, A., Caplan, J. 2005, A\&A, 433, 565\\ Deharveng, L., Lefloch, B., Kurtz, S., Nadeau, D., Pomar\`{e}s, M., Caplan, J., Zavagno, A., 2008, A\&A, 482, 585\\ Devine, K. E., Churchwell, E. B., Indebetouw, R., Watson, C., Crawford, S. M., 2008, AJ, 135, 2095\\ Figuer\^{e}do, E., Blum, R. D., Damineli, A., Conti, P. S., 2002, AJ, 124, 2739\\ Georgelin, Y. M., Georgelin, Y. 
P., 1970, A\&A, 6, 349\\ Getman, K. V., Feigelson, E. D., Luhman, K. L., Sicilia-Aguilar, A., Wang, J., Garmire, G. P., 2009, ApJ, 699, 1454\\ Girardi, L., Bertelli, G., Bressan, A., Chiosi, C., Groenewegen, M. A. T., et al., 2002, A\&A, 391, 195\\ Haisch, K. E., Lada, E. A., Lada, C. J., 2000, AJ, 120, 1396\\ Haisch, K. E., Lada, E. A., Lada, C. J., 2001, AJ, 121, 2065\\ Hern\'{a}ndez, J., Hartmann, L., Megeath, T., Gutermuth, R., Muzerolle, J., 2007, ApJ, 662, 1067\\ Hillenbrand, L. A., Strom, S. E., Vrba, F. J., Keene, J., 1992, ApJ, 397, 613\\ Hillenbrand, L. A., Massey, P., Strom, S. E., Merrill, K. M., 1993, AJ, 106, 1906 \\ Hubble E., 1922, ApJ, 56, 400\\ Hunter, T. R., Testi, L., Taylor, G. B., Tofani, G., Felli, M., Phillips, T. G., 1995, A\&A, 302, 249\\ Jacoby, G. H., Hunter, D. A., Christian, C. A., 1984, ApJS, 56, 257\\ Johnson, H. L. Morgan, W. W., 1953, ApJ, 117, 313\\ Johnson, H. L., 1966, ARA\&A, 4, 193\\ Jones, B. F., Herbig, G. H., 1979, 84, 1872\\ Jose, J., et al., 2008, MNRAS, 384, 1675\\ King, I., 1962, AJ, 67, 471\\ Kroupa, P., 2002, SCIENCE, 295, 82\\ Kroupa P., 2008, in Knapen J. H., Mahoney T. J., Vazdekis A., eds, ASP Conf. Ser. Vol. 390, Pathways Through an Eclectic Universe. Astron. Soc. Pac., San Francisco, p. 3\\ Lada, C. J., Lada, E. A., 1991, in ASP Conf. Ser. 13, The Formation and Evolution of Star Clusters, ed. K. Janes (San Francisco: ASP), 3\\ Lada, C. J., Adams, F. C., 1992, ApJ, 393, 278\\ Lada, C. J., Young, T., Greene, T., 1993, ApJ, 408, 471\\ Lada, E . A . Lada, C . J., 1995, AJ , 109, 1682\\ Lada, C. J. et al., 2000, AJ, 120, 3162\\ Lada, C. J., Lada E. A., 2003, ARA\&A, 41, 57\\ Landolt A.U., 1992, AJ, 104, 340\\ Larson, R. B., 1992, MNRAS, 256, 641\\ Leisawitz D., Bash F. N., Thaddeus P., 1989, ApJS, 70, 731\\ Leistra, A., Cotera, A. S., Liebert, J., 2006, AJ, 131, 2571\\ Leistra, A., Cotera, A. S., Liebert, J., Burton, M., 2005, AJ, 130, 1719\\ Mart\'{i}n-Hern\'{a}ndez, N. L., van der Hulst, J. M., Tielens, A. G. G. 
M., 2003, A\&A, 407, 957\\ Martins, F., Schaerer, D., Hillier, D. J., 2005, A\&A, 436, 1049\\ Meyer, M., Calvet, N., Hillenbrand, L. A., 1997, AJ, 114, 288\\ Meyer, M. R., Adams, F. C., Hillenbrand, L. A., Carpenter, J. M., Larson, R. B., 2000, Protostars and Planets IV, 121\\ Meynet, G., Maeder, A., 2005, A\&A, 429, 581\\ Moffat, A. F. J., Fitzgerald, M. P., Jackson, P. D., 1979, A\&AS, 38, 197\\ Muench, A. A., Lada, E.A., Lada, C.J., 2000, ApJ, 553, 338\\ Muench, A. A. et al., 2003, AJ, 125, 2029\\ Ogura K., Chauhan N., Pandey A.K., Bhatt B.C., Ojha D.K., Itoh Y., 2007, PASJ, 59, 199 \\ Ojha, D. K., Tamura, M., Nakajima, Y., et al., 2004a, ApJ, 608, 797\\ Ojha, D. K., Tamura, M., Nakajima, Y., et al, 2004b, ApJ, 616, 1042\\ Oliveira, J. M., Jeffries, R. D., van Loon, J. Th., Littlefair, S. P., Naylor, T., 2005, MNRAS, 358, L21\\ Pandey, A. K., Ogura, K., Sekiguchi, K., 2000, PASJ, 52, 847\\ Pandey A.K., Nilakshi, Ogura K., Sagar R., Tarusawa K., 2001, A\&A, 374, 504\\ Pandey, A. K., Upadhyay, K., Nakada, Y., Ogura, K., 2003, A\&A, 397, 191\\ Pandey, A. K., Upadhyay, K., Ogura, K., Sagar, R., Mohan, V. et al., 2005, MNRAS, 358, 1290\\ Pandey, A. K., Sharma, S., Ogura, K., Ojha, D. K., Chen, W. P. et al., 2008, MNRAS, 383, 1241\\ Pomar\`{e}s, M., Zavagno, A., Deharveng, L., Cunningham, M., Jones, P., Kurtz, S. et al., 2009, A\&A, 494, 987\\ Price, N. M., Podsiadlowski, Ph., 1995, MNRAS, 273, 1041\\ Puga, E., Hony, S., Neiner, C., Lenorzer, A., Hubert, A.-M. et al., 2009, A\&A, 503, 107\\ Robin, A. C., Reyle, C., Derriere, S., Picaud, S., 2003, A\&A, 409, 523\\ Robitaille, T. P., Whitney, B. A., Indebetouw, R., Wood, K., Denzmore, P., 2006, ApJS, 167, 256\\ Salpeter, E.E., 1955, ApJ, 121, 161\\ Sanchawala, K. et al., 2007, ApJ, 667, 963\\ Schmidt-Kaler, Th. 1982, Landolt-Bornstein, Vol. 2b, ed. K. Schaifers, H. H. Voigt, H. Landolt (Berlin: Springer), 19\\ Sharma, S., Pandey, A. K., Ojha, D. K., Chen, W. P., Ghosh, S. K., Bhatt, B.
C., Maheswar, G., Sagar, R., 2007, MNRAS, 380, 1141\\ Sharpless, S., 1959, ApJS, 4, 257\\ Siess, L., Dufour, E., Forestini, M., 2000, A\&A, 358, 593\\ Stetson, P. B., 1987, PASP, 99, 191\\ Sugitani, K. et al., 2002, ApJ, 565, L25\\ Swarup, G., Ananthkrishnan, S., Kapahi, V. K., Rao, A. P., Subrahmanya, C. R., Kulkarni, V. K., 1991, Current Science, 60, 95\\ Tapia, M., Persi, P., Bohigas, J., Ferrari-Toniolo, M., 1997, AJ, 113, 1769\\ Teixeira, P. S., Fernandes, S. R., Alves, J. F., Correia, J. C., Santos, F. D., Lada, E. A., Lada, C. J., 2004, A\&A, 413, L1\\ Torres-Dodgen, Ana V., Weaver, W. B., 1993, PASP, 105, 693\\ Vacca, W. D., Garmany, C. D., Shull, J. M., 1996, ApJ, 460, 914\\ Walborn, N. R., Fitzpatrick, E. L., 1990, PASP, 102, 379\\ Wood, D. O. S., Churchwell, E., 1989, ApJ, 340, 265\\ Yasui, C., Kobayashi, N., Tokunaga, A. T., Saito, M., Tokoku, C., 2009, ApJ, 705, 54\\ Zavagno, A., Pomar\`{e}s, M., Deharveng, L., Hosokawa, T., Russeil, D., Caplan, J., 2007, A\&A, 472, 835\\ Zinnecker, H., 1986, IMF in starburst regions. In Light on Dark Matter, ed. F.P.Israel, ApSS Library Vol. 124, pp.277-278\\ Zinnecker, H., McCaughrean, M. J., Wilking, B. A., 1993, in Protostars and Planets III, ed. E. Levy \& J. Lunine (Tucson: Univ. Arizona Press), 429\\ \begin{table} \caption{Log of observations} \label{obslog} \begin{tabular}{p{.3in}p{.45in}p{.45in}p{.35in}p{1in}} \hline $\alpha_{(2000)}$ & $\delta_{(2000)}$ & Date of &Filter & Exposure time\\ (h:m:s) & ($^{\circ}:^{\prime}:^{\prime\prime}$) & observation & & (s)$\times$no.
of frames \\ \hline {\it HCT$^1$} & & &\\ 04:40:38& +50:27:36& 2004.11.03 & $U$ & 600$\times$3 \\ 04:40:38&+50:27:36& 2004.11.03 & $B$ & 300$\times$3, 60$\times$1, 20$\times$1\\ 04:40:38& +50:27:36& 2004.11.03 & $V$ & 120$\times$3, 10$\times$1\\ 04:40:38& +50:27:36& 2004.11.03 & $R$ & 60$\times$3, 10$\times$1\\ 04:40:38& +50:27:36& 2004.11.03 & $I$ & 60$\times$3, 10$\times$1, 5$\times$1 \\ 04:40:38& +50:27:36& 2007.01.26 & \mbox{[S~{\sc ii}]}~&450$\times$1\\ 04:40:38& +50:27:36& 2007.01.26 & \mbox{[O~{\sc iii}]}~&450$\times$1\\ 04:40:37& +50:27:41& 2006.09.08 & Gr7/167l&900$\times$1 \\ 04:40:39& +50:27:18& 2007.01.26 & Gr7/167l&600$\times$1\\ 04:40:35& +50:28:44& 2007.01.26 & Gr7/167l&750$\times$1\\ 04:40:32& +50:27:54& 2007.01.26 & Gr7/167l&750$\times$1\\ {\it ST$^2$} & & &\\ 04:40:38& +50:27:36& 2006.12.12 & $V$ & 300$\times$10 \\ 04:40:38&+50:27:36& 2006.12.12 & $I_c$ & 300$\times$5 \\ \hline \end{tabular} $^1$ 2-m Himalayan Chandra Telescope, IAO, Hanle\\ $^2$ 104-cm Sampurnanand Telescope, ARIES, Naini Tal\\ \end{table} \begin{table} \caption{$UBVRI_cJHK$ photometric data of sample stars. 
The complete table is available in electronic form only.} \label{optdata} \scriptsize \begin{tabular}{cccccccccccc} \hline star& $\alpha_{(2000)}$& $\delta_{(2000)}$& $V$ &$(U-B)$ &$(B-V)$ & $(V-R)$ & $(V-I)$ & $J$ & $H$ & $ K$ & $A_V$\\ ID& (h:m:s) & ($^{\circ}:^{\prime}:^{\prime\prime}$) & & & & & & & & & \\ \hline 1& 04:39:46.271 & +50:30:00.70 & 18.415 & - & - & - & 1.611 & - & - & - & - \\ 2& 04:39:46.320 & +50:22:03.89 & 21.320 & - & - & - & 1.865 & - & - & - & - \\ 3& 04:39:46.320 & +50:22:23.00 & 20.893 & - & - & - & 1.887 & - & - & - & - \\ ...&.....&.....&.....&.....&.....&.....&.....&.....&......&.....&\\ ...&.....&.....&.....&.....&.....&.....&.....&.....&......&.....&\\ 1155&04:40:32.181&+50:27:53.40&13.067&0.396 & 0.917& 0.542 & 1.055& 11.172 &10.838& 10.728 &3.1*\\ ...&.....&.....&.....&.....&.....&.....&.....&.....&......&.....&\\ \hline \end{tabular}\\ $A_V$ for the $\star$ marked sources have been obtained using optical photometry\\ \end{table} \begin{table} \caption{Completeness Factor of photometric data in the cluster and field regions.} \label{cf_opt} \begin{tabular}{ccc} \hline V range& NGC 1624 & Field region\\ (mag)& $r < 2^{\prime}$ & r $\ge$ $3^{\prime}$ \\ \hline 11 - 12&1.00&1.00\\ 12 - 13&1.00&1.00\\ 13 - 14&1.00&1.00\\ 14 - 15&1.00&1.00\\ 15 - 16&1.00&1.00\\ 16 - 17&0.98&0.98\\ 17 - 18&0.98&0.97\\ 18 - 19&0.90&0.95\\ 19 - 20&0.90&0.93\\ 20 - 21&0.80&0.89\\ 21 - 22&0.55&0.61\\ \hline \end{tabular} \end{table} \begin{figure*} \centering \includegraphics[scale = .76, trim = 2 150 0 150, clip]{Fig1.eps} \caption{$Left$: The colour composite image reproduced using the bands $B$, \mbox{[O~{\sc iii}]}~ and \mbox{[S~{\sc ii}]}~ ($B$, blue; \mbox{[O~{\sc iii}]}~, green; and \mbox{[S~{\sc ii}]}~, red) for an area $\sim 10 \times 10$ arcmin$^2$ around NGC 1624 (see the electronic version for the colour image). The dashed line box represents the $ 5^{\prime}.2 \times 5^{\prime}.2$ area of CFHT-$JHK$ observations (cf. Sect. \ref{cfhtdata}). 
The star mark represents the cluster centre and the dashed circle represents the boundary of NGC 1624 (cf. Sect. \ref{rd}). $Right$: CFHT $K$-band mosaic image with a field of view of $5^{\prime}.2 \times 5^{\prime}.2$ centered on the UC\mbox{H~{\sc ii}~} region of Sh2-212. The white circle represents the ionizing source of Sh2-212 and the asterisk represents the centre of NGC 1624. The contours represent the $^{13}$CO(2-1) emission map from Deharveng et al. (2008) in the velocity range between -34.0 kms$^{-1}$ to -32.7 kms$^{-1}$ (continuous thin contours), -36.1 $km s^{-1}$ to -35.1 $km s^{-1}$ (continuous thick contours) and -36.8 kms$^{-1}$ to -35.9 kms$^{-1}$ (dashed contours), respectively. C1, C2, C3 and C4 are the molecular clumps identified by Deharveng et al. (2008). The partial circle shows $\sim$ 9.6 arcmin$^2$ section of the cluster (radius = 2$^\prime$; area = 12.6 arcmin$^2$). The control field region (cf. Sect. \ref{nir}) is represented by the dashed line box. } \label{cfht} \end{figure*} \begin{figure*} \centering \includegraphics[scale =.5,trim=0 0 0 0, angle=-90, clip]{Fig2.eps} \caption{The two dimensional stellar surface number density distribution obtained from the CFHT $K$-band data using a grid size of $5^{\prime\prime} \times 5^{\prime\prime}$. The lowest contour is plotted at 3 times above the background level. The star mark represents the cluster centre. } \label{ssnd} \end{figure*} \begin{figure*} \centering \includegraphics[scale =.5,trim=10 10 10 10, clip]{Fig3.eps} \caption{Stellar density as a function of radius from the adopted cluster centre for the optical (upper panel) and 2MASS (lower panel) data. The solid curve shows the least square fit of the King (1962) profile to the observed data points. The dashed line represents the mean density level of the field stars and dotted lines are the error limits for the field star density. 
The error bars represent $\pm$ $\sqrt{N}$ errors.} \label{rad} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .5, trim = 10 20 50 100, clip]{Fig4.eps} \caption{$(U - B)/(B - V)$ colour-colour diagram for the stars within $r \le 2^\prime$ of NGC 1624. The continuous curves represent the ZAMS by Girardi et al. (2002) shifted along the reddening slope of 0.72 (shown as dotted line) for $E(B-V)$ = 0.76 and 1.00 mag, respectively. The dashed curve represents the ZAMS reddened by E(B - V) = 1.15 mag to match the probable background population (see the text for details). } \label{ubbv} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .8, trim = 0 0 0 0, clip]{Fig5.eps} \caption{Top: Flux calibrated normalized spectrum for the ionizing source M2. Bottom: Wavelength calibrated normalized spectrum for the star M4. The lines identified for the spectral classification are marked in the figure. } \label{spec} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .5, trim = 10 20 10 80, clip]{Fig6.eps} \caption{$V_0/(B-V)_0$ CMD for stars lying within $r \le 2^{\prime}$ of NGC 1624 and having spectral type earlier than A0. The filled and open circles are the probable cluster members and field stars, respectively. The isochrones of age 2 Myr (solid curve) and 4 Myr (dashed curve) by Girardi et al. (2002) corrected for the cluster distance are also shown. The labeled sources, numbered according to Deharveng et al. (2008), are further classified using low resolution spectroscopy to be of spectral class $\mbox{F~{\sc iii}}~-OV$ (see Sect. \ref{slitspec}). The star shown by open square with open circle occupies a location near to M8 and M9 stars in the $(J-H)/(H-K)$ colour-colour diagram and hence this star could be a field giant. The average error in the colour term is given at the lower left side of the figure. 
} \label{q} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .62, trim = 10 10 10 160, clip]{Fig7.eps} \caption{ $V/(V-I)$ CMD for the stars within (a): $r \le 0^\prime$.5 of NGC 1624 (b): within $0^\prime.5 \le r \le 2^\prime$ of NGC 1624 (c): for stars in the control field. The continuous curve is the isochrone of 2 Myr from Girardi et al. (2002) corrected for the cluster distance and reddening $E(B-V)_{min}$ = 0.76 mag, whereas the dashed curve is shifted for a reddening $E(B-V)_{max}$ = 1.0 mag. The dotted curves are the PMS isochrone for 0.5 and 5 Myr (Siess et al. 2000) shifted for the cluster distance and reddening $E(B-V)_{min}$ = 0.76 mag} \label{cmd} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .5, trim = 10 120 50 150,clip]{Fig8.eps} \caption{GMRT high resolution map at 1280 MHz of Sh2-212 with a resolution of $\sim$ 4$^{\prime\prime}$.9 $\times$ 3$^{\prime\prime}$.2. The contour levels are at 3, 4, 6, 9, 13, 18, 24 and 31 times of the rms noise 0.224 mJy/beam. The star symbol represents the location of the cluster centre.} \label{1280} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .45, angle=-90, trim = 0 0 0 0, clip]{Fig9.eps} \caption{ Enlarged map of UC\mbox{H~{\sc ii}~} region at 1280 MHz from Fig. \ref{1280}. The contours are plotted above three times of the rms noise. } \label{610} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .5, trim = 0 10 0 50, clip]{Fig10.eps} \caption{$(J - H)/(H - K)$ CC diagrams of sources detected in the $J H K$ bands in (a) NGC 1624 within $\sim$ 9.6 arcmin$^2$ area (b) control field of area $\sim$ 3.1 arcmin$^2$. The locus for dwarfs (thin solid curve) and giants (thick solid curve) are from Bessell $\&$ Brett (1988). The dotted and dotted-dashed lines (red and green, respectively in the online version) represent the unreddened and reddened ($A_V$ = 4.0 mag) locus of CTTSs (Meyer et al. 1997). 
Dashed straight lines represent the reddening vectors (Cohen et al. 1981). The crosses on the dashed lines are separated by $A_V$ = 5 mag. The plots are classified in to three regions, `F', `T' and `P'. The sources located in the `F' region are likely to be the reddened field stars, WTTSs or CTTSs with little or no NIR excess. The sources in the `T' region are considered to be candidate CTTSs with NIR excess and sources in the `P' region are the candidate Class I objects (see text for details). The sources marked using red triangles are the MS members identified using Q method (see Sect. \ref{reddening}). The average photometric errors are shown in the lower right of each panel. } \label{jhhk} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .7, trim = 10 20 10 120, clip]{Fig11.eps} \caption{$J/(J - H)$ CMD for the sources within $\sim$ 9.6 arcmin$^2$ area of NGC 1624. The encircled are the candidate NIR excess sources and the crosses are the sources which are lying below the CTTS locus. The sources marked using red triangles are the MS members identified using Q method (see Sect. \ref{reddening}). The star symbol represents candidate ionizing source of the UC\mbox{H~{\sc ii}~} region. The thick solid curve represents the PMS isochrone of age 2 Myr by Siess et al. (2000) and the thin curve represents the isochrone of age 2 Myr by Girardi et al. (2002). Both the isochrones are corrected for cluster distance and reddening. The continuous oblique lines denote the reddening trajectories up to $A_V$ = 10 mag for PMS stars of 0.1, 2.0 and 3.0 $M_{\odot}$ for 2 Myr. } \label{jhj} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .7, trim = 10 10 10 10, clip]{Fig12.eps} \caption{Statistically cleaned $V/(V - I)$ CMD (SCMD) for stars lying within $r \le 2^{\prime}$ of NGC 1624. The stars having PMS age $\le$ 5 Myr are considered as representing the statistics of PMS stars in the region and are shown by filled circles. 
The sources marked using red triangles are the MS members identified using Q method (see Sect. \ref{reddening}). The isochrone for 2 Myr age by Girardi et al. (2002) and PMS isochrones of 0.5, 5 Myr along with evolutionary tracks of different mass stars by Siess et al. (2000) are also shown. All the isochrones and tracks are corrected for the cluster distance (6.0 kpc) and reddening ($E(B-V)$ = 0.76 mag). The corresponding values of masses in solar mass are given at the right side of each track. Points shown by small dots are considered as non-members. Average photometric errors in magnitude and colour for different magnitude ranges are shown in the left side of the figure. } \label{calone} \end{figure*} \clearpage \begin{figure*} \centering \includegraphics[scale = .7, trim = 10 20 10 120, clip]{Fig13.eps} \caption{$V/(V-I)$ CMD for the sources in NGC 1624 (area $\sim$ 9.6 arcmin$^2$) and lying above the unreddened CTTS locus of the NIR CC diagram (see Fig. \ref{jhhk}). The encircled are the NIR excess sources. The sources marked using red triangles are the MS members identified using Q method (see Sect. \ref{reddening}) and those sources shown in box are probable field stars. Isochrone for 2 Myr age (solid curve) by Girardi et al. (2002) and PMS isochrones of age 0.5, 2 and 5 Myr (dashed curves) by Siess et al. (2000) are also shown. All the isochrones are corrected for the cluster distance and reddening. The arrow indicates the reddening vector for $A_V$ = 2 mag. } \label{yso} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .8, trim = 10 30 10 170, clip]{Fig14.eps} \caption{ Spatial distribution of YSOs (blue circles in the online version) and the $J$ drop out sources (red triangles). The sources with $H-K \ge 1.0 $ mag are shown using filled circles and triangles, respectively and the asterisk represents the centre of NGC 1624. The contours represent the $^{13}$CO(2-1) emission map from Deharveng et al. 
(2008) in the velocity range between -34.0 km s$^{-1}$ to -32.7 km s$^{-1}$ (continuous thin contours), -36.1 $km s^{-1}$ to -35.1 $km s^{-1}$ (continuous thick contours) and -36.8 km s$^{-1}$ to -35.9 km s$^{-1}$ (dashed contours), respectively. The partial circle represents the 2$^\prime$ boundary of the cluster.} \label{co} \end{figure*} \newpage \begin{figure*} \centering \includegraphics[scale = 0.7, trim = 10 30 10 120, clip]{Fig15.eps} \caption{$K/(H - K)$ CMD for the sources detected in the $J H K$ bands and having error $\le$ 0.15 mag. The encircled are the NIR excess sources in the region and the red triangles are the J drop out sources. The vertical dashed line represents the unreddened ZAMS locus shifted for the cluster distance. The slanting line traces the reddening vector for the B0 spectral class with reddening $A_V$ = 15 mag. The star symbol represents candidate ionizing source of the UC\mbox{H~{\sc ii}~} region. } \label{hkk} \end{figure*} \begin{figure*} \centering \includegraphics[scale = 0.85, trim = 0 0 0 0, clip]{Fig16.eps} \caption { {\it Left panel}: Radial variation of Av within a cluster radius of 80 arcsec. {\it Middle panel}: Radial variation of $\Delta(H-K)$, defined as the horizontal displacement from the reddening vector at the boundary of `F' and `T' regions (see Fig. \ref{jhhk}) within a radius of 80 arcsec. {\it Right panel}: Radial variation of $(H-K)$ for the NIR excess sources (dashed histogram) and for all the sources detected in $H$ and $K$- bands (solid histogram). } \label{radial} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .5, trim = 10 30 10 120, clip]{Fig17.eps} \caption{A plot of the MF for NGC 1624 within $r \le 2^\prime$ using optical data. The $\phi$ represents $N$/dlog $m$. The error bars represent $\pm$$\sqrt{N}$ errors. The continuous line shows least-squares fit to the mass ranges described in the text. The value of the slope obtained is mentioned in the figure. 
} \label{mf} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .5, trim = 10 20 10 120, clip]{Fig18.eps} \caption{A plot of the mass function for NGC 1624 within $\sim$ 9.6 arcmin$^2$ area using the $J$-band data. The $\phi$ represents $N$/dlog $m$. The error bars represent $\pm$$\sqrt{N}$ errors. The continuous lines show least-squares fit to the mass ranges described in the text. The value of the slope obtained is mentioned in the figure. } \label{mf_ir} \end{figure*} \begin{figure*} \centering \includegraphics[scale = .5, trim = 10 30 10 120, clip]{Fig19.eps} \caption {KLF derived after completeness correction and subtracting the field star contamination (see the text). The linear fit for various magnitude ranges are represented by the straight lines.} \label{klf} \end{figure*} \bsp \end{document}
1,108,101,565,910
arxiv
\section{Introduction} \label{sec:intro} \subsection{Bridgeland stability conditions on threefolds} \label{sec:intro-stability} Motivated by Douglas's work on $\Pi$-stability for D-branes on Calabi-Yau threefolds (see \cite{Dou}), Bridgeland introduced the notion of stability conditions on triangulated categories (see \cite{BriStab}). Bridgeland's approach can be interpreted essentially as an abstraction of the usual slope stability for sheaves. From the original motivation, construction of Bridgeland stability conditions on the bounded derived category of a given projective threefold is an important problem. However, unlike for a projective surface, there is no known construction which gives stability conditions for all projective threefolds. See \cite{HuyStabNotes, MS} for further details. The category of coherent sheaves does not arise as a heart of a Bridgeland stability condition for higher dimensional smooth projective varieties (see \cite[Lemma 2.7]{TodLimit}). So more work is needed to construct the hearts for stability conditions on projective varieties of dimension above one. In general, when $\Omega$ is a complexified ample class on a projective variety $X$ (that is $\Omega = B + i \sqrt{3}\alpha H$ for some $B, H \in \mathop{\rm NS}\nolimits_{\mathbb{R}}(X)$ with ample class $H$, and $\alpha \in \mathbb{R}_{>0}$), it is expected that \begin{equation} \label{eqn:centralcharge} Z_{\Omega}(-) = -\int_X e^{-\Omega}\mathop{\rm ch}\nolimits(-) \end{equation} defines a central charge function of some stability condition on $X$ (see \cite[Conjecture 2.1.2]{BMT}). In \cite{BMT}, the authors conjecturally construct a heart for this central charge function by double tilting coherent sheaves on $X$. The first tilt of $\mathop{\rm Coh}\nolimits(X)$ associated to the Harder-Narasimhan filtration with respect to the slope stability, is denoted by $$ \mathcal{B}_{\Omega} = \langle \mathcal{F}_{\Omega}[1], \mathcal{T}_{\Omega} \rangle. 
$$ They proved that the abelian category $\mathcal{B}_{\Omega}$ of two-term complexes is Noetherian, and furthermore, they introduced the notion of tilt slope stability for objects in $\mathcal{B}_{\Omega}$. The conjectural stability condition heart $$ \mathcal{A}_{\Omega} = \langle\mathcal{F}_{\Omega}'[1], \mathcal{T}_{\Omega}' \rangle $$ is the tilt of $\mathcal{B}_{\Omega}$ associated to the Harder-Narasimhan filtration with respect to the tilt slope stability. It was shown in \cite{BMT} that the pair $(Z_{\Omega}, \mathcal{A}_{\Omega})$ defines a Bridgeland stability condition on $X$ if and only if any tilt slope stable object $E \in \mathcal{B}_{\Omega}$ with zero tilt slope satisfies $ \mathop{\rm Re}\nolimits Z_{\Omega} (E[1]) < 0$. Moreover, they proposed the following strong inequality for tilt stable objects with zero tilt slopes, and this is now commonly known as the \textit{Conjectural Bogomolov-Gieseker Type Inequality}: \begin{equation*} \label{eqn:BGineq} \mathop{\rm ch}\nolimits_3^B(E) - \frac{1}{6} \alpha^2 H^2 \mathop{\rm ch}\nolimits_1^B(E) \le 0. \end{equation*} Here $\mathop{\rm ch}\nolimits^B(E) = e^{-B}\mathop{\rm ch}\nolimits(E)$ is the twisted Chern character. This conjecture has been shown to hold for all Fano threefolds with Picard rank one (see \cite{BMT, MacP3, SchQuadric, LiFano3}), abelian threefolds (see \cite{MP1, MP2, PiyThesis, BMS}), \'etale quotients of abelian threefolds (see \cite{BMS}), some toric threefolds (see \cite[Theorem 5.1]{BMSZ}) and threefolds which are products of projective spaces and abelian varieties (see \cite{Koseki}).
\subsection{Bridgeland Stability under Fourier-Mukai transforms} \label{sec:intro-satb-under-FMT} The notion of Fourier-Mukai transform (FM transform for short) was introduced by Mukai in early 1980s (see \cite{MukFMT}). In particular, he showed that the Poincar\'e bundle induces a non-trivial equivalence between the derived categories of an abelian variety and its dual variety. Furthermore, he studied certain type of vector bundles on abelian varieties called semihomogeneous bundles, and moduli of them (see \cite{MukSemihomo}). In particular, the moduli space parametrizing simple semihomogeneous bundles on an abelian variety $Y$ with a fixed Chern character is also an abelian variety, denoted by $X$. Moreover, the associated universal bundle $\mathcal{E}$ on $X \times Y$ induces a derived equivalence $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ from $X$ to $Y$, which is now commonly known as the Fourier-Mukai transform. Action of the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ induces stability conditions on $D^b(Y)$ from the ones on $D^b(X)$. This can be defined via the induced map on $\mathop{\rm Hom}\nolimits(K(Y), \mathbb{C})$ from $\mathop{\rm Hom}\nolimits(K(X), \mathbb{C})$ by the transform. More precisely, if $ (Z, \mathcal{A})$ is a stability condition on $D^b(X)$ then $$ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \cdot (Z, \ \mathcal{A}) : = \left( \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \cdot Z , \ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}\left(\mathcal{A}\right) \right) $$ defines a stability condition on $D^b(Y)$, where $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \cdot Z (-) = Z \left( \left( \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}\right)^{-1}\left(-\right) \right)$. 
For abelian varieties we view this as \begin{equation} \label{eqn:FMT-action-central-charge} \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \cdot Z_{\Omega} = \zeta \, Z_{\Omega'} \end{equation} for some $\zeta \in \mathbb{C} \setminus \{0\}$, where $\Omega, \Omega'$ are complexified ample classes on $X, Y$ respectively. When $\zeta$ is real, one can expect that the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ gives an equivalence of some hearts of particular stability conditions on $X$ and $Y$, whose $\Omega$ and $\Omega'$ are determined by $\operatorname{Im} \zeta = 0$. In particular, we prove the following for abelian threefolds: \begin{thm} \label{prop:intro-main-stab-symmetries} The Fourier-Mukai transform $ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}: D^b(X) \to D^b(Y)$ between the abelian threefolds gives the following symmetries of Bridgeland stability conditions: $$ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} [1] \cdot \left( Z_{\Omega} , \ \mathcal{A}_{\Omega} \right) = \left( \zeta Z_{\Omega'}, \ \mathcal{A}_{\Omega'} \right) $$ for some $\zeta \in \mathbb{R}_{>0}$, and complexified ample classes $\Omega, \Omega'$ on $X, Y$ respectively. Here $\mathcal{A}_{\Omega} , \mathcal{A}_{\Omega'}$ are the double tilted stability condition hearts as in the construction of \cite{BMT}, and $ Z_{\Omega}, Z_{\Omega'}$ are the central charge functions as defined in \eqref{eqn:centralcharge}. \end{thm} The analogous result of the above theorem for abelian surfaces holds due to Huybrechts and Yoshioka and see \cite{HuyK3Equivalence, YoshiokaFMT} for further details. 
\subsection{Main ingredients} \label{sec:intro-main-ideas} \subsubsection{Fourier-Mukai theory and polarizations} The Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}: D^b(X) \to D^b(Y)$ between the abelian varieties induces a linear isomorphism $\Phi_{\mathcal{E}}^{\operatorname{\scriptscriptstyle{H}}}$ from $H^{2*}_{\operatorname{\scriptstyle{alg}}}(X,\mathbb{Q})$ to $H^{2*}_{\operatorname{\scriptstyle{alg}}}(Y,\mathbb{Q})$, called the cohomological Fourier-Mukai transform. In this article, we realize this linear isomorphism in anti-diagonal form with respect to some twisted Chern characters (see Theorem \ref{prop:antidiagonal-rep-cohom-FMT}). Furthermore, we prove the following. \begin{thm}[{= \ref{prop:derived-induce-polarization}}] \label{prop:intro-derived-polarization} If the ample line bundle $L$ defines a polarization on $X$, then the line bundle $\det (\Xi(L))^{-1}$ is ample and so it defines a polarization on $Y$. Here $\Xi $ is the Fourier-Mukai functor from $D^b(X)$ to $D^b(Y)$ defined by $$ \Xi = \mathcal{E}_{\{a\}\times Y}^* \circ \Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \circ \mathcal{E}_{X \times \{b\}}^*, $$ where $a, b$ are any two points on $X, Y$ respectively; and $\mathcal{E}_{\{a\}\times Y}^*$ denotes the functor $\mathcal{E}_{\{a\}\times Y}^* \otimes (-)$ and similar for $ \mathcal{E}_{X \times \{b\}}^*$. \end{thm} This theorem generalizes similar results for abelian surfaces (see \cite[Section 1.3]{YoshiokaFMT}) and for all abelian varieties with respect to the classical Fourier-Mukai transform with kernel the Poincar\'e bundle (see \cite{BL-polarization}). \subsubsection{Stability under Fourier-Mukai transforms} The main goal of this paper is to prove Theorem \ref{prop:intro-main-stab-symmetries}, and for that we need to establish the corresponding equivalence of the double tilt stability condition hearts on the abelian threefolds. 
This is a generalization of the main results in \cite{MP1, MP2, PiyThesis}. More specifically, we extend many techniques in \cite{MP1, MP2, PiyThesis} on a principally polarized abelian threefold with Picard rank one to a general abelian threefold. In Section \ref{sec:FMT-abelian-varieties}, we study the behavior of slope stability of sheaves under the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ on any abelian variety. In Section \ref{sec:equivalence-stab-hearts-surface} we establish the analogous result of Theorem \ref{prop:intro-main-stab-symmetries} for abelian surfaces, and our aim there is also to gain familiarity with the Fourier-Mukai techniques needed to prove our main theorem. Here we closely follow the proof of Yoshioka in \cite{YoshiokaFMT}. Understanding the cohomological Fourier-Mukai transform for abelian threefolds is central to this paper. In Sections \ref{sec:FMT-sheaves-abelian-threefolds} and \ref{sec:further-FMT-sheaves-abelian-threefolds}, we study the slope stability of sheaves under the Fourier-Mukai transforms. In particular, at the end of Section \ref{sec:further-FMT-sheaves-abelian-threefolds}, we prove that \begin{equation*} \label{dddd} \left.\begin{aligned} & \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \left(\mathcal{T}_{\Omega} \right) \subset \langle \mathcal{B}_{\Omega'}, \mathcal{B}_{\Omega'}[-1], \mathcal{B}_{\Omega'}[-2] \rangle \\ & \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \left(\mathcal{F}_{\Omega} \right) \subset \langle \mathcal{B}_{\Omega'}[-1], \mathcal{B}_{\Omega'}[-2], \mathcal{B}_{\Omega'}[-3] \rangle \end{aligned} \ \right\}.
\end{equation*} From the definition of the first tilt, we have that the images under the Fourier-Mukai transform $ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ of the objects in the abelian category $\mathcal{B}_{\Omega}$ have non-zero cohomologies with respect to $\mathcal{B}_{\Omega'}$ only in positions $0$, $1$ and $2$. We prove a similar result for the Fourier-Mukai transform $\Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}[1]: D^b(Y) \to D^b(X)$. That is \begin{equation*} \label{ddddds} \left.\begin{aligned} & \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \left( \mathcal{B}_{\Omega} \right) \subset \langle \mathcal{B}_{\Omega'} , \mathcal{B}_{\Omega'}[-1], \mathcal{B}_{\Omega'}[-2] \rangle \\ & \Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}[1] \left(\mathcal{B}_{\Omega'} \right) \subset \langle \mathcal{B}_{\Omega} , \mathcal{B}_{\Omega}[-1], \mathcal{B}_{\Omega}[-2] \rangle \end{aligned} \ \right\}. \end{equation*} Since we have the isomorphisms $\Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}[1] \circ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \cong [-2]$ and $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \circ \Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}[1] \cong [-2]$, the abelian categories $\mathcal{B}_{\Omega} $ and $\mathcal{B}_{\Omega'}$ behave somewhat similarly to the category of coherent sheaves on an abelian surface under the Fourier-Mukai transforms. Finally, in Section \ref{sec:FMT-tilt-stability}, we study the behavior of tilt stability under the Fourier-Mukai transforms.
In particular, we prove that \begin{equation*} \label{sdssdsdsd} \left.\begin{aligned} & \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \left(\mathcal{T}_{\Omega}' \right) \subset \langle \mathcal{F}_{\Omega'}', \mathcal{T}_{\Omega'}' [-1] \rangle \\ & \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \left(\mathcal{F}_{\Omega}' \right) \subset \langle \mathcal{F}_{\Omega'}'[-1], \mathcal{T}_{\Omega'}' [-2] \rangle \end{aligned} \ \right\}, \end{equation*} and similar results for $ \Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}[1]$. From the definition of the second tilt, we have the following: \begin{thm} \label{prop:intro-equivalence-threefolds} The derived equivalences $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} $ and $\Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}$ give the equivalences of the double tilted hearts $$ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}[1] \left(\mathcal{A}_{\Omega} \right) \cong \mathcal{A}_{\Omega'}, \ \text{ and } \ \Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}[2] \left(\mathcal{A}_{\Omega'} \right) \cong \mathcal{A}_{\Omega}. $$ \end{thm} \subsubsection{Bogomolov-Gieseker type inequality for abelian threefolds} For a given smooth projective threefold $X$, let $\mathcal{M}_{\Omega}$ be the class of tilt stable objects $E$ with zero tilt slope and $\mathop{\rm Ext}\nolimits^1_{\scriptscriptstyle X}(\mathcal{O}_x, E) = 0$ for all $x \in X$. In Lemma \ref{prop:minimal-objects-threefold-hearts}, we see that the objects in $\mathcal{M}_{\Omega}[1]$ are minimal objects (also called simple objects in the literature) in $\mathcal{A}_{\Omega}$. Moreover, due to Lemma \ref{prop:reduction-BG-ineq-class}, we only need to check the Bogomolov-Gieseker type inequalities for tilt stable objects in $\mathcal{M}_{\Omega}$. 
Minimal objects of the abelian subcategories $\mathcal{A}_{\Omega}$ are sent to minimal objects of $\mathcal{A}_{\Omega'}$ under the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}[1]$. This enables us to obtain an inequality involving the top part of the Chern character of minimal objects in these abelian categories. This is exactly the Bogomolov-Gieseker type inequality for tilt stable objects in $\mathcal{M}_{\Omega}$. Therefore, we have the following: \begin{thm}[=\ref{prop:BG-ineq-abelian-threefolds}] \label{prop:intro-BGineq-abelian-threefolds} Any tilt stable object with zero tilt slope satisfies the strong Bogomolov-Gieseker type inequality for any abelian threefold. \end{thm} Theorems \ref{prop:intro-equivalence-threefolds} and \ref{prop:intro-BGineq-abelian-threefolds} together with the double tilting construction in \cite{BMT} proves Theorem \ref{prop:intro-main-stab-symmetries}. \subsection{Higher dimensional abelian varieties} \label{sec:intro-higher-dim-abelian-varieties} In Section \ref{sec:cojectural-any-abelian}, for any abelian variety we conjecturally construct a heart for the central charge function \eqref{eqn:centralcharge}, by using the notion of very weak stability condition (see Conjecture \ref{prop:conjecture-stab-cond}). This essentially generalizes the single tilting construction due to Bridgeland and Arcara-Bertram for surfaces (\cite{BriK3, AB}), and the conjectural double tilting construction due to Bayer-Macr\`i-Toda for threefolds (\cite{BMT}). By considering the complexified ample classes $\Omega$ and $\Omega'$ determined by $\operatorname{Im} \zeta = 0$ in \eqref{eqn:FMT-action-central-charge}, we formulate the following for the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$. 
\begin{conj}[=\ref{prop:conjecture-equivalence-stab-hearts}] \label{prop:intro-conjectural-equivalence} The Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}: D^b(X) \to D^b(Y)$ gives the equivalence of stability condition hearts conjecturally constructed in Conjecture \ref{prop:conjecture-stab-cond}: $$ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} [k] \left( \mathcal{A}^{\scriptscriptstyle X}_{\Omega} \right) = \mathcal{A}^{\scriptscriptstyle Y}_{\Omega'}. $$ Here $\Omega = -D_{\scriptscriptstyle X} + \lambda e^{i k\pi/g }\, \ell_{\scriptscriptstyle{X}} $ and $\Omega' = D_{\scriptscriptstyle Y} - (1/\lambda) e^{-i k \pi/g} \, \ell_{\scriptscriptstyle{Y}}$ are complexified ample classes on $X$ and $Y$ respectively, for any $k \in \{1, 2, \ldots, (g-1)\}$ and any $ \lambda \in \mathbb{R}_{>0}$. \end{conj} \subsection{Relation to the existing works} \label{sec:intro-current-work} \subsubsection{Relation to \cite{MP1, MP2, PiyThesis}} As mentioned before, this paper generalizes previous work \cite{MP1, MP2, PiyThesis} on a principally polarized abelian threefold with Picard rank one to any abelian threefold. Moreover, many proofs in this paper are adopted from that of the similar results in those works. Also for the completeness and for the convenience of the reader, we give almost all the proofs relevant to general abelian threefolds. In particular, we extend the proof of the Bogomolov-Gieseker type inequality conjecture in \cite{MP1,MP2, PiyThesis} for any abelian threefold by using the Fourier-Mukai theory. Let us highlight the connections of the notations in this paper with the notations in \cite{MP1, MP2, PiyThesis}. Suppose $X$ is a principally polarized abelian threefold with Picard rank one. Let $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits(X)$ be the corresponding principal polarization, and so $\ell_{\scriptscriptstyle{X}} ^3/6 =1$. 
The twisted Chern character of any $E \in D^b(X)$ is of the form $\mathop{\rm ch}\nolimits^B(E) = (a_0, a_1 \ell_{\scriptscriptstyle{X}} , a_2 \ell_{\scriptscriptstyle{X}} ^2/2 , a_3\ell_{\scriptscriptstyle{X}} ^3/6)$ for some $a_i \in \mathbb{Q}$ when $B$ is a rational class, and in \cite{MP1,MP2, PiyThesis} the authors simply denote such Chern characters in vector form \begin{equation} \label{eqn:chern-ppa3} (a_0, a_1, a_2, a_3) \in \mathbb{Q}^{4}. \end{equation} They consider the twisted slope function on $\mathop{\rm Coh}\nolimits(X)$ defined by $a_1/a_0$, and study the slope stability of sheaves under the Fourier-Mukai transforms on $X$. Moreover, they consider the tilt slope defined in terms of $a_0$, $a_1$ and $a_2$, and study the tilt stability of complexes in the first tilted hearts under the Fourier-Mukai transforms. In this paper we are interested in the twisted slope functions and also tilt slope functions defined with respect to the numerology in the vector $$ v^{B, \ell_{\scriptscriptstyle{X}} } (E)= (\ell_{\scriptscriptstyle{X}} ^3 \mathop{\rm ch}\nolimits_0^B(E) , \ell_{\scriptscriptstyle{X}} ^2 \mathop{\rm ch}\nolimits_1^B(E), 2\ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2^B(E) , 6 \mathop{\rm ch}\nolimits_3^B(E)). $$ Here $\ell_{\scriptscriptstyle{X}} $ is any ample class in $\mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$. Now one can see that for the principally polarized abelian threefold with Picard rank one case, $$ v^{B, \ell_{\scriptscriptstyle{X}} } (E) = 6 (a_0, a_1, a_2, a_3), $$ that is a fixed scalar multiple of the vector in \eqref{eqn:chern-ppa3}. \subsubsection{Relation to other works} The main results in this paper were summarized in the author's article \cite{PiyKinosaki} for the Proceedings of Kinosaki Symposium on Algebraic Geometry 2015. 
In \cite{BMS}, the authors establish the Bogomolov-Gieseker type inequality conjecture for any abelian threefold by extensive use of the multiplication map $x \mapsto mx$ on abelian threefolds. In \cite{YoshiokaFMT}, Yoshioka studied the behavior of slope stability under the Fourier-Mukai transform on abelian surfaces. Moreover, he established the claim in Conjecture \ref{prop:intro-conjectural-equivalence} for abelian surfaces using Fourier-Mukai theory, however, this is firstly known due to Huybrechts (\cite{HuyK3Equivalence}). In a forthcoming article we use the main result of this paper (Theorem \ref{prop:intro-main-stab-symmetries}) to prove the full support property and to study the stability manifold of any abelian threefold. \subsection{Notation} \begin{itemize}[leftmargin=*] \item When $\mathcal{A}$ is the heart of a bounded t-structure on a triangulated category $\mathcal{D}$, by $H_{\mathcal{A}}^i(-)$ we denote the corresponding $i$-th cohomology functor. \item For a set of objects $\mathcal{S} \subset \mathcal{D}$ in a triangulated category $\mathcal{D}$, by $\langle \mathcal{S} \rangle \subset \mathcal{D}$ we denote its extension closure, that is the smallest extension closed subcategory of $\mathcal{D}$ which contains $\mathcal{S}$. \item Unless otherwise stated, throughout this paper, all the varieties are smooth projective and defined over $\mathbb{C}$. For a variety $X$, by $\mathop{\rm Coh}\nolimits(X)$ we denote the category of coherent sheaves on $X$, and by $D^b(X)$ we denote the bounded derived category of $\mathop{\rm Coh}\nolimits(X)$. That is $D^b(X) = D^b( \mathop{\rm Coh}\nolimits(X))$. \item For $D^b(X)$ we simply write $\mathcal{H}^i(-)$ for $H_{\mathop{\rm Coh}\nolimits(X)}^i(-)$. \item For a variety $X$, by $\omega_X$ we denote its canonical line bundle, and let $K_X = c_1(\omega_X) $. 
\item For $ M = \mathbb{Q}, \mathbb{R}, \text{ or } \mathbb{C}$ we write $\mathop{\rm NS}\nolimits_M(X) = \mathop{\rm NS}\nolimits(X) \otimes_{\mathbb{Z}} M$. \item For $0 \le i \le \dim X$, $\mathop{\rm Coh}\nolimits_{\le i}(X) = \{E \in \mathop{\rm Coh}\nolimits(X): \dim \mathop{\rm Supp}\nolimits(E) \le i \}$, $\mathop{\rm Coh}\nolimits_{\ge i}(X) = \{E \in \mathop{\rm Coh}\nolimits(X): \text{for } 0 \ne F \subset E, \ \dim \mathop{\rm Supp}\nolimits(F) \ge i \}$ and $\mathop{\rm Coh}\nolimits_{i}(X) = \mathop{\rm Coh}\nolimits_{\le i}(X) \cap \mathop{\rm Coh}\nolimits_{\ge i}(X)$. \item For $E \in D^b(X)$, $E^\vee = \mathbf{R} \operatorname{\mathcal{H}\textit{om}}(E, \mathcal{O}_X)$. When $E$ is a sheaf we write its dual sheaf $\mathcal{H}^0(E^\vee)$ by $E^*$. \item The structure sheaf of a closed subscheme $Z \subset X$ as an object in $\mathop{\rm Coh}\nolimits(X)$ is denoted by $\mathcal{O}_Z$, and when $Z = \{x\}$ for a closed point $x\in X$, it is simply denoted by $\mathcal{O}_x$. \item $\mathop{\rm ch}\nolimits_{\le k} = (\mathop{\rm ch}\nolimits_0, \mathop{\rm ch}\nolimits_1, \ldots, \mathop{\rm ch}\nolimits_k, 0, \ldots, 0)$, and $\mathop{\rm ch}\nolimits_{\ge k} = (0, \ldots, 0, \mathop{\rm ch}\nolimits_k, \mathop{\rm ch}\nolimits_{k+1}, \ldots, \mathop{\rm ch}\nolimits_n)$. \item For $B \in \mathop{\rm NS}\nolimits_{\mathbb{R}}(X)$, the twisted Chern character $\mathop{\rm ch}\nolimits^B(-) = e^{-B} \cdot \mathop{\rm ch}\nolimits(-)$. For ample $H \in \mathop{\rm NS}\nolimits_{\mathbb{R}}(X)$, we define $v^{B,H}(E) = (H^3 \mathop{\rm ch}\nolimits_0^B(E), H^2 \mathop{\rm ch}\nolimits_1^B(E), 2H \mathop{\rm ch}\nolimits_2^B(E), 6 \mathop{\rm ch}\nolimits_3^B(E))$. \item The twisted slope on $\mathop{\rm Coh}\nolimits(X)$ is defined by $\mu_{H,B}(E) = \dfrac{H^2 \mathop{\rm ch}\nolimits_1^B(E)}{H^3 \mathop{\rm ch}\nolimits_0(E)} = \dfrac{v_1^{B,H}(E)}{v_0^{B,H}(E)}$. 
\item Tilt slope on $\mathcal{B}_{H,B}$ is defined by $$ \nu_{H,B, \alpha} (E) = \dfrac{H \mathop{\rm ch}\nolimits_2^{B}(E) - (\alpha^2/2) H^3\mathop{\rm ch}\nolimits_0(E)}{H^2 \mathop{\rm ch}\nolimits^B_1(E)} = \dfrac{v_2^{B,H}(E) - \alpha^2 v_0^{B,H}(E)}{2 v_1^{B,H}(E)}. $$ \item $\operatorname{HN}^{\mu}_{H, B}(I) = \langle E \in \mathop{\rm Coh}\nolimits(X) : E \text{ is } \mu_{H , B}\text{-semistable with } \mu_{H , B}(E) \in I \rangle$. Similarly, we define $\operatorname{HN}^{\nu}_{H, B}(I) \subset \mathcal{B}_{H,B}$. \item We denote the upper half plane $\{z \in \mathbb{C} : \operatorname{Im} z >0\}$ by $\mathbb{H}$. \item We will denote a $g \times g$ anti-diagonal matrix with entries $a_k$, $k=1, \ldots, g$ by $$ \operatorname{Adiag}(a_1, \ldots, a_g)_{ij} := \begin{cases} a_k & \text{if } i=k, j=g+1-k \\ 0 & \text{otherwise}. \end{cases} $$ \end{itemize} \subsection{Acknowledgements} The author would like to specially thank Antony Maciocia for his guidance given during his doctoral studies. The author is grateful to Yukinobu Toda for very helpful discussions relating to this work, and also to Arend Bayer and Tom Bridgeland for very useful comments and suggestions given during the author's PhD defense. This work is supported by the World Premier International Research Center Initiative (WPI Initiative), MEXT, Japan. \section{Preliminaries} \label{sec:preliminaries} \subsection{Some homological algebra} \label{sec:some-homological-algebra} A {\it triangulated category} $\mathcal{D}$ is an additive category equipped with a shift functor, and a class of triangles, called distinguished triangles satisfying certain axioms. We denote the shift functor by $[1]: \mathcal{D} \to \mathcal{D}$, and write a distinguished triangle as $A \to B \to C \to A[1]$. The bounded derived categories of coherent sheaves on smooth projective varieties are the most important examples of triangulated categories in this paper. 
\begin{defi} \rm A {\it t-structure} on $\mathcal{D}$ is a pair of strictly full subcategories $(\mathcal{D}^{\le 0}, \mathcal{D}^{\ge 0} )$ such that, if we let $\mathcal{D}^{\le n} =\mathcal{D}^{\le 0}[-n]$ and $\mathcal{D}^{\ge n} =\mathcal{D}^{\ge 0}[-n]$, for $n \in \mathbb{Z}$, then we have \begin{enumerate}[label=(\roman*)] \item $\mathcal{D}^{\le 0} \subset \mathcal{D}^{\le 1}$, $\mathcal{D}^{\ge 0} \supset \mathcal{D}^{\ge 1}$, \item $\mathop{\rm Hom}\nolimits_{\mathcal{D}}(E, F ) = 0$ for $E \in \mathcal{D}^{\le 0}$ and $F \in \mathcal{D}^{\ge 1}$, \item for any $G \in \mathcal{D}$ there exists a distinguished triangle $E \to G \to F \to E[1]$ such that $E \in \mathcal{D}^{\le 0}$ and $F \in \mathcal{D}^{\ge 1}$. \end{enumerate} The {\it heart} $\mathcal{C} $ of this t-structure is $\mathcal{C} = \mathcal{D}^{\le 0} \cap \mathcal{D}^{\ge 0}$. The t-structure is called {\it bounded} if $$ \bigcup_{n \in \mathbb{Z}} \mathcal{D}^{\le n} = \mathcal{D} = \bigcup_{n \in \mathbb{Z}} \mathcal{D}^{\ge n}. $$ \end{defi} It is known that the heart $\mathcal{C}$ is an abelian category, and also a bounded t-structure is determined by its heart (see \cite[Lemma 3.1]{BriK3}). So we denote the $i$-th cohomology of $E \in \mathcal{D}$ with respect to the t-structure $(\mathcal{D}^{\le 0}, \mathcal{D}^{\ge 0} )$ by $H^i_{\mathcal{C}}(E)$. If $A \to B \to C \to A[1]$ is a distinguished triangle in $\mathcal{D}$, then we have the exact sequence $$ \cdots \to H^{i-1}_{\mathcal{C}}(C) \to H^i_{\mathcal{C}}(A) \to H^i_{\mathcal{C}}(B) \to H^i_{\mathcal{C}}(C) \to H^{i+1}_{\mathcal{C}}(A) \to \cdots $$ of cohomologies from $\mathcal{C}$. Let $D^b(\mathcal{A})$ be the bounded derived category of an abelian category $\mathcal{A}$. 
Then the pair of subcategories \begin{equation*} \left.\begin{aligned} &D^b(\mathcal{A})^{\le 0} = \{E \in D^b(\mathcal{A}) : H^i_{\mathcal{A}}(E)= 0 \text{ for } i>0 \} \\ &D^b(\mathcal{A})^{\ge 0} = \{E \in D^b(\mathcal{A}) : H^i_{\mathcal{A}}(E)= 0 \text{ for } i<0 \} \end{aligned} \ \right\} \end{equation*} define a bounded t-structure on $D^b(\mathcal{A})$ and the corresponding heart is $\mathcal{A}$. This is called the {\it standard t-structure} on $D^b(\mathcal{A})$. Let us discuss about the torsion theory of an abelian category. It provides a useful method, called tilting, to construct interesting t-structures from the known ones. This was first introduced by Happel, Reiten and Smal{\o} in \cite{HRS}. \begin{defi} \label{dsdsdsdsd} \rm A {\it torsion pair} on an abelian category $\mathcal{A}$ is a pair of subcategories $(\mathcal{T}, \mathcal{F})$ of $\mathcal{A}$ such that \begin{enumerate}[label=(\roman*)] \item $\mathop{\rm Hom}\nolimits_{\mathcal{A}}(T,F) = 0$ for every $T \in \mathcal{T}$, $F \in \mathcal{F}$, and \item every $E \in \mathcal{A}$ fits into a short exact sequence $0 \to T \to E \to F \to 0$ in $\mathcal{A}$ for some $T \in \mathcal{T}$, $F \in \mathcal{F}$. \end{enumerate} \end{defi} \begin{lem}[{\cite[Proposition 2.1]{HRS}}] \label{sdsdsdsd} Let $\mathcal{A}$ be the heart of a bounded t-structure on a triangulated category $\mathcal{D}$ and let $(\mathcal{T},\mathcal{F})$ be a torsion pair on $\mathcal{A}$. 
Then the full subcategory defined by $$ \mathcal{B} = \{E \in \mathcal{D} : H^{i}_{\mathcal{A}}(E) = 0 \text{ for } i \ne -1,0, \ H^{-1}_{\mathcal{A}}(E) \in \mathcal{F}, \ H^{0}_{\mathcal{A}}(E) \in \mathcal{T} \} $$ is the heart of a bounded t-structure given by the pair of subcategories \begin{equation*} \left.\begin{aligned} &\mathcal{D}^{\le 0} = \{E \in \mathcal{D} : H^{i}_{\mathcal{A}}(E) = 0 \text{ for } i > 0, \ H^{0}_{\mathcal{A}}(E) \in \mathcal{T} \} \\ &\mathcal{D}^{\ge 0} = \{E \in \mathcal{D} : H^{i}_{\mathcal{A}}(E) = 0 \text{ for } i <-1, \ H^{-1}_{\mathcal{A}}(E) \in \mathcal{F} \} \end{aligned} \ \right\}. \end{equation*} \end{lem} The abelian subcategory $\mathcal{B} \subset \mathcal{D}$ is usually called the {\it tilt} of $\mathcal{A}$ with respect to the torsion pair $(\mathcal{T},\mathcal{F})$ and we also write $\mathcal{B} = \langle \mathcal{F}[1], \mathcal{T} \rangle$. The t-structures defined by the hearts $\mathcal{A}$ and $\mathcal{B}$ give two different views for the objects in the triangulated category $\mathcal{D}$. The {\it Grothendieck group} $K(\mathcal{A})$ of an abelian category $\mathcal{A}$ is the quotient of the free abelian group generated by the classes $[A]$ of objects $A \in \mathcal{A}$ modulo the relations given by $[A]+[C] = [B]$ for every short exact sequence $0 \to A \to B \to C \to 0$ in $\mathcal{A}$. Similarly, the Grothendieck group $K(\mathcal{D})$ of a triangulated category $\mathcal{D}$ is the free abelian group generated by the classes $[A]$ of $A\in \mathcal{D}$ with the relations $[A]+[C] = [B]$ for every distinguished triangle $A \to B \to C \to A[1]$ in $\mathcal{D}$. If $\mathcal{A}$ is the heart of a bounded t-structure on $\mathcal{D}$ then $ K(\mathcal{D})=K(\mathcal{A})$. Moreover, when $\mathcal{A} = \mathop{\rm Coh}\nolimits(X)$ for a variety $X$ we write $$ K(X) = K(\mathop{\rm Coh}\nolimits(X)) = K(D^b(X)). 
$$ \subsection{Bridgeland stability on varieties} Let us introduce the notion of stability conditions as in \cite{BriStab}. Let $\mathcal{A}$ be an abelian category. A group homomorphism $Z : K(\mathcal{A}) \to \mathbb{C}$ is called a \textit{stability function (also known as central charge function)}, if for all $0 \ne E \in \mathcal{A}$, $ Z(E) \in \mathbb{H} \cup \mathbb{R}_{<0}$. The \textit{phase} of $0 \ne E \in \mathcal{A}$ is defined by $ \phi(E) = \frac{1}{\pi} \arg Z(E) \in (0,1]$. An object $0 \ne E \in \mathcal{A}$ is called \textit{(semi)stable}, if for any $ 0 \ne A \varsubsetneq E $ in $\mathcal{A}$, $\phi(A) < (\le) \, \phi(E/A)$. A \textit{Harder-Narasimhan filtration} of $0 \ne E \in \mathcal{A}$ is a finite chain of subobjects \begin{equation} \label{eqn:HN-filtration-definition} 0=E_0 \subset E_1 \subset \cdots \subset E_{n-1} \subset E_n = E, \end{equation} where the factors $F_k = E_k/E_{k-1}$, $k=1, \ldots, n$, are semistable in $\mathcal{A}$ with $$ \phi(F_1) > \phi(F_2) > \cdots > \phi(F_{n-1}) > \phi(F_n). $$ The stability function $Z$ satisfies the \textit{Harder-Narasimhan property} for $\mathcal{A}$, if such a filtration exists for any non-trivial object in $\mathcal{A}$. When the Harder-Narasimhan property holds for $\mathcal{A}$ with respect to the stability function $Z$, one can show that the filtration \eqref{eqn:HN-filtration-definition} is unique for a given $E \in \mathcal{A}$. \begin{defi}[{\cite[Proposition 5.3]{BriStab}}] \label{defi:Bridgeland-stability} A stability condition on a triangulated category $\mathcal{D}$ is given by a pair $(Z, \mathcal{A})$, where $\mathcal{A}$ is the heart of a bounded t-structure on $\mathcal{D}$ and $Z: K(\mathcal{A}) \to \mathbb{C}$ is a stability function, such that the Harder-Narasimhan property holds for $\mathcal{A}$ with respect to the stability function $Z$. \end{defi} Let $X$ be a smooth projective variety and let $D^b(X)$ be the bounded derived category of coherent sheaves on $X$. 
We are interested in stability conditions $\sigma = (Z, \mathcal{A})$ on $D^b(X)$, where the stability function $Z: K(X) \to \mathbb{C}$ factors through the Chern character map $\mathop{\rm ch}\nolimits: K(X) \to H^{2*}_{\operatorname{\scriptstyle{alg}}}(X,\mathbb{Q})$. Such stability conditions are usually called \textit{numerical stability conditions}. A stability condition $\sigma$ on $D^b(X)$ is called \textit{geometric} if all the skyscraper sheaves $\mathcal{O}_x$ of $x \in X$ are $\sigma$-stable of the same phase. The following result gives some properties of geometric stability conditions on varieties. \begin{prop} \label{prop:property-higher-dim-stability-hearts} Let $X$ be a smooth projective variety of dimension $n$. Let $\sigma= (Z, \mathcal{A})$ be a geometric stability condition on $D^b(X)$ with all the skyscraper sheaves $\mathcal{O}_x$ of $x \in X$ are $\sigma$-stable with phase one. If $E \in \mathcal{A}$ then $\mathcal{H}^i(E) = 0$ for $i \notin \{-n+1, -n+2, \ldots ,0\}$. \end{prop} \begin{proof} The following proof is adapted from \cite[Lemma 10.1]{BriK3}. Let $\mathcal{P}$ be the corresponding slicing of $\sigma$. Since $\mathcal{A} = \mathcal{P}((0,1])$ and $\mathop{\rm Coh}\nolimits_0(X) \subset \mathcal{P}(1)$, from the Harder-Narasimhan property, we only need to consider $E \in \mathcal{A} $ such that $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} (\mathop{\rm Coh}\nolimits_0(X), E) =0$. For any skyscraper sheaf $\mathcal{O}_x$ of $x \in X$ we have $\mathcal{O}_x[i] \in \mathcal{P}(1+i)$ and $E[i] \in \mathcal{P}((i,1+i])$. Therefore, for all $i<0$, $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(E, \mathcal{O}_x[i]) = 0$, and $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{O}_x, E[1+i])$ $\cong$ $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(E,\mathcal{O}_x [n-1-i])^* =0$. So by \cite[Proposition 5.4]{BM}, $E$ is quasi-isomorphic to a complex of locally free sheaves of length $n$. This completes the proof as required. 
\end{proof} When $X$ is a smooth projective curve, the central charge function $Z$ defined by $Z(-) = -\deg(-) + i \mathop{\rm rk}\nolimits (-)$ together with the heart $\mathop{\rm Coh}\nolimits(X)$ of the standard t-structure defines a geometric stability condition on $D^b(X)$. However, for a smooth projective variety $X$ with $\dim X \ge 2$, there is no numerical stability condition on $D^b(X)$ with $\mathop{\rm Coh}\nolimits(X)$ as the heart of a stability condition (see \cite[Lemma 2.7]{TodLimit} for a proof). In fact, for a smooth projective surface $X$, when $\sigma = (Z, \mathcal{A})$ is a geometric Bridgeland stability condition, the heart $\mathcal{A}$ is a tilt of $\mathop{\rm Coh}\nolimits(X)$ with respect to a torsion pair coming from the usual slope stability on $\mathop{\rm Coh}\nolimits(X)$ (see \cite{BriK3, AB}). \subsection{Double tilting stability construction on threefolds} \label{sec:double-tilting-construction} Let us briefly recall the conjectural construction of stability conditions on a given smooth projective threefold $X$ as introduced in \cite{BMT}. Let $H, B \in \mathop{\rm NS}\nolimits_{\mathbb{R}}(X)$ such that $H$ is an ample class. The twisted Chern character with respect to $B$ is defined by \begin{equation*} \mathop{\rm ch}\nolimits^B(-) = e^{-B} \mathop{\rm ch}\nolimits (-). \end{equation*} The twisted slope $\mu_{H , B}$ on $\mathop{\rm Coh}\nolimits(X)$ is defined by, for $E \in \mathop{\rm Coh}\nolimits(X)$ $$ \mu_{H, B} (E) = \begin{cases} + \infty & \text{if } E \text{ is a torsion sheaf} \\ \frac{H^{2} \mathop{\rm ch}\nolimits_1^B(E)}{H^3 \mathop{\rm ch}\nolimits^B_0(E)} & \text{otherwise}. \end{cases} $$ So we have $\mu_{H,B + \beta H} = \mu_{H, B} - \beta$. We say $E \in \mathop{\rm Coh}\nolimits(X)$ is $\mu_{H , B}$-(semi)stable, if for any $0 \ne F \varsubsetneq E$, $\mu_{H , B}(F)< (\le) \mu_{H , B}(E/F)$. 
\begin{defi} \label{def:discriminant} For $E \in D^b(X)$ we define \begin{align*} & \Delta(E) = (\mathop{\rm ch}\nolimits_1(E))^2- 2 \mathop{\rm ch}\nolimits_0(E) \mathop{\rm ch}\nolimits_2(E) \in H^4_{\operatorname{\scriptstyle{alg}}}(X, \mathbb{Z}) ,\\ & \overline{\Delta}_{H, B}(E) = (H^2 \mathop{\rm ch}\nolimits_1^B(E))^2 - 2 H^3 \mathop{\rm ch}\nolimits_0(E) H \mathop{\rm ch}\nolimits_2^B(E). \end{align*} \end{defi} \begin{lem}[{Bogomolov-Gieseker Inequality, \cite{HLBook}}] \label{prop:usual-BG-ineq} Let $E$ be $\mu_{H,B}$ semistable torsion free sheaf. Then it satisfies $$ H \cdot \Delta(E) \ge 0, \ \text{ and } \ \overline{\Delta}_{H, B}(E) \ge 0. $$ \end{lem} The Harder-Narasimhan property holds for $\mu_{H , B}$ stability on $\mathop{\rm Coh}\nolimits(X)$. This enables us to define the following slopes: \begin{equation*} \label{eqn:highest-lowest-HN-slopes} \left.\begin{aligned} &\mu_{H , B}^{+}(E) = \max_{0 \ne G \subseteq E} \ \mu_{H , B}(G) \\ & \mu_{H , B}^{-}(E) = \min_{G \subsetneq E} \ \mu_{H , B}(E/G) \end{aligned} \ \right\}. \end{equation*} Moreover, for a given interval $I \subset \mathbb{R} \cup\{+\infty\}$, we define the subcategory $\operatorname{HN}^{\mu}_{H, B}(I) \subset \mathop{\rm Coh}\nolimits(X)$ by \begin{equation} \label{eqn:HN-mu-interval-subcat} \operatorname{HN}^{\mu}_{H, B}(I) = \langle E \in \mathop{\rm Coh}\nolimits(X) : E \text{ is } \mu_{H , B}\text{-semistable with } \mu_{H , B}(E) \in I \rangle. \end{equation} The subcategories $\mathcal{T}_{H , B}$ and $\mathcal{F}_{H , B}$ of $\mathop{\rm Coh}\nolimits(X)$ are defined by \begin{align*} \mathcal{T}_{H , B} = \operatorname{HN}^{\mu}_{H, B}((0, +\infty]), \ \ \ \mathcal{F}_{H , B} = \operatorname{HN}^{\mu}_{H, B}((-\infty, 0]). 
\end{align*} Now $( \mathcal{T}_{H , B} , \mathcal{F}_{H, B})$ forms a torsion pair on $\mathop{\rm Coh}\nolimits(X)$ and let the abelian category \begin{equation*} \mathcal{B}_{H , B} = \langle \mathcal{F}_{H , B}[1], \mathcal{T}_{H, B} \rangle \subset D^b(X) \end{equation*} be the corresponding tilt of $\mathop{\rm Coh}\nolimits(X)$. Let $\alpha \in \mathbb{R}_{>0}$. Following \cite{BMT}, the tilt-slope $\nu_{H, B, \alpha} $ on $\mathcal{B}_{H , B}$ is defined by, for $E \in \mathcal{B}_{H,B}$ $$ \nu_{H, B, \alpha}(E) = \begin{cases} +\infty & \text{if } H^2 \mathop{\rm ch}\nolimits^B_1(E) = 0 \\ \frac{H \mathop{\rm ch}\nolimits_2^{B}(E) - (\alpha^2/2) H^3\mathop{\rm ch}\nolimits_0(E)}{H^2 \mathop{\rm ch}\nolimits^B_1(E)} & \text{otherwise}. \end{cases} $$ In \cite{BMT}, the notion of $\nu_{H , B, \alpha}$-stability for objects in $\mathcal{B}_{H , B}$ is introduced in a similar way to $\mu_{H, B}$-stability on $\mathop{\rm Coh}\nolimits(X)$. Also it is proved that the abelian category $\mathcal{B}_{H , B}$ satisfies the Harder-Narasimhan property with respect to $\nu_{H , B, \alpha}$-stability. Then similar to \eqref{eqn:HN-mu-interval-subcat} we define the subcategory $\operatorname{HN}^{\nu}_{H, B, \alpha}(I) \subset \mathcal{B}_{H, B}$ for an interval $I \subset \mathbb{R} \cup\{+\infty\}$. The subcategories $\mathcal{T}_{H , B, \alpha}'$ and $\mathcal{F}_{H , B, \alpha}'$ of $\mathcal{B}_{H, B}$ are defined by \begin{align*} \mathcal{T}_{H , B, \alpha}' = \operatorname{HN}^{\nu}_{H, B, \alpha}((0, +\infty]), \ \ \ \mathcal{F}_{H , B, \alpha}' = \operatorname{HN}^{\nu}_{H, B, \alpha}((-\infty, 0]). \end{align*} Then $( \mathcal{T}_{H , B, \alpha}' , \mathcal{F}_{H , B, \alpha}')$ forms a torsion pair on $\mathcal{B}_{H , B}$, and let the abelian category \begin{equation} \label{eqn:double-tilt-heart} \mathcal{A}_{H , B, \alpha} = \langle \mathcal{F}_{H , B, \alpha}'[1],\mathcal{T}_{H , B, \alpha}' \rangle \subset D^b(X) \end{equation} be the corresponding tilt. 
\begin{defi} \label{defi:central-charge} The central charge $Z_{H,B, \alpha} : K(X) \to \mathbb{C}$ is defined by $$ Z_{H,B, \alpha}(-) = \int_X e^{-B - i \sqrt{3}\alpha H} \mathop{\rm ch}\nolimits(-). $$ \end{defi} In \cite{BMT}, the authors made the following conjecture to construct stability conditions. \begin{conj}[{\cite[Conjecture 3.2.6]{BMT}}] \label{prop:BMT-stab-cond-conjecture} The pair $(Z_{H,B, \alpha}, \mathcal{A}_{H,B, \alpha})$ is a Bridgeland stability condition on $D^b(X)$. \end{conj} Let us assume $H, B \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ and $\alpha^2 \in \mathbb{Q}$ then similar to the proof of \cite[Proposition 5.2.2]{BMT} one can show that the abelian category $\mathcal{A}_{H,B, \alpha}$ is Noetherian. Therefore Conjecture \ref{prop:BMT-stab-cond-conjecture} is equivalent to saying that any $\nu_{H,B, \alpha}$-stable object $E \in \mathcal{B}_{H,B}$ with $\nu_{H,B,\alpha}(E) =0$ satisfies \begin{equation*} \label{eqn:weak-BG-ineq} \mathop{\rm Re}\nolimits Z_{H,B, \alpha}(E[1]) < 0. \end{equation*} See \cite[Corollary 5.2.4]{BMT} for further details. Moreover in \cite{BMT} they proposed the following strong inequality: \begin{conj}[{\cite[Conjecture 1.3.1]{BMT}}] \label{prop:BG-ineq-conjecture} Any $\nu_{H,B, \alpha}$-stable object $E \in \mathcal{B}_{H,B}$ with $\nu_{H,B,\alpha}(E) =0$ satisfies the so-called \textbf{Bogomolov-Gieseker Type Inequality}: $$ \mathop{\rm ch}\nolimits_{3}^B(E) - \frac{1}{6}\alpha^2 H^2 \mathop{\rm ch}\nolimits_1^{B}(E) \le 0. $$ \end{conj} Since this stronger conjectural inequality implies the above weak inequality, Conjecture \ref{prop:BG-ineq-conjecture} implies Conjecture~\ref{prop:BMT-stab-cond-conjecture}. \subsection{Some properties of tilt stable objects and minimal objects} \label{sec:tiltproperties} Let $X$ be a smooth projective threefold. We follow the same notations for tilt stability introduced in Section \ref{sec:double-tilting-construction} for $X$. 
\begin{prop}[{\cite[Lemma 3.2.1]{BMT}}] \label{prop:first-tilt-behaves-like-sheaves-surfaces} For any $0 \ne E \in \mathcal{B}_{H,B}$, one of the following conditions holds: \begin{enumerate}[label=(\roman*)] \item $H^2 \mathop{\rm ch}\nolimits_1^B(E) > 0$, \item $H^2 \mathop{\rm ch}\nolimits_1^B(E) =0$ and $\operatorname{Im} Z_{H,B,\alpha}(E) > 0$, \item $H^2 \mathop{\rm ch}\nolimits_1^B(E) = \operatorname{Im} Z_{H,B,\alpha}(E) =0$, $- \mathop{\rm Re}\nolimits Z_{H,B,\alpha}(E) > 0$ and $E \cong T$ for some $0 \ne T \in \mathop{\rm Coh}\nolimits_0(X)$. \end{enumerate} \end{prop} \begin{prop}[{\cite[Proposition 3.2]{PiyFano3}}] \label{prop:reflexivityatminus1place} Let $E \in \operatorname{HN}^{\nu}_{H, B, \alpha}((-\infty,+\infty))$. Then $\mathcal{H}^{-1}(E)$ is a reflexive sheaf. \end{prop} Let us recall the following slope bounds from \cite{PT} for cohomology sheaves of complexes in the abelian category $\mathcal{B}_{H,B}$. \begin{prop} \label{prop:slope-bounds} Let $E \in \mathcal{B}_{H, B}$. 
Then we have the following: \begin{enumerate}[label=(\arabic*)] \item if $E \in \operatorname{HN}^{\nu}_{H, B, \alpha}((-\infty, 0)) $, then $\mathcal{H}^{-1}(E) \in \operatorname{HN}^{\mu}_{H, B} ((-\infty, - \alpha))$; \item if $E \in \operatorname{HN}^{\nu}_{H, B, \alpha}((0, +\infty)) $, then $\mathcal{H}^{0}(E) \in \operatorname{HN}^{\mu}_{H, B} (( \alpha, +\infty])$; and \item if $E$ is tilt semistable with $\nu_{H,B, \alpha}(E) =0$, then \begin{enumerate}[label=(\roman*)] \item $\mathcal{H}^{-1}(E) \in \operatorname{HN}^{\mu}_{H, B} ((-\infty, - \alpha])$ with equality $\mu_{H,B}(\mathcal{H}^{-1}(E)) = -\alpha $ holds if and only if $H^2 \mathop{\rm ch}\nolimits_2^{B - \alpha H}(\mathcal{H}^{-1}(E)) = 0$, that is when $\overline{\Delta}_{H,B}(\mathcal{H}^{-1}(E))=0$, and \item when $\mathcal{H}^{0}(E)$ is torsion free $\mathcal{H}^{0}(E) \in \operatorname{HN}^{\mu}_{H, B} ([ \alpha , +\infty))$ with equality $\mu_{H,B}(\mathcal{H}^{0}(E)) = \alpha $ holds if and only if $H^2 \mathop{\rm ch}\nolimits_2^{B+ \alpha H}(\mathcal{H}^{0}(E)) =0$, that is when $\overline{\Delta}_{H,B}(\mathcal{H}^{0}(E))=0$. \end{enumerate} \item Let $E$ be $\nu_{H,B, \alpha}$-stable with $\nu_{H,B, \alpha}(E) =0$. Then $$ H^2\mathop{\rm ch}\nolimits_1^{B + \alpha H}(E) \ge 0, \ \text{ and } \ H^2\mathop{\rm ch}\nolimits_1^{B - \alpha H}(E) \ge 0. $$ \end{enumerate} \end{prop} \begin{proof} (1), (2) and (3) follow from the $t=0$ case of \cite[Proposition 3.13]{PT}. (4) follows from (3) or from the $t=0$ case of \cite[Proposition 3.6]{PiyFano3}. \end{proof} First we recall the definition of a minimal object in an arbitrary abelian category. \begin{defi} \label{defi:minimal-objects} \rm Let $\mathcal{C}$ be an abelian category. Then a non-trivial object $A \in \mathcal{C}$ is said to be a {\it minimal object} if $0 \to E \to A \to F \to 0$ is a short exact sequence in $\mathcal{C}$, then $E \cong 0$ or $F \cong 0$. That is, $A \in \mathcal{C}$ is minimal when $A$ has no proper subobjects in $\mathcal{C}$. 
\end{defi} \begin{defi} \label{defi:double-tilt-minimal-objects} \rm Let $\mathcal{M}_{H,B, \alpha}$ be the class of all objects $E \in \mathcal{B}_{H , B,\alpha}$ such that \begin{enumerate}[label=(\roman*)] \item $E$ is $\nu_{H,B,\alpha}$-stable, \item $\nu_{H,B, \alpha}(E) = 0$, and \item $\mathop{\rm Ext}\nolimits_{\scriptscriptstyle X}^1(\mathcal{O}_x , E) = 0$ for any skyscraper sheaf $\mathcal{O}_x$ of $x \in X$. \end{enumerate} \end{defi} \begin{lem}[{\cite[Lemma 2.3]{MP1}}] \label{prop:minimal-objects-threefold-hearts} The following objects are minimal in $\mathcal{A}_{H,B,\alpha}$: \begin{enumerate} \item the skyscraper sheaves $\mathcal{O}_x$ of any $x \in X$, and \item objects which are isomorphic to $E[1]$, where $E \in \mathcal{M}_{H,B,\alpha}$. \end{enumerate} \end{lem} \begin{prop}[{\cite[Proposition 7.4.1]{BMT}}] \label{prop:trivial-discriminant-tilt-stable-objects} Let $E$ be a $\mu_{H, B}$-stable locally free sheaf on $X$ with $\overline{\Delta}_{H,B}(E) =0$. Then either $E$ or $E[1]$ in $\mathcal{B}_{H,B}$ is $\nu_{H,B, \alpha}$-stable. \end{prop} \begin{exam} \label{example:minimal-objects-line-bundles} \rm Let $L$ be a line bundle on the smooth projective threefold $X$. Let $D = c_1(L)$. By direct computation we have $\overline{\Delta}_{H, D \pm \alpha H}(L) = 0$ for any $\alpha >0$. So by Proposition~\ref{prop:trivial-discriminant-tilt-stable-objects}, $L \in \mathcal{B}_{H, D - \alpha H}$ and $L[1] \in \mathcal{B}_{H, D + \alpha H}$ are tilt stable objects. Moreover, one can check that $\nu_{H, D- \alpha H, \alpha}(L) =0$ and $\nu_{H, D + \alpha H, \alpha}(L[1]) =0$. So by Lemma~\ref{prop:minimal-objects-threefold-hearts}, \begin{align*} L[1] \in \mathcal{A}_{H, D- \alpha H, \alpha}, \ \text{ and } \ L[2] \in \mathcal{A}_{H, D + \alpha H, \alpha} \end{align*} are minimal objects. \end{exam} \begin{exam} \label{example:minimal-objects-semihomogeneous-bundles} \rm Let $X$ be an abelian threefold. 
Let $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ be an ample class. From Lemma \ref{prop:semihomo-numerical}--(2), for any $D \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ there exist stable semihomogeneous bundles $E$ on $X$ with $$ D = c_1(E)/ \mathop{\rm rk}\nolimits(E). $$ Moreover, $\mathop{\rm ch}\nolimits(E) = \mathop{\rm rk}\nolimits(E) e^{D}$. By direct computation one can check that $\overline{\Delta}_{\ell_{\scriptscriptstyle{X}} , D \pm \alpha \ell_{\scriptscriptstyle{X}} }(E) = 0$ for any $\alpha >0$. So by Proposition~\ref{prop:trivial-discriminant-tilt-stable-objects}, $E \in \mathcal{B}_{\ell_{\scriptscriptstyle{X}} , D - \alpha \ell_{\scriptscriptstyle{X}} }$ and $E[1] \in \mathcal{B}_{\ell_{\scriptscriptstyle{X}} , D + \alpha \ell_{\scriptscriptstyle{X}} }$ are tilt stable objects. Moreover, one can check that $\nu_{\ell_{\scriptscriptstyle{X}} , D- \alpha \ell_{\scriptscriptstyle{X}} , \alpha}(E) =0$ and $\nu_{\ell_{\scriptscriptstyle{X}} , D + \alpha \ell_{\scriptscriptstyle{X}} , \alpha}(E[1]) =0$. So by Lemma~\ref{prop:minimal-objects-threefold-hearts}, \begin{align*} E[1] \in \mathcal{A}_{\ell_{\scriptscriptstyle{X}} , D- \alpha \ell_{\scriptscriptstyle{X}} , \alpha}, \ \text{ and } \ E[2] \in \mathcal{A}_{\ell_{\scriptscriptstyle{X}} , D + \alpha \ell_{\scriptscriptstyle{X}} , \alpha} \end{align*} are minimal objects. \end{exam} \begin{note} \label{prop:BG-ineq-for-tilt-stable-trivial-discriminant} \rm The tilt stable objects associated to minimal objects in Examples \ref{example:minimal-objects-line-bundles} and \ref{example:minimal-objects-semihomogeneous-bundles} clearly satisfy the corresponding Bogomolov-Gieseker type inequalities in Conjecture \ref{prop:BG-ineq-conjecture}. \end{note} Let us reduce the requirement of Bogomolov-Gieseker type inequalities to the tilt stable objects in $\mathcal{M}_{H,B, \alpha}$ (see Definition \ref{defi:double-tilt-minimal-objects}). 
First we need the following proposition. \begin{prop}[{\cite[Proposition 3.5]{LM}}] \label{prop:some-tilt-stable-extensions} Let $0 \to E \to E' \to Q \to 0$ be a non splitting short exact sequence in $\mathcal{B}_{H,B}$ with $Q \in \mathop{\rm Coh}\nolimits_{0}(X)$, $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{O}_x, E') = 0$ for any $x \in X$, and $H^2 \mathop{\rm ch}\nolimits_1^B(E) \ne 0$. If $E$ is $\nu_{H,B, \alpha}$-stable then $E'$ is $\nu_{H,B, \alpha}$-stable. \end{prop} \begin{lem}[{\cite[Proposition 2.9]{MP1}}] \label{prop:reduction-BG-ineq-class} Let $E \in \mathcal{B}_{H,B}$ be $\nu_{H,B,\alpha}$ stable with $\nu_{H,B,\alpha}(E)=0$. Then there exists $E' \in \mathcal{M}_{H,B,\alpha}$ (that is $E'[1]$ is a minimal object in $\mathcal{A}_{H,B,\alpha}$) such that $$ 0 \to E \to E' \to Q \to 0 $$ is a short exact sequence in $\mathcal{B}_{H,B}$ for some $Q \in \mathop{\rm Coh}\nolimits_{0}(X)$. Since we have $\mathop{\rm ch}\nolimits_{3}^B(Q) - \frac{1}{6}\alpha^2 \mathop{\rm ch}\nolimits_1^{B}(Q) = \mathop{\rm ch}\nolimits_{3}(Q) \ge 0$, $E$ satisfies the Bogomolov-Gieseker type inequality in Conjecture \ref{prop:BG-ineq-conjecture} if $E' \in \mathcal{M}_{H,B, \alpha}$ satisfies the corresponding inequality. \end{lem} \subsection{Fourier-Mukai theory } \label{sec:FM-theory} Let us quickly recall some of the important notions in Fourier-Mukai theory. Further details can be found in \cite{HuyFMTBook}. Let $X,Y$ be smooth projective varieties and let $p_i$, $i=1,2$ be the projection maps from $X \times Y$ to $X$ and $Y$, respectively. The {\it Fourier-Mukai functor} $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}: D^b(X) \to D^b(Y)$ with kernel $\mathcal{E} \in D^b(X \times Y)$ is defined by $$ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(-) = \mathbf{R} p_{2*} (\mathcal{E} \stackrel{\textbf{L}}{\otimes} p_1^*(-)). 
$$ Let $ \mathcal{E}_L = \mathcal{E}^\vee \stackrel{\operatorname{\mathbf{L}}}{\otimes} p_2^*\omega_Y \, [\dim Y] $, and $\mathcal{E}_R = \mathcal{E}^\vee \stackrel{\operatorname{\mathbf{L}}}{\otimes} p_1^* \omega_X \, [\dim X]$. We have the following adjunctions (see \cite[Proposition 5.9]{HuyFMTBook}): $$ \Phi_{\mathcal{E}_L}^{\scriptscriptstyle Y \to \scriptscriptstyle X} \dashv \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \dashv \Phi_{\mathcal{E}_R}^{\scriptscriptstyle Y \to \scriptscriptstyle X}. $$ When $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ is an equivalence of the derived categories, usually it is called a {\it Fourier-Mukai transform}. On the other hand by Orlov's Representability Theorem (see \cite[Theorem 5.14]{HuyFMTBook}), any equivalence between $D^b(X)$ and $D^b(Y)$ is isomorphic to a Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ for some $\mathcal{E} \in D^b(X \times Y)$. Any Fourier-Mukai functor $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}: D^b(X) \to D^b(Y)$ induces a linear map $\Phi^{\operatorname{\scriptscriptstyle{H}}}_{\mathcal{E}} : H^{2*}_{\operatorname{\scriptstyle{alg}}}(X, \mathbb{Q}) \to H^{2*}_{\operatorname{\scriptstyle{alg}}}(Y, \mathbb{Q})$, usually called the cohomological Fourier-Mukai functor, and it is a linear isomorphism when $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ is a Fourier-Mukai transform. The induced transform fits into the following commutative diagram, due to the Grothendieck-Riemann-Roch theorem. 
$$ \xymatrixcolsep{4.5pc} \xymatrixrowsep{2.25pc} \xymatrix{ D^b(X) \ar[d]_{[-]} \ar[r]^{\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}} & D^b(Y) \ar[d]^{[-]} \\ K(X) \ar[d]_{v_X(-)} \ar[r]^{\Phi^K_{\mathcal{E}}} & K(Y) \ar[d]^{v_Y(-)} \\ H^{2*}_{\operatorname{\scriptstyle{alg}}}(X, \mathbb{Q}) \ar[r]^{\Phi^{\operatorname{\scriptscriptstyle{H}}}_{\mathcal{E}}} & H^{2*}_{\operatorname{\scriptstyle{alg}}}(Y, \mathbb{Q}) } $$ Here $v_Z(-) = \mathop{\rm ch}\nolimits(-) \sqrt{\text{td}_Z}$ is the Mukai vector map, where $\mathop{\rm ch}\nolimits: K(Z) \to H^{2*}_{\operatorname{\scriptstyle{alg}}}(Z, \mathbb{Q})$ is the Chern character map and $\text{td}_Z$ is the Todd class of $Z$. Let $v \in H^{2*}_{\operatorname{\scriptstyle{alg}}}(X, \mathbb{Q})$ be a Mukai vector. Then $v= \sum_{i=0}^{\dim X} v_i$ for $v_i \in H^{2i}_{\operatorname{\scriptstyle{alg}}}(X, \mathbb{Q})$ and the Mukai dual of $v$ is defined by $v^\vee = \sum_{i=0}^{\dim X} (-1)^i v_i$. A symmetric bilinear form $\langle - , - \rangle_{\scriptscriptstyle X}$ called \textit{Mukai pairing} is defined by the formula \begin{equation*} \langle v, w \rangle_{\scriptscriptstyle X} = - \int_X v^\vee \cdot w \cdot e^{{c_1(X)}/{2} }. \end{equation*} Note that for an abelian variety $X$, $\text{td}_X =1$ and $c_1(X) =0$. Hence the Mukai vector $v(E)$ of $E \in D^b(X)$ is the same as its Chern character $\mathop{\rm ch}\nolimits(E)$. Due to Mukai and C\u{a}ld\u{a}raru-Willerton, for any $u \in H^{2*}_{\operatorname{\scriptstyle{alg}}}(Y, \mathbb{Q})$ and $v \in H^{2*}_{\operatorname{\scriptstyle{alg}}}(X, \mathbb{Q})$ we have \begin{equation} \label{eqn:Mukai-pairing-isometry} \left\langle \Phi^{\operatorname{\scriptscriptstyle{H}}}_{\mathcal{E}_L}(u) \, , \, v \right\rangle_{\scriptscriptstyle X} = \left\langle u \, , \, \Phi^{\operatorname{\scriptscriptstyle{H}}}_{\mathcal{E} }(v) \right\rangle_{\scriptscriptstyle Y} \end{equation} (see \cite[Proposition 5.44]{HuyFMTBook}, \cite{CW}). 
\subsection{Abelian varieties} \label{sec:abelian-varieties} Over any field, an {\it abelian variety} $X$ is a complete group variety, that is $X$ is an algebraic variety equipped with the maps $X \times X \to X, \ (x,y) \mapsto x+y$ (the group law), and $X \to X, \ x \mapsto -x$ (the inverse map), together with the identity element $e \in X$. For $a \in X$, the morphism $t_a : X \to X$ is defined by $t_a : x \mapsto x + a$. Over the field of complex numbers, an abelian variety is a complex torus with the structure of a projective algebraic variety. Let $\mathop{\rm Pic}\nolimits^0(X)$ be the subgroup of the abelian group $\mathop{\rm Pic}\nolimits(X)$ consisting of elements represented by the line bundles which are algebraically equivalent to zero, and the corresponding quotient $\mathop{\rm Pic}\nolimits(X)/ \mathop{\rm Pic}\nolimits^0(X)$ is the N\'eron-Severi group $\mathop{\rm NS}\nolimits(X) $. The group $\mathop{\rm Pic}\nolimits^0(X)$ is naturally isomorphic to an abelian variety called the \textit{dual abelian variety} of $X$, denoted by $\widehat{X}$. The \textit{Poincar\'e line bundle} $\mathcal{P}$ on the product $X \times \widehat{X}$ is the uniquely determined line bundle satisfying (i) $\mathcal{P}_{X \times \{\widehat{x}\}} \in \mathop{\rm Pic}\nolimits(X)$ is represented by $\widehat{x} \in \widehat{X}$, and (ii) $\mathcal{P}_{\{e\} \times \widehat{X} } \cong \mathcal{O}_{\widehat{X}}$. In \cite{MukFMT}, Mukai proved that the Fourier-Mukai functor $\Phi^{\scriptscriptstyle X \to \scriptscriptstyle \widehat{X} }_{\mathcal{P}}: D^b(X) \to D^b(\widehat{X})$ is an equivalence of the derived categories, that is a Fourier-Mukai transform. A vector bundle $E$ on an abelian variety $X$ is called \textit{homogeneous} if we have $t_x^*E \cong E$ for all $x \in X$. A vector bundle $E$ on $X$ is homogeneous if and only if $E$ can be filtered by line bundles from $\mathop{\rm Pic}\nolimits^0(X)$ (see \cite{MukSemihomo}). 
A vector bundle $E$ is called \textit{semihomogeneous} if for every $x \in X$ there exists a flat line bundle $\mathcal{P}_{X \times \{\widehat{x}\}}$ on $X$ such that $t_x^*E \cong E \otimes \mathcal{P}_{X \times \{\widehat{x}\}}$. A vector bundle $E$ is called \textit{simple} if we have $\mathop{\rm End}\nolimits_{\scriptscriptstyle X}(E) \cong \mathbb{C}$. \begin{lem}[{\cite[Theorem 5.8]{MukSemihomo}}] \label{prop:Mukai-semihomognoeus-properties} Let $E$ be a simple vector bundle on an abelian variety $X$. Then the following conditions are equivalent: \begin{enumerate}[label=(\arabic*)] \item $\dim H^1(X, \operatorname{\mathcal{E}\textit{nd}}(E))=g$, \item $E$ is semihomogeneous, \item $\operatorname{\mathcal{E}\textit{nd}}(E)$ is a homogeneous vector bundle. \end{enumerate} \end{lem} \begin{lem}[{\cite{MukSemihomo, Orl}}] \label{prop:semihomo-numerical} We have the following about simple semihomogeneous bundles: \begin{enumerate}[label=(\arabic*)] \item A rank $r$ simple semihomogeneous bundle $E$ has the Chern character \begin{equation*} \label{semihomochern} \mathop{\rm ch}\nolimits(E) = r \ e^{c_1(E)/r}. \end{equation*} \item For any $D_{\scriptscriptstyle X} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$, there exist simple semihomogeneous bundles $E$ on $X$ with $\mathop{\rm ch}\nolimits(E) = r \, e^{D_{\scriptscriptstyle X}}$ for some $r \in \mathbb{Z}_{>0}$. \item Let $E$ be a semihomogeneous bundle on $X$. Then $E$ is Gieseker semistable with respect to any ample bundle $L$, and if $E$ is simple then it is slope stable with respect to $c_1(L)$. \end{enumerate} \end{lem} See \cite{Orl} for further details. The image of an ample line bundle $L$ on $X$ under the Fourier-Mukai transform $\Phi_{\mathcal{P}}^{\scriptscriptstyle X \to \scriptscriptstyle \widehat{X} }$ is $$ \Phi_{\mathcal{P}}^{\scriptscriptstyle X \to \scriptscriptstyle \widehat{X} } (L) \cong \widehat{L} $$ for some rank $\chi(L) = c_1(L)^g/g!$ semihomogeneous bundle $\widehat{L}$. 
Here $g = \dim X$. Moreover, $-c_1(\widehat{L})$ is an ample divisor class on $\widehat{X}$. See \cite{BL-polarization} for further details. Therefore, we have the following: \begin{lem}[{\cite{BL-polarization}}] \label{classicalcohomoFMT} Let $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ be an ample class on $X$, and let $g = \dim X$. Under the induced cohomological transform $\Phi_{\mathcal{P}}^{\operatorname{\scriptscriptstyle{H}}}: H^{2*}_{\operatorname{\scriptstyle{alg}}}(X, \mathbb{Q}) \to H^{2*}_{\operatorname{\scriptstyle{alg}}}(\widehat{X},\mathbb{Q})$ of $\Phi_{\mathcal{P}}^{\scriptscriptstyle X \to \scriptscriptstyle \widehat{X} }$ we have \begin{equation*} \Phi_{\mathcal{P}}^{\operatorname{\scriptscriptstyle{H}}}(e^{\ell_{\scriptscriptstyle{X}} }) = ({\ell_{\scriptscriptstyle{X}} ^g}/{g!}) \, e^{-\ell_{\scriptscriptstyle \widehat{X}} } \end{equation*} for some ample class $\ell_{\scriptscriptstyle \widehat{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(\widehat{X})$, satisfying \begin{equation*} ({\ell_{\scriptscriptstyle{X}} ^g}/{g!}) ({\ell_{\scriptscriptstyle \widehat{X}} ^g}/{g!}) =1. \end{equation*} Moreover, for each $0 \le i \le g$, \begin{equation*} \Phi_\mathcal{P}^{\operatorname{\scriptscriptstyle{H}}}\left( \frac{ \ell_{\scriptscriptstyle X}^i}{i!} \right) = \frac{(-1)^{g-i} \ell_{\scriptscriptstyle X}^g}{g! (g-i)!} \, \ell_{\scriptscriptstyle \widehat{X} }^{g-i}. \end{equation*} \end{lem} \subsection{Some sheaf theory} \label{s2.8} In this paper, we shall encounter reflexive sheaves at several occasions, and so we recall some of the key properties of them. Let $X$ be a smooth projective variety of dimension $n$. Any coherent sheaf $E$ on $X$ admits a \textit{locally free resolution} of length $n$. In other words, $E$ fits into an exact sequence: $$ 0 \to F_n \to \cdots \to F_1 \to F_0 \to E \to 0 $$ for some locally free sheaves $F_i$ on $X$. 
For a coherent sheaf $E$ on $X$, its dual is $E^* = \operatorname{\mathcal{H}\textit{om}}(E, \mathcal{O}_X)$. There is a natural map from any $E \in \mathop{\rm Coh}\nolimits(X)$ to its double dual $E^{**}$, $E \to E^{**}$. If this map is an isomorphism then $E$ is called a \textit{reflexive} sheaf. When $E$ is a torsion free sheaf, $E$ injects into its double dual. \begin{lem}[{\cite[Lemma 1.1.2]{OSS}}] \label{prop:dim-of-supp-Ext} For any coherent sheaf $E$ on $X$ we have $$ \dim \mathop{\rm Supp}\nolimits \left(\operatorname{\mathcal{E}\textit{xt}}^{i}(E, \mathcal{O}_X)\right)\le (n-i), \text{ for all } i. $$ \end{lem} \begin{defi} \label{defi:singularity-set} The \textit{singularity set} $\mathop{\rm Sing}\nolimits(E)$ of a coherent sheaf $E \in \mathop{\rm Coh}\nolimits(X)$ is defined as the locus where $E$ is not locally free, that is $$ \mathop{\rm Sing}\nolimits(E) = \{ x \in X : \mathop{\rm Ext}\nolimits_{\scriptscriptstyle X}^1(E, \mathcal{O}_x) \ne 0 \}. $$ \end{defi} This coincides with $$ S_{n-1}(E) = \bigcup_{i = 1}^n \mathop{\rm Supp}\nolimits \left(\operatorname{\mathcal{E}\textit{xt}}^{i}(E, \mathcal{O}_X)\right). $$ See \cite[Chapter 2]{OSS} for further details. We collect some of the useful results about reflexive sheaves as follows. 
\begin{lem} \label{prop:reflexive-sheaf-results} We have the following: \begin{enumerate}[label=(\arabic*)] \item if $E$ is a reflexive sheaf then $\dim \mathop{\rm Sing}\nolimits(E) \le n-3$; \item a coherent sheaf $E$ is reflexive if and only if it fits into a short exact sequence $$ 0 \to E \to F \to G \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$ for a locally free sheaf $F$ and a torsion free sheaf $G$; \item any $E \in \mathop{\rm Coh}\nolimits(X)$ fits into an exact sequence $$ 0 \to T \to E \to E^{**} \to Q \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$, where $T$ is the maximal torsion subsheaf of $E$ and $Q$ is a torsion sheaf supported in a subscheme of at least codimension $2$; \item for any $E \in \mathop{\rm Coh}\nolimits(X)$, its dual $E^*$ is a reflexive sheaf; \item any rank one reflexive sheaf is locally free, that is a line bundle. \end{enumerate} \end{lem} \begin{proof} See Propositions 1.1, 1.3, 1.9 and Corollary 1.2 of \cite{HarshorneReflexive} for proofs of (2), (1), (5) and (4). The claim in (3) is an easy exercise. \end{proof} When $\dim X =3$, one can easily prove the following result which is useful in this paper to identify reflexive sheaves. \begin{lem} \label{prop:reflexive-sheaf-threefold} A coherent sheaf $E$ on a smooth projective threefold $X$ is reflexive if and only if \begin{enumerate}[label=(\roman*)] \item $\mathop{\rm Ext}\nolimits^1_{\scriptscriptstyle X}(\mathcal{O}_x, E) = 0$ for all $x \in X$, and \item $\mathop{\rm Ext}\nolimits^2_{\scriptscriptstyle X}(\mathcal{O}_x, E) \ne 0$ for finitely many $x \in X$. \end{enumerate} \end{lem} The following result of Simpson is very important for us. \begin{lem}[{\cite[Theorem 2]{Simpson}}] \label{prop:Simpson-result-trivial-disciminant} Let $X$ be a smooth projective variety of dimension $n \ge 3$. Let $L$ be an ample line bundle on $X$ and let $H$ be $c_1(L)$. 
Let $E$ be a slope semistable reflexive sheaf on $X$ with respect to $H$ such that $H^{n-1} \mathop{\rm ch}\nolimits_1(E)=H^{n-2} \mathop{\rm ch}\nolimits_2(E) =0$. Then all the Jordan-H\"{o}lder slope stable factors of $E$ are locally free sheaves which have vanishing Chern classes. \end{lem} \section{Cohomological Fourier-Mukai Transforms and Polarizations} \label{sec:cohomological-FMT} Let $Y$ be a $g$-dimensional abelian variety. Let us fix a class $D_{\scriptscriptstyle Y} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(Y)$. Let $X$ be the fine moduli space of rank $r$ simple semihomogeneous bundles $E$ on $Y$ with $c_1(E)/r =D_{\scriptscriptstyle Y}$. Due to Mukai, $X$ is a $g$-dimensional abelian variety. Let $\mathcal{E}$ be the associated universal bundle on $X \times Y$; so by Lemma \ref{prop:semihomo-numerical}--(1) we have $$ \mathop{\rm ch}\nolimits(\mathcal{E}_{\{x\} \times Y}) = r \, e^{D_{\scriptscriptstyle Y}}. $$ Let $\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y} : D^b(X) \to D^b(Y)$ be the corresponding Fourier-Mukai transform from $D^b(X)$ to $D^b(Y)$ with kernel $\mathcal{E}$. Then its quasi-inverse is given by $\Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}[g]$. Again, by Lemma \ref{prop:semihomo-numerical}--(1) we have $$ \mathop{\rm ch}\nolimits(\mathcal{E}_{X \times \{y\}} ) = r \, e^{D_{\scriptscriptstyle X}} $$ for some $D_{\scriptscriptstyle X} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$. \begin{defi} \label{defi:polarization} \rm A \textit{polarization} on $X$ is by definition the first Chern class $c_1(L)$ of an ample line bundle $L$ on $X$. However, it is usual to call the line bundle $L$ itself a polarization. \end{defi} Let $a \in X$ and $b \in Y$. 
Consider the Fourier-Mukai functor $\Gamma$ from $D^b(X)$ to $D^b(\widehat{Y})$ defined by $$ \Gamma = \Phi_\mathcal{P}^{\scriptscriptstyle Y \to \scriptscriptstyle \widehat{Y}} \circ \mathcal{E}_{\{a\}\times Y}^* \circ \Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \circ \mathcal{E}_{X \times \{b\}}^*\, [g], $$ where $\mathcal{E}_{\{a\}\times Y}^*$ denotes the functor $\mathcal{E}_{\{a\}\times Y}^* \otimes(-)$ and similar for $ \mathcal{E}_{X \times \{b\}}^*$. Let $\widehat \Gamma: D^b(\widehat{Y}) \to D^b(X) $ be the Fourier-Mukai functor defined by $$ \widehat \Gamma = \mathcal{E}_{X \times \{b\}} \circ \Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y\to \scriptscriptstyle X } \circ \mathcal{E}_{\{a\}\times Y} \circ\Phi_{\mathcal{P}^\vee}^{ \scriptscriptstyle \widehat{Y} \to \scriptscriptstyle Y }\, [g]. $$ Then $\widehat \Gamma $ and $\Gamma$ are adjoint functors to each other. By direct computation, $\Gamma (\mathcal{O}_{x}) = \mathcal{O}_{Z_x}$ for some $0$-subscheme $Z_x \subset \widehat{Y}$, and $\widehat \Gamma (\mathcal{O}_{\widehat y}) = \mathcal{O}_{Z_{\widehat y}}$ for some $0$-subscheme $Z_{\widehat y} \subset X$, where the lengths of $Z_x$ and $Z_{\widehat y}$ are $r^3$ and $r$, respectively. Therefore, the Fourier-Mukai kernel of $\Gamma$ is $\mathcal{F} \in \mathop{\rm Coh}\nolimits_g(X \times \widehat{Y})$, with $\mathcal{F}^{\vee} \cong \operatorname{\mathcal{E}\textit{xt}}^g(\mathcal{F}, \mathcal{O}_{X \times \widehat{Y}})[-g]$. So $\Gamma (\mathop{\rm Coh}\nolimits_{i}(X)) \subset \mathop{\rm Coh}\nolimits_i(\widehat{Y})$ and $\widehat \Gamma (\mathop{\rm Coh}\nolimits_i (\widehat{Y})) \subset \mathop{\rm Coh}\nolimits_i(X)$ for all $i$. Also by direct computation, $\Gamma(\mathcal{O}_X)$ and $\widehat \Gamma (\mathcal{O}_{\widehat{Y}}) $ are homogeneous bundles of rank $r$ and $r^3$, respectively. Let $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ be an ample class. 
\begin{prop} \label{prop:support-cohomological-result} Under the induced cohomological map $ \Gamma^{\operatorname{\scriptscriptstyle{H}}}: H^{2*}_{\operatorname{\scriptstyle{alg}}}(X,\mathbb{Q}) \to H^{2*}_{\operatorname{\scriptstyle{alg}}}(\widehat{Y},\mathbb{Q}) $, $$ \Gamma^{\operatorname{\scriptscriptstyle{H}}}(e^{\ell_{\scriptscriptstyle X}}) = r\ e^{\ell_{\scriptscriptstyle \widehat{Y}}}, $$ for some ample class $\ell_{\scriptscriptstyle \widehat{Y}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(\widehat{Y})$ satisfying $r^2 \, {\ell_{\scriptscriptstyle{X}} ^g} = {\ell_{\scriptscriptstyle \widehat{Y}}^g}$. Hence, under the induced cohomological map $ \widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}}: H^{2*}_{\operatorname{\scriptstyle{alg}}}(\widehat{Y},\mathbb{Q}) \to H^{2*}_{\operatorname{\scriptstyle{alg}}}(X,\mathbb{Q}) $, $$ \widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}}(e^{\ell_{\scriptscriptstyle \widehat{Y}}}) = r^3 \ e^{\ell_{\scriptscriptstyle X}}. $$ Moreover, for each $0 \le i \le g$, $$ \Gamma^{\operatorname{\scriptscriptstyle{H}}}( \ell_{\scriptscriptstyle X}^i) = r \, \ell_{\scriptscriptstyle \widehat{Y}}^i, \ \ \ \widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}}(\ell_{\scriptscriptstyle \widehat{Y}}^i) = r^3 \, \ell_{\scriptscriptstyle{X}} ^i. $$ \end{prop} \begin{proof} Since $\Gamma (\mathop{\rm Coh}\nolimits_{i}(X)) \subset \mathop{\rm Coh}\nolimits_i(\widehat{Y})$, for any $E$ we have $$ \Gamma^{\operatorname{\scriptscriptstyle{H}}} (\mathop{\rm ch}\nolimits_{\ge j}(E)) = \mathop{\rm ch}\nolimits_{ \ge j}(\Gamma (E)). $$ Here $\mathop{\rm ch}\nolimits_{\ge j } = (0,\cdots, \mathop{\rm ch}\nolimits_j, \mathop{\rm ch}\nolimits_{j+1}, \cdots, \mathop{\rm ch}\nolimits_g)$. 
Therefore, \begin{align*} \Gamma^{\operatorname{\scriptscriptstyle{H}}}(e^{ \ell_{\scriptscriptstyle X}}) & = \Gamma^{\operatorname{\scriptscriptstyle{H}}}\left(e^{0} + \mathop{\rm ch}\nolimits_{\ge 1}(e^{\ell_{\scriptscriptstyle X}}) \right) =\Gamma^{\operatorname{\scriptscriptstyle{H}}}\left(e^{0} \right) + \Gamma^{\operatorname{\scriptscriptstyle{H}}}\left( \mathop{\rm ch}\nolimits_{\ge 1}(e^{\ell_{\scriptscriptstyle X}}) \right)\\ & = \mathop{\rm ch}\nolimits(\Gamma(\mathcal{O}_X)) + \Gamma^{\operatorname{\scriptscriptstyle{H}}}\left( \mathop{\rm ch}\nolimits_{\ge 1}(e^{\ell_{\scriptscriptstyle X}}) \right) = (r, 0, \cdots,0) + (0, *, \cdots, *) \\ & = (r, * , \cdots, *). \end{align*} For any $k \in \mathbb{Z}$, there exists a semihomogeneous bundle $E_k$ with $k \ell_{\scriptscriptstyle{X}} = c_1(E_k)/\mathop{\rm rk}\nolimits(E_k)$. Its image $\Gamma (E_k)$ under the transform is also a semihomogeneous bundle such that $c_1(\Gamma (E_k))/ \mathop{\rm rk}\nolimits(\Gamma (E_k)) = D_k$ for some $D_k \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(\widehat{Y})$. So we deduce $$ \Gamma^{\operatorname{\scriptscriptstyle{H}}}(e^{ \ell_{\scriptscriptstyle X}}) = r \ e^{\ell_{\scriptscriptstyle \widehat{Y}}}, $$ for some class $\ell_{\scriptscriptstyle \widehat{Y}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(\widehat{Y})$. Moreover, for any $k$, \begin{align*} \Gamma^{\operatorname{\scriptscriptstyle{H}}}(e^{k \ell_{\scriptscriptstyle X}}) & = \Gamma^{\operatorname{\scriptscriptstyle{H}}}\left(k e^{\ell_{\scriptscriptstyle{X}} } - (k-1) e^0 + (0,0, *, \cdots, *) \right) \\ & = k r e^{\ell_{\scriptscriptstyle \widehat{Y}}} - (k-1)r e^0 + (0, 0, *, \cdots, *) = (r, r k \ell_{\scriptscriptstyle \widehat{Y}} , *, \cdots, *). \end{align*} So it has to be equal to $re^{k \ell_{\scriptscriptstyle \widehat{Y}}}$. 
For any $0 \le i \le g$, we can write $\ell_{\scriptscriptstyle{X}} ^i$ as a $\mathbb{Q}$-linear combination of $\{e^0, e^{\ell_{\scriptscriptstyle X}}, \cdots, e^{g \ell_{\scriptscriptstyle X}}\}$. Since $\Gamma^{\operatorname{\scriptscriptstyle{H}}}(e^{k \ell_{\scriptscriptstyle X}}) = re^{k \ell_{\scriptscriptstyle \widehat{Y}}}$, we have $\Gamma^{\operatorname{\scriptscriptstyle{H}}}( \ell_{\scriptscriptstyle X}^i) = r \, \ell_{\scriptscriptstyle \widehat{Y}}^i$. Similarly, we can prove the results involving $\widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}}$. Now let us prove that the class $\ell_{\scriptscriptstyle \widehat{Y}}$ is ample. For any $0 \le j \le g$, let $\widehat{Y}^{(j)} \subset \widehat{Y}$ be a closed $j$-dimensional subscheme of $\widehat{Y}$. Then we have \begin{align*} \int_{\widehat{Y}} \ell_{\scriptscriptstyle \widehat{Y}}^{g-j} \cdot [\widehat{Y}^{(j)}] & = \frac{1}{r^4} \int_{\widehat{Y}} \ell_{\scriptscriptstyle \widehat{Y}}^{g-j} \cdot \Gamma^{\operatorname{\scriptscriptstyle{H}}} \widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}} [\widehat{Y}^{(j)}] \\ & = \frac{(-1)^{g-j}}{r^4} \left\langle \ell_{\scriptscriptstyle \widehat{Y}}^{g-j} , \, \Gamma^{\operatorname{\scriptscriptstyle{H}}} \widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}} [\widehat{Y}^{(j)}] \right\rangle_{\scriptscriptstyle \widehat{Y}} \\ & = \frac{(-1)^{g-j}}{r^4} \left\langle \widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}}(\ell_{\scriptscriptstyle \widehat{Y}}^{g-j}) , \, \widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}} [\widehat{Y}^{(j)}] \right\rangle_{\scriptscriptstyle X}, \ \ \ \text{by \eqref{eqn:Mukai-pairing-isometry}} \\ & = \frac{(-1)^{g-j}}{r} \left\langle \ell_{\scriptscriptstyle X}^{g-j} , \, \widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}} [\widehat{Y}^{(j)}] \right\rangle_{\scriptscriptstyle X} \\ & = \frac{1}{r} \int_{X} \ell_{\scriptscriptstyle X}^{g-j} \cdot \widehat \Gamma^{\operatorname{\scriptscriptstyle{H}}} 
[\widehat{Y}^{(j)}] > 0, \end{align*} as $\widehat{\Gamma} \left(\mathcal{O}_{\widehat{Y}^{(j)}}\right) \in \mathop{\rm Coh}\nolimits_{j}(X)$ and $\ell_{\scriptscriptstyle X}$ is an ample class. Hence, from the Nakai-Moishezon criterion, $\ell_{\scriptscriptstyle \widehat{Y}}$ is an ample class on $\widehat{Y}$. \end{proof} By Lemma \ref{classicalcohomoFMT}, under the induced cohomological map of $\Phi_{\mathcal{P}^\vee}^{ \scriptscriptstyle \widehat{Y} \to \scriptscriptstyle Y }$ we have $$ \Phi_{\mathcal{P}^\vee}^{\operatorname{\scriptscriptstyle{H}}}(e^{\ell_{\scriptscriptstyle \widehat{Y}}}) =({\ell_{\scriptscriptstyle \widehat{Y}}^g}/{g!}) \, e^{- \ell_{\scriptscriptstyle Y}}, $$ for some ample class $\ell_{\scriptscriptstyle Y} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(Y)$. Let $\Xi : D^b(X) \to D^b(Y)$ be the Fourier-Mukai functor defined by $$ \Xi = \mathcal{E}_{\{a\}\times Y}^* \circ \Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \circ \mathcal{E}_{X \times \{b\}}^* =\Phi_{\mathcal{P}^\vee}^{ \scriptscriptstyle \widehat{Y} \to \scriptscriptstyle Y } \circ \Gamma. $$ The image of $e^{\ell_{\scriptscriptstyle X}}$ under its induced cohomological transform $\Xi^{\operatorname{\scriptscriptstyle{H}}}$ is $ ({r^3 \ell_{\scriptscriptstyle{X}} ^g}/{g!}) \, e^{- \ell_{\scriptscriptstyle Y}} $. Therefore, we deduce the following. \begin{thm} \label{prop:general-cohomo-FMT} If $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ is an ample class, then $$ e^{- D_{\scriptscriptstyle Y}} \, \Phi_{\mathcal{E}}^{\operatorname{\scriptscriptstyle{H}}} \, e^{-D_{\scriptscriptstyle X}} ( e^{\ell_{\scriptscriptstyle X}}) = (r \, {\ell_{\scriptscriptstyle{X}} ^g}/{g!}) \, e^{-\ell_{\scriptscriptstyle Y}}, $$ for some ample class $\ell_{\scriptscriptstyle{Y}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(Y)$, satisfying $({\ell_{\scriptscriptstyle{X}} ^g}/g!)({\ell_{\scriptscriptstyle{Y}}^g}/g!)= 1/r^2$. 
Moreover, for each $0 \le i \le g$, \begin{equation*} e^{- D_{\scriptscriptstyle Y}} \, \Phi_{\mathcal{E}}^{\operatorname{\scriptscriptstyle{H}}} \, e^{-D_{\scriptscriptstyle X}} \left( \frac{ \ell_{\scriptscriptstyle X}^i}{i!} \right) = \frac{(-1)^{g-i} r \, \ell_{\scriptscriptstyle X}^g}{g! (g-i)!} \, \ell_{\scriptscriptstyle{Y}}^{g-i}. \end{equation*} \end{thm} This gives us the following: \begin{thm} \label{prop:derived-induce-polarization} If the ample line bundle $L$ defines a polarization on $X$, then the line bundle $\det (\Xi(L))^{-1}$ is ample and so it defines a polarization on $Y$. \end{thm} Let us introduce the following notation: \begin{nota} \rm Let $B, \ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$. For $E \in D^b(X)$, the entries $v^{B,{\ell_{\scriptscriptstyle{X}} }}_i(E)$, $i=0, \ldots, g$, are defined by $$ v^{B,{\ell_{\scriptscriptstyle{X}} }}_i(E) = i! \, \ell_{\scriptscriptstyle{X}} ^{g-i} \cdot \mathop{\rm ch}\nolimits^B_{i}(E). $$ Here $\mathop{\rm ch}\nolimits^B_{i}(E)$ is the $i$-th component of the $B$-twisted Chern character $\mathop{\rm ch}\nolimits^B(E)= e^{-B} \mathop{\rm ch}\nolimits(E)$. The vector $v^{B,{\ell_{\scriptscriptstyle{X}} }}(E)$ is defined by \begin{equation*} v^{B,{\ell_{\scriptscriptstyle{X}} }}(E) = \left( v^{B,{\ell_{\scriptscriptstyle{X}} }}_0(E) , \ldots, v^{B,{\ell_{\scriptscriptstyle{X}} }}_g(E) \right). \end{equation*} We will denote a $(g+1) \times (g+1)$ anti-diagonal matrix with entries $a_k$, $k=1, \ldots, g+1$ by $$ \operatorname{Adiag}(a_1, \ldots, a_{g+1})_{ij} : = \begin{cases} a_k & \text{if } i=k, j=g+2-k \\ 0 & \text{otherwise}. 
\end{cases} $$ \end{nota} \begin{thm} \label{prop:antidiagonal-rep-cohom-FMT} If we consider $v^{-D_{\scriptscriptstyle X} ,\ell_{\scriptscriptstyle{X}} }, v^{D_{\scriptscriptstyle Y},\ell_{\scriptscriptstyle{Y}}}$ as column vectors, then $$ v^{D_{\scriptscriptstyle Y},\ell_{\scriptscriptstyle{Y}}}\left(\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(E) \right) = \frac{g!}{r \, \ell_{\scriptscriptstyle X}^g} \, \operatorname{Adiag}\left(1,-1,\ldots, (-1)^{g-1}, (-1)^g\right) \ v^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(E). $$ \end{thm} \begin{proof} The $i$-th entry of $v^{D_{\scriptscriptstyle Y},\ell_{\scriptscriptstyle{Y}}}\left(\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(E) \right) $ is \begin{align*} v^{D_{\scriptscriptstyle Y},\ell_{\scriptscriptstyle{Y}}}_{i} \left(\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(E) \right) & = i! \, \ell_{\scriptscriptstyle{Y}}^{g-i} \cdot \mathop{\rm ch}\nolimits^{D_{\scriptscriptstyle Y}}_{i}\left(\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(E) \right) \\ & = i! \int_{Y} {\ell_{\scriptscriptstyle Y}^{g-i}} \cdot \mathop{\rm ch}\nolimits^{D_{\scriptscriptstyle Y}}\left(\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(E) \right) \\ & = i! \int_{Y} {\ell_{\scriptscriptstyle Y}^{g-i}} \cdot e^{-D_{\scriptscriptstyle Y}}\mathop{\rm ch}\nolimits\left(\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(E) \right) \\ & =(-1)^{g-i} i! \left\langle {\ell_{\scriptscriptstyle Y}^{g-i}} , \, e^{-D_{\scriptscriptstyle Y}} \mathop{\rm ch}\nolimits\left(\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(E) \right) \right\rangle_{\scriptscriptstyle Y}\\ & =(-1)^{g-i} i! \left\langle {\ell_{\scriptscriptstyle Y}^{g-i}} , \, e^{-D_{\scriptscriptstyle Y}} \Phi_\mathcal{E}^{\operatorname{\scriptscriptstyle{H}}}(\mathop{\rm ch}\nolimits(E)) \right\rangle_{\scriptscriptstyle Y}\\ & =(-1)^{g-i} i! 
\left\langle {\ell_{\scriptscriptstyle Y}^{g-i}} , \, e^{-D_{\scriptscriptstyle Y}} \Phi_\mathcal{E}^{\operatorname{\scriptscriptstyle{H}}}e^{-D_{\scriptscriptstyle X}}(\mathop{\rm ch}\nolimits^{-D_{\scriptscriptstyle X}}(E)) \right\rangle_{\scriptscriptstyle Y}\\ & =(-1)^{g-i} i! \left\langle {\left(e^{-D_{\scriptscriptstyle Y}} \Phi_\mathcal{E}^{\operatorname{\scriptscriptstyle{H}}}e^{-D_{\scriptscriptstyle X}}\right)^{-1}(\ell_{\scriptscriptstyle Y}^{g-i}}) , \, \mathop{\rm ch}\nolimits^{-D_{\scriptscriptstyle X}}(E) \right\rangle_{\scriptscriptstyle X}\\ & =\frac{ g! (g-i)! }{r \, \ell_{\scriptscriptstyle{X}} ^g} \left\langle \ell_{\scriptscriptstyle{X}} ^i , \, \mathop{\rm ch}\nolimits^{-D_{\scriptscriptstyle X}}(E) \right\rangle_{\scriptscriptstyle X}, \ \ \ \text{ from Theorem \ref{prop:general-cohomo-FMT} }\\ & =\frac{ (-1)^i g! (g-i)! }{r \, \ell_{\scriptscriptstyle{X}} ^g} \int_X \ell_{\scriptscriptstyle{X}} ^i \cdot \mathop{\rm ch}\nolimits^{-D_{\scriptscriptstyle X}}(E) \\ & =\frac{ (-1)^i g! (g-i)! }{r \, \ell_{\scriptscriptstyle{X}} ^g} \, \ell_{\scriptscriptstyle{X}} ^i \cdot \mathop{\rm ch}\nolimits^{-D_{\scriptscriptstyle X}}_{g-i}(E) \\ & =\frac{ (-1)^i g! }{r \, \ell_{\scriptscriptstyle{X}} ^g} \, v^{-D_{\scriptscriptstyle X},\ell_{\scriptscriptstyle{X}} }_{g-i} (E). \end{align*} This completes the proof. \end{proof} Let $\mathbb{D}$ denote the derived dualizing functor $\mathbf{R} \operatorname{\mathcal{H}\textit{om}}(-, \mathcal{O}_X)$. The following is a generalization of Mukai's result on classical Fourier-Mukai transform. \begin{lem}[{\cite[Lemma 2.2]{PP}}] \label{prop:dual-FMT} We have the isomorphism $$ (\Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^\vee} \circ \mathbb{D}) [g]\cong \mathbb{D} \circ \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}}. 
$$ Here $\Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^\vee}: D^b(X) \to D^b(Y)$ is the Fourier-Mukai transform from $X$ to $Y$ with the kernel $\mathcal{E}^\vee$. \end{lem} This gives us the convergence of the following spectral sequence. \begin{dualspecseq} \label{Spec-Seq-Dual} $$ \mathcal{H}^p \left( \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^\vee} \left( \operatorname{\mathcal{E}\textit{xt}}^{q+g} (E, \mathcal{O}_X) \right) \right) \Longrightarrow \ \ ? \ \ \Longleftarrow \operatorname{\mathcal{E}\textit{xt}}^{p+g}\left( \mathcal{H}^{g-q} \left( \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}}(E)\right), \mathcal{O}_X \right) $$ for $E \in \mathop{\rm Coh}\nolimits(X)$. \end{dualspecseq} We have the following for the Fourier-Mukai transform $\Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^\vee}: D^b(X) \to D^b(Y)$ : \begin{prop} \label{prop:dual-antidiagonal-rep-cohom-FMT} If we consider $v^{D_{\scriptscriptstyle X} ,\ell_{\scriptscriptstyle{X}} }, v^{-D_{\scriptscriptstyle Y},\ell_{\scriptscriptstyle{Y}}}$ as column vectors, then $$ v^{-D_{\scriptscriptstyle Y},\ell_{\scriptscriptstyle{Y}}}\left(\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(E) \right) = \frac{g!}{r \, \ell_{\scriptscriptstyle X}^g} \, \operatorname{Adiag}\left(1,-1,\ldots, (-1)^{g-1}, (-1)^g\right) \ v^{D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(E). $$ \end{prop} \section{Stability Conditions Under FM Transforms on Abelian Varieties } \label{sec:stability-under-FMT} \subsection{Action of FM transforms on Bridgeland Stability Conditions} \label{sec:action-FMT-central-charge} This section generalizes some of the similar results in \cite{MP2, PiyThesis}. Recall that a Bridgeland stability condition $\sigma$ on a triangulated category $\mathcal{D}$ consists of a stability function $Z$ together with a slicing $\mathcal{P}$ of $\mathcal{D}$ satisfying certain axioms. 
Equivalently, one can define $\sigma$ by giving a bounded t-structure on $\mathcal{D}$ together with a stability function $Z$ on the corresponding heart $\mathcal{A}$ satisfying the Harder-Narasimhan property. Then $\sigma$ is usually written as the pair $(Z, \mathcal{P})$ or $(Z, \mathcal{A})$. Let $\Upsilon: \mathcal{D} \to \mathcal{D}'$ be an equivalence of triangulated categories, and let $W: K(\mathcal{D}) \to \mathbb{C}$ be a group homomorphism. Then $$ \left( \Upsilon \cdot W \right) ([E]) = W \left( [ \Upsilon^{-1}(E) ] \right) $$ defines an induced group morphism $ \Upsilon \cdot W$ in $\mathop{\rm Hom}\nolimits (K(\mathcal{D}'), \mathbb{C})$ by the equivalence $\Upsilon$. Moreover, this can be extended to a natural induced stability condition on $\mathcal{D}'$ by defining $\Upsilon \cdot (Z, \mathcal{A}) = (\Upsilon \cdot Z , \Upsilon(\mathcal{A}))$. Let $X, Y$ be two derived equivalent $g$-dimensional abelian varieties as in Section \ref{sec:cohomological-FMT}, which is given by the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} : D^b(X) \to D^b(Y)$. Let $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ be an ample class on $X$ and let $\ell_{\scriptscriptstyle{Y}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(Y)$ be the induced ample class on $Y$ as in Theorem \ref{prop:general-cohomo-FMT}. Let $u$ be a complex number. Consider the function $Z_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} }: K(X) \to \mathbb{C}$ defined by $$ Z_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} }(E)= -\int_{X} e^{-\left( -D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} \right)}\mathop{\rm ch}\nolimits(E) = \left\langle e^{ -D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} } , \ \mathop{\rm ch}\nolimits(E) \right\rangle_{\scriptscriptstyle X}. 
$$ For $E \in D^b(Y)$ we have \begin{align*} \left( \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \cdot Z_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} }\right)(E) & = \left\langle e^{ -D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} } , \ \mathop{\rm ch}\nolimits\left(\left(\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}\right)^{-1}(E)\right) \right\rangle_{\scriptscriptstyle X} \\ & = \left\langle e^{ -D_{\scriptscriptstyle X}+ u\ell_{\scriptscriptstyle{X}} } , \ \left(\Phi_{\mathcal{E}}^{\operatorname{\scriptscriptstyle{H}}}\right)^{-1} \left(\mathop{\rm ch}\nolimits(E)\right) \right\rangle_{\scriptscriptstyle X} \\ & = \left\langle \Phi_{\mathcal{E}}^{\operatorname{\scriptscriptstyle{H}}} \left( e^{ -D_{\scriptscriptstyle X}+ u\ell_{\scriptscriptstyle{X}} }\right) , \ \mathop{\rm ch}\nolimits(E) \right\rangle_{\scriptscriptstyle Y}\\ & = \left\langle e^{D_{\scriptscriptstyle Y}} \left( e^{-D_{\scriptscriptstyle Y}} \Phi_{\mathcal{E}}^{\operatorname{\scriptscriptstyle{H}}} e^{ -D_{\scriptscriptstyle X}}\right) (e^{ u\ell_{\scriptscriptstyle{X}} }) , \ \mathop{\rm ch}\nolimits(E) \right\rangle_{\scriptscriptstyle Y} \\ & = (r \, \ell_{\scriptscriptstyle{X}} ^g u^g /g!) \ \left \langle e^{D_{\scriptscriptstyle Y} -\ell_{\scriptscriptstyle{Y}}/u} , \ \mathop{\rm ch}\nolimits(E) \right\rangle_{\scriptscriptstyle Y}, \end{align*} since by Theorem \ref{prop:general-cohomo-FMT}, $e^{-D_{\scriptscriptstyle Y}} \Phi_{\mathcal{E}}^{\operatorname{\scriptscriptstyle{H}}} e^{ -D_{\scriptscriptstyle X}}(e^{ u\ell_{\scriptscriptstyle{X}} }) = (r \, \ell_{\scriptscriptstyle{X}} ^g u^g /g!) \, e^{-\ell_{\scriptscriptstyle{Y}}/u} $. 
So we have the following relation: \begin{lem} \label{prop:FMTact-central-charge} We have $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \cdot Z_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} } = \zeta\ Z_{D_{\scriptscriptstyle Y} - \ell_{\scriptscriptstyle{Y}}/u}$, for $\zeta = r \, \ell_{\scriptscriptstyle{X}} ^g u^g /g!$. \end{lem} Assume there exists a stability condition for any complexified ample class $-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} $ with a heart $\mathcal{A}^{\scriptscriptstyle X}_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} }$ and a slicing $\mathcal{P}^{\scriptscriptstyle X}_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} }$ associated to the central charge function $Z_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} }$. Furthermore, assume similar stability conditions exist on $Y$. From Lemma \ref{prop:FMTact-central-charge} for any $\phi \in \mathbb{R}$, \ $\zeta \, Z_{D_{\scriptscriptstyle Y} - \ell_{\scriptscriptstyle{Y}}/u}\left( \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}\left( \mathcal{P}^{\scriptscriptstyle X}_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} }(\phi) \right)\right) \subset \mathbb{R}_{>0} e^{i\pi \phi}$; that is $$ Z_{D_{\scriptscriptstyle Y} - \ell_{\scriptscriptstyle{Y}}/u}\left( \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}\left( \mathcal{P}^{\scriptscriptstyle X}_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} }(\phi) \right)\right) \subset \mathbb{R}_{>0} e^{i\left(\pi \phi - \arg(\zeta)\right)}. 
$$ So we would expect $$ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}\left( \mathcal{P}^{\scriptscriptstyle X}_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} } (\phi) \right) = \mathcal{P}^{\scriptscriptstyle Y}_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u} \left( \phi - \frac{\arg (\zeta)}{\pi}\right), $$ and so $$ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}\left( \mathcal{P}^{\scriptscriptstyle X}_{-D_{\scriptscriptstyle X}+u\ell_{\scriptscriptstyle{X}} } ((0,\,1])\right) = \mathcal{P}^{\scriptscriptstyle Y}_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u} \left(\left( -\frac{\arg (\zeta)}{\pi}, \, -\frac{\arg (\zeta)}{\pi} +1\right]\right). $$ For $0\le \alpha <1$, $ \mathcal{P}^{\scriptscriptstyle Y}_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u} ((\alpha, \alpha+1]) =\left \langle \mathcal{P}^{\scriptscriptstyle Y}_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u} ((0, \alpha])\,[1] , \, \mathcal{P}^{\scriptscriptstyle Y}_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u} ((\alpha, 1]) \right \rangle$ is a tilt of $ \mathcal{A}^{\scriptscriptstyle Y}_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u} = \mathcal{P}^{\scriptscriptstyle Y}_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u} ((0,1])$ associated to a torsion theory coming from $Z_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u} $ stability. Therefore, one would expect $ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} \left( \mathcal{A}^{\scriptscriptstyle X}_{-D_{\scriptscriptstyle X} + u\ell_{\scriptscriptstyle{X}} } \right)$ is a tilt of $\mathcal{A}^{\scriptscriptstyle Y}_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u}$ associated to a torsion theory coming from $Z_{D_{\scriptscriptstyle Y}- \ell_{\scriptscriptstyle{Y}}/u}$ stability, up to some shift. 
Moreover, for the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ when $\zeta$ is real, that is, \begin{equation} \label{eqn:condition-for-abelian-equivalence} u^g \in \mathbb{R} \end{equation} we would expect that the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}: D^b(X) \to D^b(Y)$ gives the equivalence of associated stability condition hearts. We conjecturally formulate this for any dimensional abelian varieties in Section \ref{sec:cojectural-any-abelian}. \subsection{Very weak stability conditions} \label{sec:very-weak-stability} Let us recall the general arguments of very weak stability conditions. We closely follow the notions as in \cite[Section 2]{PT}. Let $\mathcal{D}$ be a triangulated category, and $K(\mathcal{D})$ its Grothendieck group. \begin{defi} \label{defi:weak-stability} \rm A \textit{very weak stability condition} on $\mathcal{D}$ is a pair $(Z, \mathcal{A})$, where $\mathcal{A}$ is the heart of a bounded t-structure on $\mathcal{D}$, and $Z: K(\mathcal{D}) \to \mathbb{C}$ is a group homomorphism satisfying the following conditions: \begin{enumerate} \item For any $E \in \mathcal{A}$, we have $Z(E) \in \mathbb{H} \cup \mathbb{R}_{\le 0}$. Here $\mathbb{H}$ is the upper half plane $ \{ z \in \mathbb{C}: \operatorname{Im} z >0\}$. \item The associated slope function $\mu: \mathcal{A} \to \mathbb{R} \cup \{+\infty\}$ is defined by $$ \mu (E) = \begin{cases} + \infty & \ \text{if } \operatorname{Im} Z(E)=0 \\ -\frac{\mathop{\rm Re}\nolimits Z(E)}{\operatorname{Im} Z(E)} & \ \text{otherwise}, \end{cases} $$ and it satisfies the Harder-Narasimhan property. \end{enumerate} \end{defi} We say that $E \in \mathcal{A}$ is $\mu$-(semi)stable if for any non-zero subobject $F \subset E$ in $\mathcal{A}$, we have the inequality: $\mu(F) <(\le) \, \mu(E/F)$. 
The Harder-Narasimhan filtration of an object $E \in \mathcal{A}$ is a chain of subobjects $0=E_0 \subset E_1 \subset \cdots \subset E_n=E$ in $\mathcal{A}$ such that each $F_i=E_i/E_{i-1}$ is $\mu$-semistable with $\mu(F_i)>\mu(F_{i+1})$. If such Harder-Narasimhan filtrations exist for all objects in $\mathcal{A}$, we say that $\mu$ satisfies the Harder-Narasimhan property. For a given very weak stability condition $(Z, \mathcal{A})$, we define its slicing on $\mathcal{D}$ (see \cite[Definition~3.3]{BriStab}) \begin{align*} \{\mathcal{P}(\phi)\}_{\phi \in \mathbb{R}}, \ \mathcal{P}(\phi) \subset \mathcal{D} \end{align*} as in the case of Bridgeland stability conditions (see \cite[Proposition~5.3]{BriStab}). Namely, for $0<\phi \le 1$, the category $\mathcal{P}(\phi)$ is defined to be \begin{align*} \mathcal{P}(\phi) =\{ E \in \mathcal{A} : E \mbox{ is } \mu \mbox{-semistable with } \mu(E)=-1/\tan (\pi \phi)\} \cup \{0\}. \end{align*} Here we set $-1/\tan \pi =\infty$. The other subcategories are defined by setting \begin{align*} \mathcal{P}(\phi+1)=\mathcal{P}(\phi)[1]. \end{align*} For an interval $I \subset \mathbb{R}$, we define $\mathcal{P}(I)$ to be the smallest extension closed subcategory of $\mathcal{D}$ which contains $\mathcal{P}(\phi)$ for each $\phi \in I$. For $0 \le s \le 1$, the pair $(\mathcal{P}((s, 1]), \mathcal{P}((0, s]) )$ of subcategories of $\mathcal{A} = \mathcal{P}((0,1])$ is a torsion pair, and the corresponding tilt is $\mathcal{P}((s, s+1])$. Note that the category $\mathcal{P}(1)$ contains the following category \begin{align*} \mathcal{C} \cneq \{E \in \mathcal{A} : Z(E)=0\}. \end{align*} It is easy to check that $\mathcal{C}$ is closed under subobjects and quotients in $\mathcal{A}$. In particular, $\mathcal{C}$ is an abelian subcategory of $\mathcal{A}$. Moreover, the pair $(Z, \mathcal{A})$ gives a \textit{Bridgeland stability condition} on $\mathcal{D}$ if $\mathcal{C}=\{0\}$. 
\subsection{Conjectural stability conditions} \label{sec:cojectural-any-abelian} Let $X$ be a $g$-dimensional abelian variety with $g \ge 2$. Motivated by the constructions for smooth projective surfaces (see \cite{BriK3, AB}) together with some observations in Mathematical Physics, for $X$, it is expected that the function defined by \begin{equation*} Z_{B + i \omega}(-) = - \int_X e^{-B- i \omega} \mathop{\rm ch}\nolimits(-) \end{equation*} is a central charge function of some geometric stability condition on $D^b(X)$ (see \cite[Conjecture 2.1.2]{BMT}). Here $B + i \omega \in \mathop{\rm NS}\nolimits_{\mathbb{C}}(X)$ is a complexified ample class on $X$, that is by definition $B, \omega \in \mathop{\rm NS}\nolimits_{\mathbb{R}}(X)$ with $\omega$ an ample class. By using the notion of very weak stability, let us conjecturally construct a heart for this central charge function. For $0\le k\le g$, we define the $k$-truncated Chern character by $$ \mathop{\rm ch}\nolimits_{\le k}(E) = (\mathop{\rm ch}\nolimits_0(E), \mathop{\rm ch}\nolimits_1(E), \ldots, \mathop{\rm ch}\nolimits_k(E), 0, \ldots, 0), $$ and the function $Z^{(k)}_{B + i \omega} : K(X) \to \mathbb{C}$ by \begin{equation*} Z^{(k)}_{B + i \omega}(E) = - i^{g-k}\int_X e^{-B- i \omega} \mathop{\rm ch}\nolimits_{\le k}(E). \end{equation*} The usual slope stability on sheaves gives the very weak stability condition $(Z^{(1)}_{B + i \omega}, \mathop{\rm Coh}\nolimits(X))$. Moreover, we formulate the following: \begin{conj} \label{prop:conjecture-stab-cond} For each $1 \le k < g$, the pair $\sigma_k = (Z^{(k)}_{B + i \omega}, \mathcal{A}^{(k)}_{B + i \omega})$ gives a very weak stability condition on $D^b(X)$, where the hearts $\mathcal{A}^{(k)}_{B + i \omega}$, $1\le k \le g$ are defined by \begin{equation*} \left.\begin{aligned} & \mathcal{A}^{(1)}_{B + i \omega} = \mathop{\rm Coh}\nolimits(X) \\ &\mathcal{A}^{(k+1)}_{B + i \omega} = \mathcal{P}_{\sigma_k}((1/2,\,3/2]) \end{aligned} \ \right\}. 
\end{equation*} Moreover, the pair $\sigma_g = (Z_{B + i \omega}, \mathcal{A}^{(g)}_{B + i \omega})$ is a Bridgeland stability condition on $D^b(X)$. \end{conj} This is known to be true for abelian surfaces (\cite{BriK3, AB}) and abelian threefolds (\cite{MP1, MP2, PiyThesis, BMS}). \begin{rmk} \label{prop:remark-conj-stab} \rm Although we assumed $X$ to be an abelian variety, the above Conjecture \ref{prop:conjecture-stab-cond} makes sense for any smooth projective variety. In fact, $(Z^{(1)}_{B + i \omega}, \mathcal{A}^{(1)}_{B + i \omega} = \mathop{\rm Coh}\nolimits(X))$ is a very weak stability condition for any variety and a Bridgeland stability condition for curves. By \cite{BriK3, AB}, $ (Z^{(2)}_{B + i \omega}, \mathcal{A}^{(2)}_{B + i \omega} )$ is a Bridgeland stability condition for surfaces. In \cite{BMT}, the authors proved that the pair $(Z^{(2)}_{B + i \omega}, \mathcal{A}^{(2)}_{B + i \omega} )$ is again a very weak stability condition for threefolds. Here the stability was called tilt slope stability. The usual Bogomolov-Gieseker inequality for $Z^{(1)}_{B + i \omega}$ stable sheaves plays a crucial role in these proofs. Clearly the same arguments work for any higher dimensional varieties. Therefore, we can always construct the category $\mathcal{A}^{(3)}_{B + i \omega}$ when $\dim X \ge 2$. In \cite{BMT}, the authors conjectured that this category is a heart of a Bridgeland stability condition with the central charge $Z_{B+i\omega}$. Moreover, they reduced it to prove Bogomolov-Gieseker type inequalities for $Z^{(2)}_{B + i \omega}$ stable objects $E \in \mathcal{A}^{(2)}_{B + i \omega}$ with $\mathop{\rm Re}\nolimits Z^{(2)}_{B + i \omega}(E) =0$, and the strong form of this inequality is \begin{equation*} \label{BGineq} \mathop{\rm ch}\nolimits^B_3 (E) \le \frac{\omega^2}{18} \mathop{\rm ch}\nolimits_1^B(E). \end{equation*} This is exactly Conjecture \ref{prop:BG-ineq-conjecture}. 
\end{rmk} Let $X, Y$ be two derived equivalent $g$-dimensional abelian varieties as in Section \ref{sec:cohomological-FMT}, which is given by the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} : D^b(X) \to D^b(Y)$. Let $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ be an ample class on $X$ and let $\ell_{\scriptscriptstyle{Y}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(Y)$ be the induced ample class on $Y$ as in Theorem \ref{prop:general-cohomo-FMT}. By considering the complexified classes associated to the condition \eqref{eqn:condition-for-abelian-equivalence}, we conjecture the following for all abelian varieties. \begin{conj} \label{prop:conjecture-equivalence-stab-hearts} The Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}: D^b(X) \to D^b(Y)$ gives the equivalence of stability condition hearts conjecturally constructed in Conjecture \ref{prop:conjecture-stab-cond}: $$ \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} [k] \left( \mathcal{A}_{\Omega} \right) = \mathcal{A}_{\Omega'}. $$ Here $\Omega = -D_{\scriptscriptstyle X} + \lambda e^{i k\pi/g }\, \ell_{\scriptscriptstyle{X}} $ and $\Omega' = D_{\scriptscriptstyle Y} - (1/\lambda) e^{-i k \pi/g} \, \ell_{\scriptscriptstyle{Y}}$ are complexified ample classes on $X$ and $Y$ respectively, for any $k \in \{1, 2, \ldots, (g-1)\}$ and any $ \lambda \in \mathbb{R}_{>0}$. \end{conj} \begin{note} \rm This conjecture is known to be true for abelian surfaces and we discuss it in Section \ref{sec:equivalence-stab-hearts-surface}. Moreover, the main aim of the next sections is to show this conjecture indeed holds on abelian threefolds; see Theorem \ref{prop:equivalence-hearts-abelian-threefolds}. 
\end{note} \section{Bogomolov-Gieseker Type Inequality on Abelian Threefolds} Let $X, Y$ be derived equivalent abelian threefolds and let $\ell_{\scriptscriptstyle{X}} , \ell_{\scriptscriptstyle{Y}}$ be ample classes on them respectively as in Theorem \ref{prop:general-cohomo-FMT}. \begin{nota} \rm Let $\Psi$ be the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ from $X$ to $Y$ with kernel $\mathcal{E}$, and let $\widehat{\Psi} = \Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}$. \end{nota} \begin{prop} \label{prop:imgainary-part-central-charge} We have the following: \begin{enumerate}[label=(\arabic*)] \item For $E \in D^b(X)$, $$ \operatorname{Im} Z_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{ \lambda}{2} \ell_{\scriptscriptstyle{X}} , \frac{ \lambda}{2} }(E) =\frac{ \lambda \sqrt{3}}{4} \left( v_2^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(E)- \lambda v_1^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(E) \right), $$ and for $E \in D^b(Y)$, $$ \operatorname{Im} Z_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{1}{2 \lambda} \ell_{\scriptscriptstyle{Y}} , \frac{1}{2 \lambda}}(E) =\frac{ \sqrt{3}}{4\lambda} \left( v_2^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}}(E) +\frac{1}{\lambda} v_1^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}}(E) \right). 
$$ \item For $E \in D^b(Y)$, $$ \operatorname{Im} Z_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{ \lambda}{2} \ell_{\scriptscriptstyle{X}} , \frac{ \lambda}{2} }(\widehat{\Psi}[1](E)) =- \frac{3!\lambda^3}{ r \ell_{\scriptscriptstyle{Y}}^3} \operatorname{Im} Z_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{1}{2 \lambda} \ell_{\scriptscriptstyle{Y}} , \frac{1}{2 \lambda}}(E), $$ and for $E \in D^b(X)$ $$ \operatorname{Im} Z_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{1}{2 \lambda} \ell_{\scriptscriptstyle{Y}} , \frac{1}{2 \lambda}}(\Psi(E)) = - \frac{3!}{\lambda^3 r \ell_{\scriptscriptstyle{X}} ^3} \operatorname{Im} Z_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{ \lambda}{2} \ell_{\scriptscriptstyle{X}} , \frac{ \lambda}{2} }(E). $$ \end{enumerate} \end{prop} \begin{proof} Let us prove (1). By definition \begin{align*} Z_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{ \lambda}{2} \ell_{\scriptscriptstyle{X}} , \frac{ \lambda}{2} }(E) & = - \int_X e^{D_{\scriptscriptstyle X} - \frac{ \lambda}{2} \ell_{\scriptscriptstyle{X}} - i \frac{ \lambda \sqrt{3}}{2} \ell_{\scriptscriptstyle{X}} } \mathop{\rm ch}\nolimits(E) \\ & = - \int_X e^{-\lambda \ell_{\scriptscriptstyle{X}} \left( \frac{1}{2} + i \frac{\sqrt{3}}{2} \right) } \mathop{\rm ch}\nolimits^{-D_{\scriptscriptstyle X}}(E). \end{align*} Hence its imaginary part is \begin{align*} \operatorname{Im} Z_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{ \lambda}{2} \ell_{\scriptscriptstyle{X}} , \frac{ \lambda}{2} }(E) & = \int_X (0 , \lambda \ell_{\scriptscriptstyle{X}} \sqrt{3}/2, -\lambda^2 \ell_{\scriptscriptstyle{X}} ^2 \sqrt{3}/4, 0) \cdot \mathop{\rm ch}\nolimits^{-D_{\scriptscriptstyle X}}(E) \\ & = \frac{ \lambda \sqrt{3}}{4} \left( v_2^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }- \lambda v_1^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} } \right) \end{align*} as required. 
Similarly one can prove the other part. \\ Part (2) follows from Theorem \ref{prop:antidiagonal-rep-cohom-FMT} together with part (1). \end{proof} Most of the next sections are devoted to proving the following: \begin{thm} \label{prop:equivalence-hearts-abelian-threefolds} The Fourier-Mukai transforms $\Psi[1]$ and $\widehat{\Psi}[2]$ give the equivalences \begin{align*} &\Psi[1]\left(\mathcal{A}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{ \lambda}{2} \ell_{\scriptscriptstyle{X}} , \frac{ \lambda}{2} } \right) \cong \mathcal{A}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{1}{2 \lambda} \ell_{\scriptscriptstyle{Y}} , \frac{1}{2 \lambda}}, \\ &\widehat{\Psi}[2] \left( \mathcal{A}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{1}{2 \lambda} \ell_{\scriptscriptstyle{Y}} , \frac{1}{2 \lambda}} \right) \cong \mathcal{A}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{ \lambda}{2} \ell_{\scriptscriptstyle{X}} , \frac{ \lambda}{2} } \end{align*} of the abelian categories as in \eqref{eqn:double-tilt-heart} of Section \ref{sec:double-tilting-construction}. \end{thm} \begin{rmk} \label{prop:equivalence-stab-parameters} \rm One can see that the complexified ample classes \begin{equation*} \left.\begin{aligned} &\Omega = \left( -D_{\scriptscriptstyle X} + \lambda \ell_{\scriptscriptstyle{X}} /2 \right) + i \sqrt{3} \lambda \ell_{\scriptscriptstyle{X}} /2 \\ & \Omega' = \left( D_{\scriptscriptstyle Y} - \ell_{\scriptscriptstyle{Y}}/(2 \lambda) \right) + i \sqrt{3} \ell_{\scriptscriptstyle{Y}}/(2 \lambda) \end{aligned} \ \right\} \end{equation*} on $X$, $Y$ associated to the above theorem are exactly the solutions given for the $g=3$ case in \eqref{eqn:condition-for-abelian-equivalence}. 
Moreover, the shifts are compatible with the images of the skyscraper sheaves $\mathcal{O}_x$, $\mathcal{O}_y$ under the Fourier-Mukai transforms which are also minimal objects in the corresponding abelian categories, as discussed in Example~\ref{example:minimal-objects-semihomogeneous-bundles}. \end{rmk} \begin{thm} \label{prop:BG-ineq-abelian-threefolds} The Bogomolov-Gieseker type inequality in Conjecture \ref{prop:BG-ineq-conjecture} holds for $X$. \end{thm} \begin{proof} By deforming tilt stability parameters it is enough to consider a dense family of classes $B \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$, and $\alpha \ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ for $\nu_{\ell_{\scriptscriptstyle{X}} , B, \alpha}$ stable objects of zero tilt slope. For any given $B \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X), \alpha \in \mathbb{Q}_{>0}$ and ample class $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$, one can find $-D_{\scriptscriptstyle X} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ and $\lambda \in \mathbb{Q}_{>0}$ such that \begin{align*} B = -D_{\scriptscriptstyle X} + \lambda \ell_{\scriptscriptstyle{X}} /2, \ \text{ and} \ \alpha = \lambda /2 . \end{align*} Now one can find a non-trivial Fourier-Mukai transform $\Psi$ which gives the equivalence of abelian categories as in Theorem~\ref{prop:equivalence-hearts-abelian-threefolds}. From Lemma \ref{prop:reduction-BG-ineq-class}, it is enough to check that the Bogomolov-Gieseker type inequality is satisfied by each object in $\mathcal{M}_{\ell_{\scriptscriptstyle{X}} , B, \alpha }$. Moreover, the objects in $$ \mathcal{M}^o: = \{M: M \cong \mathcal{E}^*_{X \times\{y\}}[1] \text{ for some } y \in X \} \subset \mathcal{M}_{\ell_{\scriptscriptstyle{X}} , B, \alpha } $$ satisfy the Bogomolov-Gieseker type inequality (Example~\ref{example:minimal-objects-semihomogeneous-bundles} and Note~\ref{prop:BG-ineq-for-tilt-stable-trivial-discriminant}). 
So we only need to check the Bogomolov-Gieseker type inequality for objects in $\mathcal{M}_{\ell_{\scriptscriptstyle{X}} , B, \alpha } \setminus \mathcal{M}^o$. Let $E \in \mathcal{M}_{\ell_{\scriptscriptstyle{X}} , B, \alpha } \setminus \mathcal{M}^o$. Then $E[1] \in \mathcal{A}_{\ell_{\scriptscriptstyle{X}} , B, \alpha }$ is a minimal object and so by the equivalence in Theorem~\ref{prop:equivalence-hearts-abelian-threefolds}, $\Psi[1](E[1]) \in \mathcal{A}_{\ell_{\scriptscriptstyle{Y}}, B', \alpha'}$ is also a minimal object. Here \begin{align*} B' = D_{\scriptscriptstyle Y} - \ell_{\scriptscriptstyle{Y}}/(2 \lambda), \ \text{ and} \ \alpha' = {1}/(2 \lambda). \end{align*} So $\Psi[1](E[1]) \in \mathcal{F}'_{\ell_{\scriptscriptstyle{Y}}, B', \alpha'}[1]$ or $\Psi[1](E[1]) \in \mathcal{T}'_{\ell_{\scriptscriptstyle{Y}}, B', \alpha'}$. Since $\operatorname{Im} Z_{\ell_{\scriptscriptstyle{X}} , B, \alpha}(E) = 0$, from Proposition~\ref{prop:imgainary-part-central-charge}, $\operatorname{Im} Z_{\ell_{\scriptscriptstyle{Y}}, B', \alpha'}(\Psi[1](E[1]))=0$. If $\Psi[1](E[1]) \in \mathcal{T}'_{\ell_{\scriptscriptstyle{Y}}, B', \alpha'}$ then by Proposition~\ref{prop:first-tilt-behaves-like-sheaves-surfaces}, $\Psi[1](E[1]) \in \mathop{\rm Coh}\nolimits_{0}(Y)$ and so $E$ has a filtration of objects from $\mathcal{M}^o$; which is not possible. Hence, $\Psi[1](E)$ is a $\nu_{\ell_{\scriptscriptstyle{Y}}, B', \alpha'}$ stable object with zero tilt slope. Moreover, for any $y \in Y$ we have $$ \mathop{\rm Ext}\nolimits_{\scriptscriptstyle Y}^1(\mathcal{O}_y, \Psi[1](E)) \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\mathcal{O}_y, \Psi[2](E)) \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}}[1],E) = 0, $$ as $E \not \cong \mathcal{E}^*_{X \times\{y\}}[1]$. Hence, $\Psi[1](E) \in \mathcal{M}_{\ell_{\scriptscriptstyle{Y}}, B', \alpha' }$. 
Therefore, from Proposition \ref{prop:slope-bounds}-(4) $v_1^{B' - \alpha'\ell_{\scriptscriptstyle{Y}}}(\Psi[1](E)) \ge 0$. So we have \begin{equation*} v_1^{B' - \alpha'\ell_{\scriptscriptstyle{Y}}}(\Psi[1](E)) =v_1^{D_{\scriptscriptstyle Y} , \ell_{\scriptscriptstyle{Y}}}(\Psi[1](E)) + \frac{1}{ \lambda} v_0^{D_{\scriptscriptstyle Y} , \ell_{\scriptscriptstyle{Y}}}(\Psi[1](E)) \ge 0. \end{equation*} From Theorem \ref{prop:antidiagonal-rep-cohom-FMT}, we have \begin{equation*} v_2^{-D_{\scriptscriptstyle X} , \ell_{\scriptscriptstyle{X}} }(E) - \frac{1}{ \lambda} v_3^{-D_{\scriptscriptstyle X} , \ell_{\scriptscriptstyle{X}} }(E) \ge 0. \end{equation*} Since $\operatorname{Im} Z_{\ell_{\scriptscriptstyle{X}} , B, \alpha}(E) = 0$, from Proposition~\ref{prop:imgainary-part-central-charge}--(1) $$ v_2^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(E) = \lambda v_1^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(E). $$ Therefore \begin{equation*} \label{BGineq} v_3^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(E) - \lambda^2 v_1^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(E) \le 0. \end{equation*} This is the required Bogomolov-Gieseker type inequality for $E$. \end{proof} We can now deduce the main theorem of this paper: \begin{thm} \label{prop:Bridgeland-stab-abelian-threefolds} Conjecture \ref{prop:BMT-stab-cond-conjecture} holds for abelian threefolds. Therefore, we have the symmetries of Bridgeland stability conditions as in Theorem \ref{prop:intro-main-stab-symmetries}. \end{thm} \section{FM Transform of Sheaves on Abelian Varieties} \label{sec:FMT-abelian-varieties} Let $X, Y$ be two derived equivalent $g$-dimensional abelian varieties as in Section \ref{sec:cohomological-FMT}, which is given by the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} : D^b(X) \to D^b(Y)$. 
Let $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ be an ample class on $X$ and let $\ell_{\scriptscriptstyle{Y}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(Y)$ be the induced ample class on $Y$ as in Theorem \ref{prop:general-cohomo-FMT}. We study some properties of the slope stability of the images under the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ in this section. The slope $\mu_{\ell_{\scriptscriptstyle{X}} , B}$ of $E \in \mathop{\rm Coh}\nolimits(X)$ is defined by $$ \mu_{\ell_{\scriptscriptstyle{X}} , B}(E) = \frac{\ell_{\scriptscriptstyle{X}} ^{g-1} \mathop{\rm ch}\nolimits_1^{B}(E)}{\ell_{\scriptscriptstyle{X}} ^{g} \mathop{\rm ch}\nolimits_0^{B}(E)}, $$ where $g = \dim X$, and we consider the notion of slope stability as similar to threefolds in Section \ref{sec:double-tilting-construction}. \begin{nota} \rm Let us write \begin{align*} &\Psi : = \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y} : D^b(X) \to D^b(Y), \\ &\widehat{\Psi} : =\Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X} : D^b(Y) \to D^b(X). \end{align*} Let $\mathcal{C}$ be a heart of a bounded t-structure on $D^b(Y)$. For a sequence of integers $i_1, i_2, \ldots, i_k$ we define $$ V^{\Psi}_{\mathcal{C}}(i_1, i_2, \ldots, i_k) = \{ E \in D^b(X) : H^{i}_{\mathcal{C}}(E) =0 \text{ for } i \notin \{ i_1, i_2, \ldots, i_k \}\}. $$ For $E \in D^b(X)$ we write $$ \Psi^{k}(E) = H^k_{\mathop{\rm Coh}\nolimits(Y)} (\Psi(E)) = \mathcal{H}^k(\Psi(E)). $$ We consider similar notions for $\widehat{\Psi}$. \end{nota} \begin{note} \label{prop:restriction-setup} \rm There exists a minimal $N \in \mathbb{Z}_{>0}$ such that $N \ell_{\scriptscriptstyle{X}} $ becomes integral. Let us fix a divisor $H_{\scriptscriptstyle X}$ from the linear system $| N \ell_{\scriptscriptstyle{X}} |$. 
The divisor $H_{\scriptscriptstyle X, x} \in | N \ell_{\scriptscriptstyle{X}} |$ is the translation of $H_{\scriptscriptstyle X}$ by $-x$: \begin{equation*} H_{\scriptscriptstyle X, x} := t_x^{-1}\left( H_{\scriptscriptstyle X} \right). \end{equation*} For positive integer $m$, let $m H_{\scriptscriptstyle X, x}$ be the divisor in the linear system $|m N \ell_{\scriptscriptstyle{X}} |$. So $mH_{\scriptscriptstyle X, x}$ is the zero locus of a section of the line bundle $\mathcal{O}_X(mH_{\scriptscriptstyle X, x})$, and we have the short exact sequence \begin{equation*} \label{ses_res} 0 \to \mathcal{O}_X(- mH_{\scriptscriptstyle X, x}) \to \mathcal{O}_X \to \mathcal{O}_{mH_{\scriptscriptstyle X, x}} \to 0 \end{equation*} in $\mathop{\rm Coh}\nolimits(X)$. Let $E \in \mathop{\rm Coh}\nolimits(X)$. Apply the functor $E \stackrel{\textbf{L}}{\otimes} (-)$ to the above short exact sequence and consider the long exact sequence of $\mathop{\rm Coh}\nolimits(X)$-cohomologies. Since $\mathcal{O}_X(- mH_{\scriptscriptstyle X, x}), \mathcal{O}_X$ are locally free, we have the long exact sequence $$ 0 \to \mathop{{\tT}or}\nolimits_1(E, \mathcal{O}_{mH_{\scriptscriptstyle X, x}}) \to E(- mH_{\scriptscriptstyle X, x}) \to E \to E \otimes\mathcal{O}_{mH_{\scriptscriptstyle X, x}} \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$ and $\mathop{{\tT}or}\nolimits_i(E, \mathcal{O}_{mH_{\scriptscriptstyle X, x}}) = 0$ for $i \ge 2$. Assume $E \in \mathop{\rm Coh}\nolimits_k(X)$ for some $k \in \{0,1,\ldots, g\}$. For generic $x \in X$, we have $\dim (\mathop{\rm Supp}\nolimits(E) \cap H_{\scriptscriptstyle X,x}) \le (k-1)$ and so $\mathop{{\tT}or}\nolimits_1(E, \mathcal{O}_{mH_{\scriptscriptstyle X,x}}) \in \mathop{\rm Coh}\nolimits_{\le k-1}(X)$. However, $ E(- mH_{\scriptscriptstyle X,x}) \in \mathop{\rm Coh}\nolimits_k(X)$, and so $\mathop{{\tT}or}\nolimits_1(E, \mathcal{O}_{mH_{\scriptscriptstyle X, x}}) =0$. 
Therefore we have the short exact sequence \begin{equation} \label{eqn:restriction-ses-higher-degree} 0 \to E(- mH_{\scriptscriptstyle X, x}) \to E \to E|_{m H_{\scriptscriptstyle X,x}} \to 0 \end{equation} in $\mathop{\rm Coh}\nolimits(X)$, where we write $$ E|_{mH_{\scriptscriptstyle X,x}} := E \otimes\mathcal{O}_{mH_{\scriptscriptstyle X,x}}. $$ Since any $E \in \mathop{\rm Coh}\nolimits(X)$ is an extension of sheaves from $\mathop{\rm Coh}\nolimits_k(X)$, $1 \le k \le g$, for generic $x \in X$ we have $\mathop{{\tT}or}\nolimits_i(E, \mathcal{O}_{mH_{\scriptscriptstyle X,x}}) = 0$ for $i \ge 1$ and so the short exact sequence \eqref{eqn:restriction-ses-higher-degree}. Moreover, when $0 \to E_1 \to E_2 \to E_3 \to 0$ is a short exact sequence in $\mathop{\rm Coh}\nolimits(X)$, for generic $x \in X$ we have $\mathop{{\tT}or}\nolimits_i(E_j, \mathcal{O}_{mH_{\scriptscriptstyle X,x}}) = 0$ for $i \ge 1$ and all $j$, and so \begin{equation*} 0 \to E_1|_{mH_{\scriptscriptstyle X,x}} \to E_2|_{mH_{\scriptscriptstyle X,x}} \to E_3|_{mH_{\scriptscriptstyle X,x}} \to 0 \end{equation*} is a short exact sequence in $\mathop{\rm Coh}\nolimits(X)$. \end{note} \begin{nota} \rm Similarly, we consider the divisors $H_{\scriptscriptstyle Y, y}, H_{\scriptscriptstyle \widehat{X} , \widehat{x}}, H_{\scriptscriptstyle \widehat{Y},\widehat{y}}$ on $Y, \widehat{X}, \widehat{Y}$ with respect to the induced ample classes $\ell_{\scriptscriptstyle{Y}}, \ell_{\scriptscriptstyle \widehat{X}} , \ell_{\scriptscriptstyle \widehat{Y}}$ as in Theorem \ref{prop:general-cohomo-FMT} under the Fourier-Mukai transforms. \end{nota} \begin{note} \rm From the definition of Fourier-Mukai transform, for any $E \in \mathop{\rm Coh}\nolimits(X)$ we have $$ \Psi^k(E) = 0, \text{ for all } k \not \in \{0, 1, \ldots, g\}. $$ \end{note} \begin{prop} \label{prop:V-0-for-higher-ample-twist} Let $E \in \mathop{\rm Coh}\nolimits(X)$. 
Then for large enough $m \in \mathbb{Z}_{>0}$, and any $x \in X$, we have the following: \begin{enumerate} \item $E(mH_{\scriptscriptstyle X, x}) \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(0)$. \item If $E \in \mathop{\rm Coh}\nolimits_k(X)$ such that $\operatorname{\mathcal{E}\textit{xt}}^i(E, \mathcal{O}_X) =0$ for $i \ne k$, then $E(-mH_{\scriptscriptstyle X, x}) \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(g-k)$. \end{enumerate} \end{prop} \begin{proof} (i) \ Let $y \in Y$ be any point. We have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^g(E(mH_{\scriptscriptstyle X, x})), \mathcal{O}_y) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}( \Psi(E(mH_{\scriptscriptstyle X, x}))[g], \Psi(\mathcal{E}^*_{X \times \{y\}})[g]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(E(mH_{\scriptscriptstyle X, x}), \mathcal{E}^*_{X \times \{y\}} ) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(E(mH_{\scriptscriptstyle X, x}) \otimes \mathcal{E}_{X \times \{y\}}, \mathcal{O}_{X}) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{O}_{X}, E(mH_{\scriptscriptstyle X, x}) \otimes \mathcal{E}_{X \times \{y\}}[g] )\\ & \cong H^{g}(X, E(mH_{\scriptscriptstyle X, x}) \otimes \mathcal{E}_{X \times \{y\}}) =0, \end{align*} for large enough $m\in \mathbb{Z}_{>0}$. Hence, $\Psi^g(E(mH_{\scriptscriptstyle X, x})) =0$. 
So we have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^{g-1}(E(mH_{\scriptscriptstyle X, x})), \mathcal{O}_y) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}( \Psi(E(mH_{\scriptscriptstyle X, x}))[g-1], \Psi(\mathcal{E}^*_{X \times \{y\}})[g]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(E(mH_{\scriptscriptstyle X, x}), \mathcal{E}^*_{X \times \{y\}}[1] ) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(E(mH_{\scriptscriptstyle X, x}) \otimes \mathcal{E}_{X \times \{y\}}, \mathcal{O}_{X}[1]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{O}_{X}, E(mH_{\scriptscriptstyle X, x}) \otimes \mathcal{E}_{X \times \{y\}}[g-1] )\\ & \cong H^{g-1}(X, E(mH_{\scriptscriptstyle X, x}) \otimes \mathcal{E}_{X \times \{y\}}) =0, \end{align*} for large enough $m\in \mathbb{Z}_{>0}$. Hence, $\Psi^{g-1}(E(mH_{\scriptscriptstyle X, x})) =0$. In this way, one can show that for large enough $m \in \mathbb{Z}_{>0}$, $\Psi^{k}(E(mH_{\scriptscriptstyle X, x})) =0$ for all $k \ne 0$ as required. \\ \noindent (ii) \ From Lemma \ref{prop:dual-FMT} and part (i), we have \begin{align*} \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}}(E(-mH_{\scriptscriptstyle X, x})) & \cong \left( \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^{\vee}} \left( \left(E(-mH_{\scriptscriptstyle X, x})\right)^\vee \right)[g] \right)^\vee \\ & \cong \left( \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^{\vee}} \left( \operatorname{\mathcal{E}\textit{xt}}^k(E, \mathcal{O}_X)(mH_{\scriptscriptstyle X, x}) \right)[g-k] \right)^\vee\\ & \cong \left( \mathcal{H}^0 \left( \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^{\vee}} \left( \operatorname{\mathcal{E}\textit{xt}}^k(E, \mathcal{O}_X)(mH_{\scriptscriptstyle X, x}) \right) \right) \right)^\vee [-(g-k)]. \end{align*} Therefore, we have the required claim. 
\end{proof} \begin{prop} \label{prop:FMT-cohomology-vanishing-torsion-sheaves} Let $E \in \mathop{\rm Coh}\nolimits_{\le k}(X)$. Then for $j \ge k+1$ $$ \Psi^{j}(E) = 0. $$ \end{prop} \begin{proof} Let $y \in Y$ be any point. If $k \le (g-1)$ then, similarly to the proof of Proposition \ref{prop:V-0-for-higher-ample-twist}--(i), we have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^g(E), \mathcal{O}_y) & \cong H^{g}(X, E \otimes \mathcal{E}_{X \times \{y\}}) =0, \end{align*} as $E \otimes \mathcal{E}_{X \times \{y\}} \in \mathop{\rm Coh}\nolimits_{\le k}(X)$; hence, $\Psi^g(E) =0$. If $k \le (g-2)$, then similarly we have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^{g-1}(E ), \mathcal{O}_y) & \cong H^{g-1}(X, E \otimes \mathcal{E}_{X \times \{y\}}) =0, \end{align*} as $E \otimes \mathcal{E}_{X \times \{y\}} \in \mathop{\rm Coh}\nolimits_{\le k}(X) \subset \mathop{\rm Coh}\nolimits_{\le (g-2)}(X)$; hence, $\Psi^{g-1}(E) =0$. In this way, one can show that for $j \ge k+1$, $ \Psi^{j}(E) = 0$. \end{proof} \begin{prop} \label{prop:FMT-0-cohomology-reflexive} Let $E \in \mathop{\rm Coh}\nolimits(X)$. Then we have the following: \begin{enumerate} \item If $E \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(0)$ then $\Psi^0(E)$ is a locally free sheaf. \item $\Psi^0(E)$ is a reflexive sheaf on $Y$. \end{enumerate} \end{prop} \begin{proof}(i) \ \ Suppose $E \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(0)$. For any $y \in Y$, we have \begin{align*} \mathop{\rm Ext}\nolimits^{1}_{\scriptscriptstyle Y}(\Psi^0(E), \mathcal{O}_y) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^0(E), \mathcal{O}_y[1]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\widehat{\Psi} \Psi^0(E)[g], \widehat{\Psi} (\mathcal{O}_y) [g+1]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(E, \mathcal{E}^*_{X \times \{y\}}[g+1]). 
\end{align*} Hence $\mathop{\rm Sing}\nolimits \Psi^0(E) = \emptyset$, that is, $\Psi^0(E)$ is a locally free sheaf (see Definition \ref{defi:singularity-set}). \\ \noindent (ii) \ For generic $x \in X$ and $m \in \mathbb{Z}_{>0} $, apply the Fourier-Mukai transform $\Psi$ to the $\mathcal{O}_X(mH_{\scriptscriptstyle X, x})$ twisted short exact sequence \eqref{eqn:restriction-ses-higher-degree}: $$ 0 \to E \to E(mH_{\scriptscriptstyle X, x}) \to E(mH_{\scriptscriptstyle X, x})|_{m H_{\scriptscriptstyle X, x}} \to 0. $$ By considering the long exact sequence of $\mathop{\rm Coh}\nolimits(Y)$ cohomologies, we get the following short exact sequence \begin{equation*} 0 \to \Psi^0(E) \to \Psi^0(E(mH_{\scriptscriptstyle X, x})) \to Q \to 0 \end{equation*} for some subsheaf $Q$ of $\Psi^0(E(mH_{\scriptscriptstyle X, x} )|_{mH_{\scriptscriptstyle X, x}} )$. From Proposition \ref{prop:V-0-for-higher-ample-twist}--(i), for large enough $m \in \mathbb{Z}_{>0}$, $E(m H_{\scriptscriptstyle X, x}) \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(0)$. From part (i) $\Psi^0(E(mH_{\scriptscriptstyle X, x}))$ is locally free. Hence $\Psi^0(E)$ is a torsion free sheaf. Similarly, one can show that $\Psi^0(E(m H_{\scriptscriptstyle X, x})|_{m H_{\scriptscriptstyle X, x}} )$ is torsion free. Therefore, from Lemma \ref{prop:reflexive-sheaf-results}--(2), $\Psi^0(E)$ is a reflexive sheaf on $Y$. \end{proof} \begin{prop} \label{prop:FMT-0-g-cohomo-vanishing} Let $E \in \mathop{\rm Coh}\nolimits(X)$. Then we have the following: \begin{enumerate} \item If $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((0, +\infty])$ then $\Psi^g(E) = 0$. \item If $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}(0)$ then $\Psi^g(E) \in \mathop{\rm Coh}\nolimits_0(Y)$. \item If $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((-\infty,0])$ then $\Psi^0(E) = 0$. 
\end{enumerate} \end{prop} \begin{proof} (i) \ Let $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((0, +\infty])$. Then for any $y \in Y$, we have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^g(E), \mathcal{O}_y) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi(E)[g], \mathcal{O}_y) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi(E)[g], \Psi(\mathcal{E}^*_{X \times\{y\}})[g]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(E, \mathcal{E}_{X \times\{y\}}^*) = 0, \end{align*} as $\mathcal{E}_{X \times\{y\}}^* \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}(0)$. Therefore, $\Psi^g(E) = 0$ as required. \\ \noindent (ii) \ Suppose $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}(0)$ is slope stable. If $\Psi^g(E) \ne 0$ then there exists $y \in Y$ such that $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^g(E), \mathcal{O}_{y }) \ne 0$. Hence, as in part (i) there exists a non-trivial map $E \to \mathcal{E}_{X \times\{y\}}^*$. Since $E$ is slope stable, this map is an injection with a quotient in $\mathop{\rm Coh}\nolimits_{\le (g-2)}(X)$. By applying the Fourier-Mukai transform $\Psi$ to this short exact sequence of sheaves on $X$, and considering the long exact sequence of $\mathop{\rm Coh}\nolimits(Y)$ cohomologies, we obtain that $\Psi^g(E) \cong \mathcal{O}_{y }$. This completes the proof. \\ \noindent (iii) \ Let $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((-\infty,0])$. We can assume $E$ is slope stable using the Harder-Narasimhan and Jordan-H\"older filtrations. 
Since $$ \operatorname{\mathcal{E}\textit{xt}}^{i}(\Psi^j(E),\mathcal{O}_{\scriptscriptstyle Y}) \in \mathop{\rm Coh}\nolimits_{\le (g-i)}(Y), $$ for generic $y \in Y$ we have \begin{align*} & \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\tau_{\ge 1} \Psi(E), \mathcal{O}_y) =0, \\ & \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\tau_{\ge 1} \Psi(E) [-1], \mathcal{O}_y) =0. \end{align*} Hence, by applying the functor $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(-, \mathcal{O}_y)$ to the distinguished triangle $$ \tau_{\ge 1} \Psi(E)[-1] \to \Psi^0(E) \to \Psi(E) \to \tau_{\ge 1} \Psi(E) $$ for generic $y \in Y$, we have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^0(E), \mathcal{O}_y) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi(E), \mathcal{O}_y) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi(E), \Psi(\mathcal{E}_{X \times\{y\}}^* )[g]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(E, \mathcal{E}_{X \times\{y\}}^* [g]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} (\mathcal{E}_{X \times\{y\}}^* , E)^\vee. \end{align*} If $\mu_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}} (E) <0$ then $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} (\mathcal{E}_{X \times\{y\}}^* , E) = 0$. Otherwise, $\mu_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}} (E) =0$ and since $E$ is assumed to be slope stable, any non-trivial map in $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} (\mathcal{E}_{X \times\{y\}}^* , E)$ gives rise to an isomorphism of sheaves; and in this case we have $\Psi^0(E)=0$. Therefore, for generic $y \in Y$, $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^0(E), \mathcal{O}_y) = 0$. By Proposition~\ref{prop:FMT-0-cohomology-reflexive}, $\Psi^0(E)$ is reflexive, and so we have $\Psi^0(E) = 0$. 
\end{proof} \begin{prop} \label{prop:sheaf-cohomo-vanishing-FMT-vanishing} We have the following for $E \in \mathop{\rm Coh}\nolimits(X)$: \begin{enumerate} \item If $H^g(X, E \otimes \mathcal{E}_{X \times\{y\}}) =0 $ for any $y \in Y$, then $\Psi^g(E) = 0$. \item If $H^0(X, E \otimes \mathcal{E}_{X \times\{y\}}) =0 $ for any $y \in Y$, then $\Psi^0(E) = 0$. \end{enumerate} \end{prop} \begin{proof} (i) \ As similar to the proof Proposition \ref{prop:V-0-for-higher-ample-twist}--(i), for any $y \in Y$ \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^g(E), \mathcal{O}_y) & \cong H^g(X, E \otimes \mathcal{E}_{X \times\{y\}}) =0. \end{align*} Therefore, $\Psi^g(E) = 0$ as required. \\ \noindent (ii) \ Suppose $H^0(X, E \otimes \mathcal{E}_{X \times\{y\}}) =0 $ for any $y \in Y$. By similar arguments in the proof of (iii) of Proposition \ref{prop:FMT-0-g-cohomo-vanishing}, for generic $y \in Y$, we have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Psi^0(E), \mathcal{O}_y) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} (\mathcal{E}_{X \times\{y\}}^* , E)^\vee \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} (\mathcal{O}_X , E \otimes \mathcal{E}_{X \times\{y\}})^\vee \\ & \cong H^0(X, E \otimes \mathcal{E}_{X \times\{y\}})^\vee = 0. \end{align*} Since $\Psi^0(E)$ is reflexive (Proposition \ref{prop:FMT-0-cohomology-reflexive}--(ii)), we have $\Psi^0(E) = 0$ as required. \end{proof} \begin{prop} \label{prop:slope-bound-FMT-0-g} Let $E \in \mathop{\rm Coh}\nolimits(X)$. Then we have the following: \begin{enumerate} \item $\Psi^0(E) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((-\infty,0])$. \item If $E \in \mathop{\rm Coh}\nolimits_{\ge 1}(X)$ then $\Psi^0(E) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((-\infty,0))$. 
\end{enumerate} \end{prop} \begin{proof} For generic $x \in X$ and large enough $m \in \mathbb{Z}_{>0} $, apply the Fourier-Mukai transform $\Psi$ to the $\mathcal{O}_X(mH_{\scriptscriptstyle X, x})$ twisted short exact sequence \eqref{eqn:restriction-ses-higher-degree}: $$ 0 \to E \to E(mH_{\scriptscriptstyle X, x}) \to E(mH_{\scriptscriptstyle X, x})|_{m H_{\scriptscriptstyle X, x}} \to 0. $$ By considering the long exact sequence of $\mathop{\rm Coh}\nolimits(Y)$ cohomologies we get $$ \Psi^0(E) \hookrightarrow \Psi^0(E(mH_{\scriptscriptstyle X, x})). $$ Therefore, it is enough to show the corresponding claims for $E(mH_{\scriptscriptstyle X, x})$ with large enough $m \in \mathbb{Z}_{>0}$ and generic $x \in X$. For such $m$, $E(mH_{\scriptscriptstyle X, x}) \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(0)$. \\ \noindent (i) \ For any $ T \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((0, +\infty])$, we have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(T, \Psi^0(E(m H_{\scriptscriptstyle X, x}))) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\widehat{\Psi}(T), \widehat{\Psi} \Psi^0(E(m H_{\scriptscriptstyle X, x}))) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\widehat{\Psi}(T),E(m H_{\scriptscriptstyle X, x})[-g]) =0, \end{align*} as from Proposition \ref{prop:FMT-0-g-cohomo-vanishing}--(i), $\widehat{\Psi}^g(T) =0$. Hence, $\Psi^0(E(mH_{\scriptscriptstyle X, x})) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((-\infty,0])$ as required. \\ \noindent (ii) \ Let us assume $E \in \mathop{\rm Coh}\nolimits_{\ge 1}(X)$. 
For any $ T \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}([0, +\infty])$, we have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(T, \Psi^0(E(m H_{\scriptscriptstyle X, x}))) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\widehat{\Psi}(T), \widehat{\Psi} \Psi^0(E(m H_{\scriptscriptstyle X, x}))) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\widehat{\Psi}(T),E(m H_{\scriptscriptstyle X, x})[-g]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\widehat{\Psi}^g(T),E(m H_{\scriptscriptstyle X, x})) =0, \end{align*} as from Proposition \ref{prop:FMT-0-g-cohomo-vanishing}--(ii), $\widehat{\Psi}^g(T) \in \mathop{\rm Coh}\nolimits_0(X)$. Hence, $\Psi^0(E(m H_{\scriptscriptstyle X, x})) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((-\infty,0))$ as required. \\ \end{proof} \begin{prop} \label{prop:slope-bound-torsion-sheaf} Let $1 \le k \le g$. If $E \in \mathop{\rm Coh}\nolimits_{\le k}(X)$, then $\Psi^k(E) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((0, +\infty])$. \end{prop} \begin{proof} Consider the torsion sequence of $E \in \mathop{\rm Coh}\nolimits_{\le k}(X)$; so $E$ fits into the short exact sequence $$ 0 \to E_{\le (k-1)} \to E \to E_k \to 0, $$ for some $E_{\le (k-1)} \in \mathop{\rm Coh}\nolimits_{\le (k-1)}(X)$ and $E_k \in \mathop{\rm Coh}\nolimits_k(X)$. By applying the Fourier-Mukai transform $\Psi$ and considering the long exact sequence of $\mathop{\rm Coh}\nolimits(Y)$ cohomologies, we obtain $$ \Psi^k (E) = \Psi^k(E_k). $$ Hence, we can assume $E \in \mathop{\rm Coh}\nolimits_k(X)$. 
For generic $x \in X$ and large enough $m \in \mathbb{Z}_{>0} $, apply the Fourier-Mukai transform $\Psi$ to the $\mathcal{O}_X(mH_{\scriptscriptstyle X, x})$ twisted short exact sequence \eqref{eqn:restriction-ses-higher-degree}: $$ 0 \to E \to E(mH_{\scriptscriptstyle X, x}) \to E(mH_{\scriptscriptstyle X, x})|_{m H_{\scriptscriptstyle X, x}} \to 0. $$ Here $E(m H_{\scriptscriptstyle X, x})|_{m H_{\scriptscriptstyle X, x}} \in \mathop{\rm Coh}\nolimits_{(k-1)}(X)$. By considering the long exact sequence of $\mathop{\rm Coh}\nolimits(Y)$ cohomologies, we get \begin{equation*} \Psi^k(E) \cong \Psi^{k-1}\left( E(m H_{\scriptscriptstyle X, x})|_{m H_{\scriptscriptstyle X, x}}\right), \end{equation*} as for large enough $m \in \mathbb{Z}_{>0}$, $E(m H_{\scriptscriptstyle X, x}) \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(0)$. Therefore, inductively we only need to consider the case $k=1$. Suppose $E \in \mathop{\rm Coh}\nolimits_1(X)$. For generic $x \in X$ and large enough $m \in \mathbb{Z}_{>0}$, apply the Fourier-Mukai transform $\Psi$ to the short exact sequence \eqref{eqn:restriction-ses-higher-degree}: $$ 0 \to E(-m H_{\scriptscriptstyle X, x}) \to E \to E|_{m H_{\scriptscriptstyle X, x}} \to 0, $$ where $E|_{m H_{\scriptscriptstyle X, x}} \in \mathop{\rm Coh}\nolimits_{0}(X)$. By considering the long exact sequence of $\mathop{\rm Coh}\nolimits(Y)$ cohomologies, we get $E(-m H_{\scriptscriptstyle X, x}) \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(1)$ and also \begin{equation} \label{eqn:quotient-torsion-1} \Psi^1( E(-m H_{\scriptscriptstyle X, x})) \twoheadrightarrow \Psi^1( E). 
\end{equation} From Lemma \ref{prop:dual-FMT}, $\left(\Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}}( E(-m H_{\scriptscriptstyle X, x}))\right)^\vee \cong \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^{\vee}} \left( \left( E(-m H_{\scriptscriptstyle X, x})\right)^\vee \right)[g]$, and from Proposition \ref{prop:slope-bound-FMT-0-g}--(ii), $$ \mathcal{H}^0 (\Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^{\vee}}(\operatorname{\mathcal{E}\textit{xt}}^1(E, \mathcal{O}_X) (m H_{\scriptscriptstyle X, x}))) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, -D_{\scriptscriptstyle Y}}^{\mu}((-\infty ,0)), $$ so we deduce $$ \Psi^1( E(-m H_{\scriptscriptstyle X, x})) \cong \left( \mathcal{H}^0 (\Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^{\vee}}(\operatorname{\mathcal{E}\textit{xt}}^1(E, \mathcal{O}_X) (m H_{\scriptscriptstyle X, x}))) \right)^* \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((0, +\infty)). $$ From \eqref{eqn:quotient-torsion-1} we have $\Psi^1( E) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((0, +\infty])$. This completes the proof. \end{proof} \section{Equivalences of Stability Condition Hearts on Abelian Surfaces} \label{sec:equivalence-stab-hearts-surface} In this section we show that the expectation at the end of Section \ref{sec:action-FMT-central-charge}, more precisely Conjecture \ref{prop:conjecture-equivalence-stab-hearts}, holds on abelian surfaces. This result is already known due to Huybrechts and Yoshioka \cite{HuyK3Equivalence, YoshiokaFMT}. However, for completeness and as a warm-up to study the abelian threefold case in the next sections, we present the complete proof and we closely follow that of Yoshioka. 
Let $X, Y$ be derived equivalent abelian surfaces and let $\ell_{\scriptscriptstyle{X}} , \ell_{\scriptscriptstyle{Y}}$ be ample classes on them respectively as in Theorem \ref{prop:general-cohomo-FMT}. Let $\Psi$ be the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ from $X$ to $Y$ with kernel $\mathcal{E}$, and let $\widehat{\Psi} = \Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}$. We have \begin{equation*} \label{imagesurface} \Psi(\mathop{\rm Coh}\nolimits(X)) \subset \langle \mathop{\rm Coh}\nolimits(Y), \mathop{\rm Coh}\nolimits(Y)[-1], \mathop{\rm Coh}\nolimits(Y)[-2] \rangle, \end{equation*} and similar relation for $\widehat{\Psi}$. Since $\widehat{\Psi} \circ \Psi \cong [-2]$ and $\Psi \circ \widehat{\Psi} \cong [-2]$, we have the following convergences of the spectral sequences. \begin{equation} \label{eqn:mukai-specseq} \left.\begin{aligned} & E_2^{p,q} = \widehat{\Psi}^{p} \Psi^q(E) \Longrightarrow \mathcal{H}^{p+q-2}(E) \\ & E_2^{p,q} = \Psi^{p}\widehat{\Psi}^q(E) \Longrightarrow \mathcal{H}^{p+q-2}(E) \end{aligned} \ \right\}. \end{equation} Here and elsewhere we write $\widehat{\Psi}^{p}(E) = \mathcal{H}^p (\widehat{\Psi}(E))$ and $\Psi^{q}(E) = \mathcal{H}^q (\Psi(E))$. Immediately from the convergence of this spectral sequence for $E \in \mathop{\rm Coh}\nolimits(X)$, we deduce that \begin{itemize} \item $ \Psi^0(E) \in V^{\widehat{\Psi}}_{\mathop{\rm Coh}\nolimits(X)}(2) $, and $\Psi^2(E) \in V^{\widehat{\Psi} }_{ \mathop{\rm Coh}\nolimits(X)}(0)$; \item there is an injection $\widehat{\Psi}^0\Psi^1(E) \hookrightarrow \widehat{\Psi}^2\Psi^0(E)$, and a surjection $ \widehat{\Psi}^0\Psi^2(E) \twoheadrightarrow \widehat{\Psi}^2\Psi^1(E)$. \end{itemize} Let us recall the notation in Conjecture \ref{prop:conjecture-equivalence-stab-hearts} for our derived equivalent abelian surfaces. 
Consider the complexified ample classes $\Omega = -D_{\scriptscriptstyle X} + i \lambda \ell_{\scriptscriptstyle{X}} $, $\Omega' = D_{\scriptscriptstyle Y} + i (1 /\lambda) \ell_{\scriptscriptstyle{Y}} $ on $X$, $Y$ respectively. The function defined by $Z^{(1)}_{\Omega} = -i \int_{X} e^{- \Omega } (\mathop{\rm ch}\nolimits_0, \mathop{\rm ch}\nolimits_1, 0)$ together with the standard heart $\mathop{\rm Coh}\nolimits(X)$ defines a very weak stability condition $\sigma_1$ on $D^b(X)$. Define the subcategories $$ \mathcal{F}^{\scriptscriptstyle X} = \mathcal{P}^{\scriptscriptstyle X}_{\sigma_1}((0,\, 1/2]), \ \ \mathcal{T}^{\scriptscriptstyle X}= \mathcal{P}^{\scriptscriptstyle X}_{\sigma_1}((1/2 ,\, 1]) $$ of $\mathop{\rm Coh}\nolimits(X)$ in terms of the associated slicing $\mathcal{P}^{\scriptscriptstyle X}_{\sigma_1}$. In other words, $$ \mathcal{F}^{\scriptscriptstyle X} = \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}([0, +\infty)), \ \ \mathcal{T}^{\scriptscriptstyle X}= \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((0, +\infty)). $$ Then the Bridgeland stability condition heart in Conjecture \ref{prop:conjecture-equivalence-stab-hearts} is $$ \mathcal{B}^{\scriptscriptstyle X} = \langle \mathcal{F}^{\scriptscriptstyle X}[1] , \mathcal{T}^{\scriptscriptstyle X} \rangle = \mathcal{P}^{\scriptscriptstyle X}_{\sigma_1}((1/2 ,\, 3/2]). $$ We consider similar subcategories associated to $\Omega'$ on $Y$. We need the following results about cohomology sheaves of the images under the Fourier-Mukai transforms, and closely follow the arguments in the author's PhD thesis \cite[Section 6]{PiyThesis} and which is also adopted from \cite{YoshiokaFMT}. \begin{prop} \label{prop:slope-bound-FMT-surface} Let $E \in \mathop{\rm Coh}\nolimits(X)$. 
Then we have the following: \begin{enumerate}[label=(\arabic*)] \item (i) If $E \in \mathcal{T}^{\scriptscriptstyle X}$ then $\Psi^2(E) = 0$, and (ii) if $E \in \mathcal{F}^{\scriptscriptstyle X}$ then $\Psi^0(E) = 0$. \item (i) $\Psi^2(E) \in \mathcal{T}^{\scriptscriptstyle Y}$, and (ii) $\Psi^0(E) \in \mathcal{F}^{\scriptscriptstyle Y}$. \item (i) if $E \in \mathcal{T}^{\scriptscriptstyle X}$ then $\Psi^1(E) \in \mathcal{T}^{\scriptscriptstyle Y}$, and (ii) if $E \in \mathcal{F}^{\scriptscriptstyle X}$ then $\Psi^1(E) \in \mathcal{F}^{\scriptscriptstyle Y}$. \end{enumerate} \end{prop} \begin{proof} \noindent (1) and (2) follow from Propositions \ref{prop:FMT-0-g-cohomo-vanishing}-(i), \ref{prop:FMT-0-g-cohomo-vanishing}-(iii), \ref{prop:slope-bound-FMT-0-g}--(i), and \ref{prop:slope-bound-torsion-sheaf}. \\ \noindent Let us prove part (3)--(i). Let $E \in \mathcal{T}^{\scriptscriptstyle X}$. By the Harder-Narasimhan filtration of $\Psi^1(E)$ there exist $T \in \mathcal{T}^{\scriptscriptstyle Y}$ and $F \in \mathcal{F}^{\scriptscriptstyle Y}$ such that $0 \to T \to \Psi^1(E) \to F \to 0$ is a short exact sequence in $\mathop{\rm Coh}\nolimits(Y)$. Assume $F \neq 0$ for a contradiction. Now apply the Fourier-Mukai transform $\widehat{\Psi}$ to this short exact sequence and then consider the long exact sequence of $\mathop{\rm Coh}\nolimits(X)$ cohomologies. By (1)(i) of this proposition, $\Psi^2(E) = 0$. So from the convergence of the Spectral Sequence \ref{Spec-Seq-Mukai} for $E$, $\widehat{\Psi}^2 \Psi^1(E) = 0$ and $\widehat{\Psi}^1 \Psi^1(E)$ is a quotient of $E \in \mathcal{T}^{\scriptscriptstyle X}$. Hence, we have $\widehat{\Psi}^1 \Psi^1(E) \in \mathcal{T}^{\scriptscriptstyle X}$. By (1)(i) of this proposition, $\widehat{\Psi}^2(T) = 0$ and so there is a surjection $\widehat{\Psi}^1 \Psi^1(E) \twoheadrightarrow \widehat{\Psi}^1(F)$. 
Therefore, $$ \ell_{\scriptscriptstyle{X}} \cdot \mathop{\rm ch}\nolimits_1^{-D_{\scriptscriptstyle X}}(\widehat{\Psi}^1(F)) \ge 0, $$ where the equality holds when $\widehat{\Psi}^1(F) \in \mathop{\rm Coh}\nolimits_0(X)$. Also $\widehat{\Psi} (F) \in \mathop{\rm Coh}\nolimits(X)[-1]$, and so by Theorem \ref{prop:antidiagonal-rep-cohom-FMT}, $$ \ell_{\scriptscriptstyle{X}} \cdot \mathop{\rm ch}\nolimits_1^{-D_{\scriptscriptstyle X}}(\widehat{\Psi}^1(F)) \le 0. $$ Therefore, $\ell_{\scriptscriptstyle{X}} \cdot \mathop{\rm ch}\nolimits_1^{-D_{\scriptscriptstyle X}}(\widehat{\Psi}^1(F)) = 0$, and so $\widehat{\Psi}^1(F) \in \mathop{\rm Coh}\nolimits_0(X)$. But this is not possible as $\Psi \widehat{\Psi}^1(F) \in \mathop{\rm Coh}\nolimits(Y)[-1]$. This is the required contradiction to complete the proof. Similarly one can prove (3)(ii). \end{proof} In other words, the results of the above proposition say \begin{equation*} \left.\begin{aligned} & \Psi (\mathcal{T}^{\scriptscriptstyle X}) \subset \langle \mathcal{F}^{\scriptscriptstyle Y} , \mathcal{T}^{\scriptscriptstyle Y}[-1] \rangle \\ & \Psi (\mathcal{F}^{\scriptscriptstyle X}) \subset \langle \mathcal{F}^{\scriptscriptstyle Y}[-1] , \mathcal{T}^{\scriptscriptstyle Y}[-2] \rangle \end{aligned} \ \right\}. \end{equation*} Similar results hold for $\widehat{\Psi}$. Since $\mathcal{B}^{\scriptscriptstyle X} = \langle \mathcal{F}^{\scriptscriptstyle X}[1] , \mathcal{T}^{\scriptscriptstyle X} \rangle$ and $\mathcal{B}^{\scriptscriptstyle Y} = \langle \mathcal{F}^{\scriptscriptstyle Y}[1] , \mathcal{T}^{\scriptscriptstyle Y} \rangle$, we have $\Psi [1] (\mathcal{B}^{\scriptscriptstyle X} ) \subset \mathcal{B}^{\scriptscriptstyle Y}$ and $\widehat{\Psi} [1] (\mathcal{B}^{\scriptscriptstyle Y} ) \subset \mathcal{B}^{\scriptscriptstyle X}$. Hence, \begin{equation} \label{eqn:equivalence-surfaces-hearts} \Psi [1] (\mathcal{B}^{\scriptscriptstyle X} ) \cong \mathcal{B}^{\scriptscriptstyle Y}. 
\end{equation} as expected in Conjecture \ref{prop:conjecture-equivalence-stab-hearts} for the $g=2$ case. \begin{note} \label{prop:BG-ineq-surface-FMT} \rm We can use the equivalence \eqref{eqn:equivalence-surfaces-hearts} of the tilted hearts to prove the usual Bogomolov-Gieseker type inequality for slope stable torsion free sheaves on an abelian surface. Let $E$ be a slope stable torsion free sheaf on an abelian surface $X$ with respect to an ample class $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$. Then it fits into the short exact sequence $0 \to E \to E^{**} \to T \to 0$ for some torsion sheaf $T \in \mathop{\rm Coh}\nolimits_0(X)$. Let $$ -D_{\scriptscriptstyle X} = \frac{c_1(E)}{\mathop{\rm rk}\nolimits(E)}. $$ Then consider the corresponding Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}: D^b(X) \to D^b(Y)$ as in Section \ref{sec:cohomological-FMT}. Similar to Lemma \ref{prop:minimal-objects-threefold-hearts}, for surfaces (see \cite[Theorem 0.2]{HuyK3Equivalence}), the object $$ E^{**}[1] \in \mathcal{B}^{\scriptscriptstyle X} $$ is a minimal object. Therefore, under the equivalence \eqref{eqn:equivalence-surfaces-hearts}, the object $$ F := \Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}[1](E^{**}[1]) \in \mathcal{B}^{\scriptscriptstyle Y} $$ is also a minimal object in $\mathcal{B}^{\scriptscriptstyle Y}$. Since $F$ fits into the short exact sequence $$ 0 \to \mathcal{H}^{-1}(F)[1] \to F \to \mathcal{H}^0(F) \to 0, $$ we have either $\mathcal{H}^{-1}(F) =0$ or $\mathcal{H}^0(F)=0$. In the first case one can show that $\mathcal{H}^0(F) \cong \mathcal{O}_y$ for some $y\in Y$, and so $E^{**} \cong \mathcal{E}^{*}_{X \times \{y\}}$, which satisfies $\mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(E^{**}) = 0$. 
In the remaining case, since $$ -\mathop{\rm ch}\nolimits_0(F) = \mathop{\rm rk}\nolimits (\mathcal{H}^{-1}(F) ) >0, $$ from Theorem \ref{prop:antidiagonal-rep-cohom-FMT} we get $\mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(E^{**}[1]) >0$. So we have $$ \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(E) \le 0 $$ as required in the usual Bogomolov-Gieseker inequality for $E$. \end{note} \section{FM Transform of Sheaves on Abelian Threefolds} \label{sec:FMT-sheaves-abelian-threefolds} In this section we further study the slope stability of sheaves under the Fourier-Mukai transforms on abelian threefolds continuing Section \ref{sec:FMT-abelian-varieties}. Let $X, Y$ be derived equivalent abelian threefolds and let $\ell_{\scriptscriptstyle{X}} , \ell_{\scriptscriptstyle{Y}}$ be ample classes on them respectively as in Theorem \ref{prop:general-cohomo-FMT}. Let $\Psi$ be the Fourier-Mukai transform $\Phi_{\mathcal{E}}^{\scriptscriptstyle X \to \scriptscriptstyle Y}$ from $X$ to $Y$ with kernel $\mathcal{E}$, and let $\widehat{\Psi} = \Phi_{\mathcal{E}^\vee}^{\scriptscriptstyle Y \to \scriptscriptstyle X}$. Then $\widehat{\Psi} \circ \Psi \cong [-3] $ and $\Psi \circ \widehat{\Psi} \cong [-3] $. \begin{nota} \rm As in Section \ref{sec:FMT-abelian-varieties}, we write $$ \Psi^p(E) = \mathcal{H}^p\left( \Psi(E)\right) $$ and use similar notation for $\widehat{\Psi}$. \end{nota} \begin{mukaispecseq} \label{Spec-Seq-Mukai} $$ E_2^{p,q} = \widehat{\Psi}^p \Psi^q (E) \Longrightarrow \mathcal{H}^{p+q-3}(E). 
$$ \end{mukaispecseq} We can describe the second page of the Mukai Spectral Sequence for $E \in \mathop{\rm Coh}\nolimits(X)$ as in the following diagram: \begin{center} \begin{tikzpicture}[scale=1.7] \draw[gray,very thin] (0,0) grid (4,4); \draw[->,thick] (3.75,0.25) -- (4.5, 0.25) node[above] {$p$}; \draw[->,thick] (0.25,3.75) -- (0.25,4.5) node[left] {$q$}; \draw (2.5,0.5) node(a) {$\scriptstyle \widehat{\Psi}^2 \Psi^0 (E) $}; \draw (0.5,1.5) node(b) {$\scriptstyle \widehat{\Psi}^0 \Psi^1 (E)$}; \draw[->,thick] (b) -- node[below] {$\cong$} (a); \draw (3.5,2.5) node(c) {$\scriptstyle \widehat{\Psi}^3 \Psi^2 (E)$}; \draw (1.5,3.5) node(d) {$\scriptstyle \widehat{\Psi}^1 \Psi^3 (E)$}; \draw[->,thick] (d) -- node[above] {$\cong$} (c); \draw (3.5,0.5) node(e) {$\scriptstyle \widehat{\Psi}^3 \Psi^0 (E) $}; \draw (1.5,1.5) node(f) {$\scriptstyle \widehat{\Psi}^1 \Psi^1 (E)$}; \draw[>->,thick] (f) -- node[above] {$ $} (e); \draw (2.5,2.5) node(g) {$\scriptstyle \widehat{\Psi}^2 \Psi^2 (E) $}; \draw (0.5,3.5) node(h) {$\scriptstyle \widehat{\Psi}^0 \Psi^3 (E) $}; \draw[->>,thick] (h) -- node[above] {$ $} (g); \draw (2.5,1.5) node(i) {$\scriptstyle \widehat{\Psi}^2 \Psi^1 (E) $}; \draw (3.5,1.5) node(j) {$\scriptstyle \widehat{\Psi}^3 \Psi^1 (E) $}; \draw (0.5,2.5) node(k) {$\scriptstyle \widehat{\Psi}^0 \Psi^2 (E)$}; \draw (1.5,2.5) node(l) {$\scriptstyle \widehat{\Psi}^1 \Psi^2 (E)$}; \draw[->,thick] (l) -- node[above] {$ $} (j); \draw[->,thick] (k) -- node[above] {$ $} (i); \end{tikzpicture} \end{center} We deduce the following immediately from the convergence of the Mukai Spectral Sequence for $E \in \mathop{\rm Coh}\nolimits(X)$: \begin{align*} & \widehat{\Psi}^0 \Psi^0 (E) = \widehat{\Psi}^1 \Psi^0(E) = \widehat{\Psi}^2 \Psi^3 (E) = \widehat{\Psi}^3 \Psi^3 (E) = 0, \\ & \widehat{\Psi}^0 \Psi^1 (E) \cong \widehat{\Psi}^2 \Psi^0(E), \\ & \widehat{\Psi}^1 \Psi^3 (E) \cong \widehat{\Psi}^3 \Psi^2(E). 
\end{align*} \begin{prop} \label{prop:FMT-F-1-reflexivity} Let $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((-\infty,0])$. Then $\Psi^1(E)$ is a reflexive sheaf. \end{prop} \begin{proof} By Proposition \ref{prop:FMT-0-g-cohomo-vanishing}--(iii), $\Psi^0(E) = 0$. Let $y \in Y$. From the convergence of Mukai Spectral Sequence~\ref{Spec-Seq-Mukai} for $E$ and $0 \le i \le 2$, we have \begin{align*} \mathop{\rm Ext}\nolimits^i_{\scriptscriptstyle Y}(\mathcal{O}_y, \Psi^1(E)) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\mathcal{O}_y, \Psi^1(E)[i]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\widehat{\Psi}(\mathcal{O}_y), \widehat{\Psi}(\Psi^1(E))[i]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}}, \widehat{\Psi}^2\Psi^1(E)[i-2]) \end{align*} as $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}}, \tau_{\ge 3}\widehat{\Psi}(\Psi^1(E))[i]) \cong\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}}, \widehat{\Psi}^3\Psi^1(E)[i-3])=0$. Therefore, $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\mathcal{O}_y, \Psi^1(E)) =\mathop{\rm Ext}\nolimits_{\scriptscriptstyle Y}^1(\mathcal{O}_y, \Psi^1(E))=0$, and $\mathop{\rm Ext}\nolimits_{\scriptscriptstyle Y}^2(\mathcal{O}_y, \Psi^1(E)) \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}} , \widehat{\Psi}^2\Psi^1(E))$. From the convergence of Mukai Spectral Sequence~\ref{Spec-Seq-Mukai} for $E$, $$ 0 \to \widehat{\Psi}^0\Psi^2(E) \to \widehat{\Psi}^2\Psi^1(E) \to F \to 0 $$ is a short exact sequence in $\mathop{\rm Coh}\nolimits(X)$. Here $F$ is a subobject of $E$ and so $F \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((-\infty,0])$. 
By applying the functor $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}}, -)$, we obtain the exact sequence $$ 0 \to \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}}, \widehat{\Psi}^0\Psi^2(E)) \to \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}}, \widehat{\Psi}^2\Psi^1(E)) \to \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}} , F) \to \cdots. $$ Here $F\in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((-\infty,0])$, and by Proposition~\ref{prop:slope-bound-FMT-0-g}--(i), $\widehat{\Psi}^0\Psi^2(E)$ is also in $\operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((-\infty,0])$. Therefore, $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}} , F) \ne 0$ or $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle X}(\mathcal{E}^*_{X \times\{y\}}, \widehat{\Psi}^0\Psi^2(E)) \ne 0$ for at most a finite number of points $y \in Y$. Therefore, from Lemma~\ref{prop:reflexive-sheaf-threefold}, $\Psi^1(E)$ is a reflexive sheaf. \end{proof} For any positive integer $s$, the semihomogeneous bundle \begin{equation*} \widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y} })} = \Phi^{\scriptscriptstyle \widehat{Y} \to \scriptscriptstyle Y}_{\mathcal{P}}(\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})) \end{equation*} is slope stable on $Y$. In the rest of this section we abuse notation to write $\widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})}$ for the functor $\widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})} \otimes(-)$. \begin{prop} \label{prop:slope-limit-check-higehst-HN} Let $E_n \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}([0, +\infty))$, $n \in \mathbb{Z}_{>0}$ be a sequence of coherent sheaves on $Y$. 
Suppose that for any $s>0$ there is $N(s) > 0$ such that for any $n > N(s)$ we have $\widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})} E_n \in V_{\mathop{\rm Coh}\nolimits(X)}^{\widehat{\Psi}}(3)$. Then $\mu_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^+(E_n) \to 0$ as $n \to +\infty$. \end{prop} \begin{proof} Let $s$ be a positive integer. Let us prove that for $n > N(s)$ we have $\widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})} E_n \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((-\infty,0])$. From the Harder-Narasimhan property there exists $T \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((0 , +\infty])$ and $F \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((-\infty,0])$ such that $$ 0 \to T \to \widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})} E_n \to F \to 0 $$ is a short exact sequence in $\mathop{\rm Coh}\nolimits(Y)$. By applying the Fourier-Mukai transform $\widehat{\Psi}$ and considering the long exact sequence of $\mathop{\rm Coh}\nolimits(X)$-cohomologies, we obtain $T \in V^{\widehat{\Psi}}_{\mathop{\rm Coh}\nolimits(X)}(2)$ and $F \in V^{\widehat{\Psi}}_{\mathop{\rm Coh}\nolimits(X)}(1,3)$. Moreover, $\widehat{\Psi}^2(T) \cong \widehat{\Psi}^1(F)$. Hence, from the convergence of Mukai Spectral Sequence \ref{Spec-Seq-Mukai}, $T \cong \Psi^1\widehat{\Psi}^2(T) \cong \Psi^1\widehat{\Psi}^1(F) =0$. Therefore $\widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})} E_n \cong F \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((-\infty,0])$. The bundle $\widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})}$ is slope stable with $\mu_{\ell_{\scriptscriptstyle{Y}},0} = -k/s$ for some constant $k >0$. 
Hence, for $n > N(s)$ $$ E_n \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}([0, k/s]). $$ Therefore, the claim follows by considering large enough $s$. \end{proof} Let $s$ be a positive integer. Consider the Fourier-Mukai functor from $D^b(X)$ to $D^b(X)$ defined by $$ \Pi = \widehat{\Psi} \circ \widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})} \circ \Psi [3]. $$ Then $\Pi^i(\mathcal{O}_x)= 0$ for $i \ne 0$ and $\Pi^0(\mathcal{O}_x)$ is a semistable semihomogeneous bundle on $X$. Define the Fourier-Mukai functor $$ \widehat{\Pi} = \widehat{\Psi} \circ \widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})}^* \circ \Psi. $$ One can see $\widehat{\Pi}[3]$ is right and left adjoint to $\Pi$ (and vice versa). We have $\widehat{\Pi}^i(\mathcal{O}_x) = 0$ for $i \ne 0$, and $\widehat{\Pi}^0(\mathcal{O}_x)$ is a semistable semihomogeneous bundle on $X$. Therefore, $\Pi$ is a Fourier-Mukai functor with kernel a locally free sheaf $\mathcal{U}$ on $X \times X$. We have the spectral sequence \begin{equation} \label{Spec-Seq-FM-Functor} \widehat{\Psi}^p \left( \widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})} \ \Psi^q(E) \right) \Longrightarrow \Pi^{p+q-3} (E) \end{equation} for $E$. \begin{prop} \label{prop:slope-bound-FMT-torsion-1} Let $E \in \mathop{\rm Coh}\nolimits_1(X)$. Then $\mu_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^+(\Psi^1(E(-nH_{\scriptscriptstyle X}))) \to 0$ as $n \to +\infty$. \end{prop} \begin{proof} Since $E \in \mathop{\rm Coh}\nolimits_1(X)$, for sufficiently large $n \in \mathbb{Z}_{>0}$, we have $E(-nH_{\scriptscriptstyle X}) \in V_{\mathop{\rm Coh}\nolimits(Y)}^{\Psi}(1)$. By Proposition~\ref{prop:slope-bound-torsion-sheaf}, $\Psi^1(E(-nH_{\scriptscriptstyle X})) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((0, +\infty))$. Let $s$ be a positive integer. 
Consider the convergence of the Spectral Sequence \eqref{Spec-Seq-FM-Functor} for $E(-nH_{\scriptscriptstyle X})$. For large enough $n \in \mathbb{Z}_{>0}$, we also have $E(-nH_{\scriptscriptstyle X}) \in V_{\mathop{\rm Coh}\nolimits(X)}^{\Pi}(1)$. Therefore, $\widehat{\mathcal{O}_{\widehat{Y}}(sH_{\scriptscriptstyle \widehat{Y}})} \Psi^1(E(-nH_{\scriptscriptstyle X})) \in V_{\mathop{\rm Coh}\nolimits(X)}^{\widehat{\Psi}}(3)$, and so the claim follows from Proposition~\ref{prop:slope-limit-check-higehst-HN}. \end{proof} \begin{prop} \label{prop:FMT-reflexive-large-negative-twist} Let $E$ be a reflexive sheaf. Then for sufficiently large $n \in \mathbb{Z}_{>0}$, \begin{enumerate}[label=(\roman*)] \item $E(-nH_{\scriptscriptstyle X}) \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(2,3)$, and \item $\Psi^2(E(-nH_{\scriptscriptstyle X})) \in \Psi^0(T_0)$ for some $T_0 \in \mathop{\rm Coh}\nolimits_0(X)$. \end{enumerate} \end{prop} \begin{proof} (i) \ Consider a minimal locally free resolution of $E$: $$ 0 \to F_2 \to F_1 \to E \to 0. $$ By applying the Fourier-Mukai transform $\Psi \circ \mathcal{O}_X(-nH_{\scriptscriptstyle X}) $ for sufficiently large $n \in \mathbb{Z}_{> 0}$, we obtain $E(-nH_{\scriptscriptstyle X}) \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(2,3)$ as required. \\ \noindent (ii) \ Since $E$ is a reflexive sheaf, there is a locally free sheaf $P$ and a torsion free sheaf $Q$ such that $$ 0 \to E \to P \to Q \to 0 $$ is a short exact sequence in $\mathop{\rm Coh}\nolimits(X)$ (see Lemma~\ref{prop:reflexive-sheaf-results}--(2)). By applying the Fourier-Mukai transform $\Psi \circ \mathcal{O}_X(-nH_{\scriptscriptstyle X})$ for sufficiently large $n$ we have $\Psi^2(E(-nH_{\scriptscriptstyle X}) ) \cong \Psi^1(Q(-nH_{\scriptscriptstyle X}) )$. The torsion free sheaf $Q$ fits into the short exact sequence $0 \to Q \to Q^{**} \to T \to 0$ for some $T \in \mathop{\rm Coh}\nolimits_{\le 1}(X)$. 
Apply the Fourier-Mukai transform $\Psi \circ \mathcal{O}_X(-nH_{\scriptscriptstyle X})$ for sufficiently large $n$ and consider the long exact sequence of $\mathop{\rm Coh}\nolimits(Y)$-cohomologies. Since $Q^{**}(-nH_{\scriptscriptstyle X}) \in V_{\mathop{\rm Coh}\nolimits(Y)}^{\Psi}(2,3)$, we have $\Psi^1(Q(-nH_{\scriptscriptstyle X}) ) \cong \Psi^0(T(-nH_{\scriptscriptstyle X}) )$. The torsion sheaf $T \in \mathop{\rm Coh}\nolimits_{\le 1}(X)$ fits into the short exact sequence $0 \to T_0 \to T \to T_1 \to 0$ in $\mathop{\rm Coh}\nolimits(X)$ for $T_i \in \mathop{\rm Coh}\nolimits_i(X)$, $i=0,1$. Therefore, $ \Psi^0(T(-nH_{\scriptscriptstyle X}) ) \cong \Psi^0(T_0 )$, and so $\Psi^2(E(-nH_{\scriptscriptstyle X}) ) \cong \Psi^0(T_0 )$ as required. \end{proof} \begin{prop} \label{prop:supprt-result-FMT} Let $E \in \mathop{\rm Coh}\nolimits_1(X)$ with $E \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(1)$. If $0 \ne T \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}([0, +\infty])$ is a subsheaf of $\Psi^1(E)$ then $\ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(T) \le 0$. \end{prop} \begin{proof} Recall Note~\ref{prop:restriction-setup}; choose $x \in X$ such that $\dim (\mathop{\rm Supp}\nolimits(E) \cap H_{\scriptscriptstyle X,x}) \le 0$. Then for $n \in \mathbb{Z}_{>0}$, we have the short exact sequence $$ 0 \to E (-nH_{\scriptscriptstyle X, x}) \to E \to T_0 \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$, where $T_0 = E|_{ n H_{\scriptscriptstyle X, x}} \in \mathop{\rm Coh}\nolimits_0(X)$. By applying the Fourier-Mukai transform $\Psi$ we get the following commutative diagram for some $A \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}([0, +\infty])$. 
$$ \xymatrixcolsep{3pc} \xymatrixrowsep{3pc} \xymatrix{ 0 \ar[r] & \Psi^0(T_0) \ar[r] & \Psi^1( E (-nH_{\scriptscriptstyle X, x})) \ar[r] & \Psi^1(E) \ar[r] & 0 \\ 0 \ar[r] & \Psi^0(T_0) \ar[r] \ar@{=}[u] & A \ar[r] \ar@{^{(}->}[u] & T \ar[r] \ar@{^{(}->}[u] & 0 } $$ For $k=1,2,3$, we have $\mathop{\rm ch}\nolimits_k^{D_{\scriptscriptstyle Y}}(\Psi^0(T_0))= 0$; so $\mathop{\rm ch}\nolimits_k^{D_{\scriptscriptstyle Y}}(A) = \mathop{\rm ch}\nolimits_k^{D_{\scriptscriptstyle Y}}(T)$. Let $G$ be a slope semistable Harder-Narasimhan factor of $A$. Then, from the usual Bogomolov-Gieseker inequality, we have \begin{align*} 2 \ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(G) & \le \frac{\left( \ell_{\scriptscriptstyle{Y}}^2 \mathop{\rm ch}\nolimits_1^{D_{\scriptscriptstyle Y}}(G)\right)^2}{\ell_{\scriptscriptstyle{Y}}^3 \mathop{\rm ch}\nolimits_0^{D_{\scriptscriptstyle Y}}(G)} \\ & \le \ell_{\scriptscriptstyle{Y}}^2 \mathop{\rm ch}\nolimits_1^{D_{\scriptscriptstyle Y}}(A) \ \mu_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}(G) \\ & \le \ell_{\scriptscriptstyle{Y}}^2 \mathop{\rm ch}\nolimits_1^{D_{\scriptscriptstyle Y}}(T) \ \mu_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^+ (\Psi^1( E (-nH_{\scriptscriptstyle X, x}))). \end{align*} Let $$ c_0 = \min \{2 \ell_{\scriptscriptstyle{Y}}\mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(F) >0 : F \in \mathop{\rm Coh}\nolimits(Y)\}. $$ By Proposition~\ref{prop:slope-bound-FMT-torsion-1}, $ \mu_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^+ (\Psi^1( E (-nH_{\scriptscriptstyle X, x}))) \to 0$ as $n \to +\infty$. 
So choose large enough $n \in \mathbb{Z}_{>0}$ such that $$ \ell_{\scriptscriptstyle{Y}}^2 \mathop{\rm ch}\nolimits_1^{D_{\scriptscriptstyle Y}}(T) \ \mu_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^+ (\Psi^1( E (-nH_{\scriptscriptstyle X, x}))) < c_0; $$ hence, we have $\ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(G) \le 0$. Therefore, $\ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(T) = \ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(A) \le 0$. \end{proof} \begin{prop} \label{prop:FMT-divisor-restrict-large-negative-twist} Let $E$ be a reflexive sheaf on $X$. Therefore, $\dim \mathop{\rm Sing}\nolimits(E) \le 0$, and so for generic $ x \in X$ we have $\mathop{\rm Sing}\nolimits(E) \cap H_{\scriptscriptstyle X, x} = \emptyset$. Let $m$ be any positive integer. For large enough $n \in \mathbb{Z}_{>0}$, $$ E(-n H_{\scriptscriptstyle X})|_{mH_{\scriptscriptstyle X, x}} \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(2). $$ \end{prop} \begin{proof} The dual sheaf $E^*$ is also reflexive (Lemma~\ref{prop:reflexive-sheaf-results}--(4)). Consider a minimal locally free resolution of $E^*$: $$ 0 \to G \to F \to E^* \to 0. $$ By applying the dualizing functor $\mathbf{R} \operatorname{\mathcal{H}\textit{om}}(- ,\mathcal{O}_X)$ to this short exact sequence, we get the following long exact sequence in $\mathop{\rm Coh}\nolimits(X)$: $$ 0 \to E \to F^* \to G^* \to \operatorname{\mathcal{E}\textit{xt}}^1(E^*, \mathcal{O}_X) \to 0. $$ Let $Q = \operatorname{coker}(E \to F^*)$. Since $E$ is reflexive, $$ \mathop{\rm Sing}\nolimits(E) = \mathop{\rm Sing}\nolimits(E^*) = \mathop{\rm Supp}\nolimits(\operatorname{\mathcal{E}\textit{xt}}^1(E^*, \mathcal{O}_X)). 
$$ By choice $\mathop{\rm Sing}\nolimits(E) \cap H_{\scriptscriptstyle X, x} = \emptyset$, and so from the short exact sequence $0 \to Q \to G^* \to \operatorname{\mathcal{E}\textit{xt}}^1(E^*, \mathcal{O}_X) \to 0$, $Q|_{mH_{\scriptscriptstyle X, x}} \cong G^*|_{mH_{\scriptscriptstyle X, x}}$. So we have the short exact sequence $$ 0 \to E|_{mH_{\scriptscriptstyle X, x}} \to F^*|_{mH_{\scriptscriptstyle X, x}} \to G^*|_{mH_{\scriptscriptstyle X, x}} \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$. Since $F^*$ and $G^*$ are locally free, for large enough $n \in \mathbb{Z}_{>0}$ we have $$ F^*(-nH_{\scriptscriptstyle X}) |_{mH_{\scriptscriptstyle X, x}} , G^*(-nH_{\scriptscriptstyle X}) |_{mH_{\scriptscriptstyle X, x}} \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(2), $$ and so $ E(-n H_{\scriptscriptstyle X})|_{mH_{\scriptscriptstyle X, x}} \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(2)$. \end{proof} \begin{prop} \label{prop:FMT-bridge-result} We have the following: \begin{enumerate}[label=(\roman*)] \item Let $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((-\infty,0])$ be a reflexive sheaf. If $T \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}([0, +\infty])$ is a non-trivial subsheaf of $\Psi^1(E)$ then $\ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(T) \le 0$. \item Let $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((0, +\infty])$ be a torsion free sheaf. If $ F \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((-\infty,0])$ is a non-trivial quotient of $\Psi^2(E)$ then $\ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(F) \le 0$. \end{enumerate} \end{prop} \begin{proof} (i) \ Since $E$ is reflexive, $\dim \mathop{\rm Sing}\nolimits(E) \le 0$. 
Choose $x, x' \in X$ such that \begin{itemize} \item $\dim (H_{\scriptscriptstyle X, x} \cap H_{\scriptscriptstyle X, x'}) = 1$, \item $\mathop{\rm Sing}\nolimits(E) \cap H_{\scriptscriptstyle X, x} = \emptyset$, and \item $\mathop{\rm Sing}\nolimits(E) \cap H_{\scriptscriptstyle X, x'} = \emptyset$. \end{itemize} Since $E$ is a reflexive sheaf, Proposition~\ref{prop:FMT-reflexive-large-negative-twist}--(i) implies, for sufficiently large $m \in \mathbb{Z}_{>0}$, $E(-mH_{\scriptscriptstyle X, x}) \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(2,3)$. By applying the Fourier-Mukai transform $\Psi$ to the short exact sequence $$ 0 \to E(-mH_{\scriptscriptstyle X, x}) \to E \to E|_{mH_{\scriptscriptstyle X, x}} \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$ and then considering the long exact sequence of $\mathop{\rm Coh}\nolimits(Y)$-cohomologies, we have $E|_{mH_{\scriptscriptstyle X, x}} \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(1,2)$ and $\Psi^1(E) \hookrightarrow \Psi^1\left(E|_{mD_x} \right)$. By Proposition~\ref{prop:FMT-divisor-restrict-large-negative-twist}, for large enough $n \in \mathbb{Z}_{>0}$, $ E(-n H_{\scriptscriptstyle X})|_{mH_{\scriptscriptstyle X, x}} \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(2)$. By applying the Fourier-Mukai transform $\Psi$ to the short exact sequence $$ 0 \to E(-n H_{\scriptscriptstyle X, x'})|_{mD_x} \to E|_{mD_x} \to E|_{mH_{\scriptscriptstyle X, x} \cap nH_{\scriptscriptstyle X, x'}} \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$ and then considering the long exact sequence of $\mathop{\rm Coh}\nolimits(Y)$-cohomologies, we get $E|_{mH_{\scriptscriptstyle X, x} \cap nH_{\scriptscriptstyle X, x'}} \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(1)$ and $\Psi^1 \left(E|_{mH_{\scriptscriptstyle X, x}} \right) \hookrightarrow \Psi^1 \left(E|_{mH_{\scriptscriptstyle X, x} \cap nH_{\scriptscriptstyle X, x'}} \right) $. 
Therefore, we have $$ T \hookrightarrow \Psi^1(E) \hookrightarrow \Psi^1 \left(E|_{mH_{\scriptscriptstyle X, x}} \right) \hookrightarrow \Psi^1 \left(E|_{mH_{\scriptscriptstyle X, x} \cap nH_{\scriptscriptstyle X, x'}} \right). $$ The claim follows from Proposition~\ref{prop:supprt-result-FMT}. \\ \noindent (ii) \ Since $F \ne 0$ is a quotient of $\Psi^2(E)$, we have $F^{*} \hookrightarrow \left(\Psi^2(E)\right)^*$. Here $F^* \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, -D_{\scriptscriptstyle Y}}([0, +\infty))$ fits into short exact sequence $0 \to T \to F^* \to F_0 \to 0$ in $\mathop{\rm Coh}\nolimits(Y)$ for some $T \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, -D_{\scriptscriptstyle Y}}((0, +\infty))$ and $F_0 \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, -D_{\scriptscriptstyle Y}}(0)$. By the usual Bogomolov-Gieseker inequality $\ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle Y}}(F_0) \le 0$. Let $$ \widetilde{\Psi} := \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^\vee}: D^b(X) \to D^b(Y). $$ By Proposition~\ref{prop:FMT-0-g-cohomo-vanishing}, $\Psi^3(E) = 0 = \widetilde{\Psi}^0(E^*)$. Consider the co-convergence of the ``Duality'' Spectral Sequence~\ref{Spec-Seq-Dual} for $E$ and the following diagram describes its second page. 
$$ \begin{tikzpicture}[xscale=1.8, yscale=1.2] \draw[gray,very thin] (-3,0) grid (1,4); \draw[gray,very thin] (0,-3) grid (4,1); \draw[->,thick] (3.75,0.5) -- (4.4,0.5) node[above] {$p$}; \draw[->,thick] (0.5,3.75) -- (0.5,4.4) node[left] {$q$}; \draw[style=dotted] (2,-3) -- (-3,2); \draw[style=dotted] (3,-3) -- (-3,3); \draw[style=dotted] (4,-3) -- (-3,4); \draw[style=dotted] (1,-3) -- (-3,1); \draw[style=dotted] (4,-2) -- (-2,4); \draw[style=dotted] (4,-1) -- (-1,4); \draw[style=dotted] (4,0) -- (0,4); \draw (2.5,3) node {$\scriptstyle\mathbb{D}^i(G) = \operatorname{\mathcal{E}\textit{xt}}^i(G, \mathcal{O})$}; \draw (-2.5,1.5) node {$\scriptstyle(\Psi^2(E))^{*}$}; \draw (-1.5,1.5) node {$\scriptstyle \mathbb{D}^1(\Psi^2(E))$}; \draw (-0.5,1.5) node (f2) {$\scriptstyle\mathbb{D}^2(\Psi^2(E))$}; \draw (0.5,1.5) node (f4) {$\scriptstyle\mathbb{D}^3(\Psi^2(E))$}; \draw (-2.5,2.5) node (f1) {$\scriptstyle(\Psi^1(E))^{*}$}; \draw (-1.5,2.5) node (f3) {$\scriptstyle\mathbb{D}^1(\Psi^1(E))$}; \draw (-0.5,2.5) node (f6) {$\scriptstyle\mathbb{D}^2(\Psi^1(E))$}; \draw (0.5,2.5) node (f8) {$\scriptstyle\mathbb{D}^3(\Psi^1(E))$}; \draw (-2.5,3.5) node (f5) {$\scriptstyle(\Psi^0(E))^{*}$}; \draw (-1.5,3.5) node (f7) {$\scriptstyle\mathbb{D}^1(\Psi^0(E))$}; \draw[->,thick] (f1) -- node[above] {$ $} (f2); \draw[->,thick] (f3) -- node[above] {$ $} (f4); \draw[->,thick] (f5) -- node[above] {$ $} (f6); \draw[->,thick] (f7) -- node[above] {$ $} (f8); \draw (1.5,-2.5) node {$\scriptstyle \widetilde{\Psi}^1(E^*)$}; \draw (2.5,-2.5) node (s2) {$\scriptstyle \widetilde{\Psi}^2(E^*)$}; \draw (3.5,-2.5) node (s4) {$\scriptstyle \widetilde{\Psi}^3(E^*)$}; \draw (0.5,-1.5) node (s1) {$\scriptstyle \widetilde{\Psi}^0(\mathbb{D}^1(E))$}; \draw (1.5,-1.5) node (s3) {$\scriptstyle \widetilde{\Psi}^1(\mathbb{D}^1(E))$}; \draw (0.5,-0.5) node {$\scriptstyle \widetilde{\Psi}^0(\mathbb{D}^2(E))$}; \draw[->,thick] (s1) -- node[above] {$ $} (s2); \draw[->,thick] (s3) -- node[above] {$ $} (s4); 
\end{tikzpicture} $$ We have the short exact sequence $$ 0 \to \widetilde{\Psi}^1 (E^*) \to \left(\Psi^2(E)\right)^* \to P \to 0 $$ in $\mathop{\rm Coh}\nolimits(Y)$, for some subsheaf $P$ of $\widetilde{\Psi}^0(\operatorname{\mathcal{E}\textit{xt}}^1(E, \mathcal{O}_X))$. By Proposition~\ref{prop:slope-bound-FMT-0-g}--(i), $\widetilde{\Psi}^0(\operatorname{\mathcal{E}\textit{xt}}^1(E, \mathcal{O}_X)) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, -D_{\scriptscriptstyle Y}}((-\infty, 0])$ and so $P \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, -D_{\scriptscriptstyle Y}}((-\infty, 0])$. Therefore, $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} (T, P) = 0$, and so $T \hookrightarrow \widetilde{\Psi}^1 (E^*) $. Here $E^* \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}}, D_{\scriptscriptstyle X}}^{\mu}((-\infty,0))$ and so by part (i), $\ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle Y}}(T) \le 0$. Therefore, $$ \ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(F) \le \ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(F^{**})= \ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle Y}} (F^{*}) = \ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle Y}}(F_0) + \ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle Y}}(T) \le 0 $$ as required. 
\end{proof} \begin{prop} \label{prop:slope-bound-FMT-F-1} For $E \in \mathop{\rm Coh}\nolimits(X)$, we have the following: \begin{enumerate}[label=(\roman*)] \item If $E \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty , 0])$ then $\Psi^1(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty , 0])$, and \item If $E \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}([0,+\infty))$ with $\Psi^3(E)=0$ then $\Psi^2(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}([0,+\infty])$. \end{enumerate} \end{prop} \begin{proof} (i) \ Assume the opposite for a contradiction. From the Harder-Narasimhan filtration of $\Psi^1(E)$ there exist $T \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((0, +\infty])$ and $F \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty , 0])$ such that \begin{equation} \label{eqn:local-ses-FMT-1} 0 \to T \to \Psi^1(E) \to F \to 0 \end{equation} is a short exact sequence in $\mathop{\rm Coh}\nolimits(Y)$. By Proposition~\ref{prop:FMT-0-cohomology-reflexive}--(ii), $\Psi^1(E)$ is reflexive. Therefore, \begin{equation} \label{eqn:local-ch1-bound} \ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_1^{D_{\scriptscriptstyle Y}}(T) > 0. \end{equation} By Lemma \ref{prop:reflexive-sheaf-results}--(2) there exists a locally free sheaf $G_1$ such that $\Psi^1(E)$ is a subsheaf of it with a torsion free quotient sheaf $G_1/ \Psi^1(E)$. Hence, $T$ is a subsheaf of $G_1$ with a torsion free quotient sheaf. Therefore, again by Lemma \ref{prop:reflexive-sheaf-results}--(2), $T$ is a reflexive sheaf. 
By applying the functor $\mathbf{R} \operatorname{\mathcal{H}\textit{om}}(-, \mathcal{O}_Y)$ to the short exact sequence \eqref{eqn:local-ses-FMT-1}, we obtain the long exact sequence: $$ 0 \to F^{*} \to \left( \Psi^1(E)\right)^* \to T^* \to \operatorname{\mathcal{E}\textit{xt}} ^1(F, \mathcal{O}_Y) \to \operatorname{\mathcal{E}\textit{xt}} ^1(\Psi^1(E), \mathcal{O}_Y) \to \operatorname{\mathcal{E}\textit{xt}} ^1(T, \mathcal{O}_Y) \to \operatorname{\mathcal{E}\textit{xt}} ^2(F, \mathcal{O}_Y) \to 0. $$ Since $\Psi^1(E)$ and $T$ are reflexive, $ \operatorname{\mathcal{E}\textit{xt}}^1(\Psi^1(E), \mathcal{O}_Y) , \operatorname{\mathcal{E}\textit{xt}} ^1(T, \mathcal{O}_Y) \in \mathop{\rm Coh}\nolimits_0(Y)$, and so $$ \operatorname{\mathcal{E}\textit{xt}} ^1(F, \mathcal{O}_Y) , \operatorname{\mathcal{E}\textit{xt}} ^2(F, \mathcal{O}_Y) \in \mathop{\rm Coh}\nolimits_0(Y). $$ Therefore $F$ fits into the short exact sequence $$ 0 \to F \to F^{**} \to R \to 0 $$ for some $R \in \mathop{\rm Coh}\nolimits_0(Y)$. By applying the Fourier-Mukai transform $\widehat{\Psi}$, we get the short exact sequence $$ 0 \to \widehat{\Psi}^0(R) \to \widehat{\Psi}^1(F) \to \widehat{\Psi}^1(F^{**}) \to 0. $$ From the Harder-Narasimhan filtration, let $T_1$ be the subsheaf of $\widehat{\Psi}^1(F^{**})$ in $\operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((0, +\infty])$ with the quotient $F_1 \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty , 0])$. Then $\widehat{\Psi}^1(F)$ has a subsheaf $T_2 \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}([0, +\infty])$ with quotient $F_1$. Here $T_2$ fits into the short exact sequence $$ 0 \to \widehat{\Psi}^0(R) \to T_2 \to T_1 \to 0. 
$$ From Proposition \ref{prop:FMT-bridge-result}--(i), $\ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(T_1) \le 0$, and since $\mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(\widehat{\Psi}^0(R))=0$, $$ \ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(T_2) \le 0. $$ By applying the Fourier-Mukai transform $\widehat{\Psi}$ to the short exact sequence \eqref{eqn:local-ses-FMT-1}, we obtain that $T \in V_{\mathop{\rm Coh}\nolimits(X)}^{\widehat{\Psi}}(2)$ and $F \in V_{\mathop{\rm Coh}\nolimits(X)}^{\widehat{\Psi}}(1,2,3)$. Moreover, we have the short exact sequence $$ 0 \to \widehat{\Psi}^1(F) \to \widehat{\Psi}^2(T) \to E_1 \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$ for some subsheaf $E_1$ of $\widehat{\Psi}^2 \Psi^1(E)$. From the Mukai Spectral Sequence~\ref{Spec-Seq-Mukai} for $E$, we have the short exact sequence $$ 0 \to \widehat{\Psi}^0 \Psi^2(E) \to \widehat{\Psi}^2 \Psi^1(E) \to E_2 \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$ for some subsheaf $E_2$ of $E$. Therefore, $E_2 \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty , 0])$. By Proposition~\ref{prop:slope-bound-FMT-0-g}--(i), $ \widehat{\Psi}^0 \Psi^2(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty , 0])$. So we have $ \widehat{\Psi}^2 \Psi^1(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty , 0])$. Hence, $E_1 \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty , 0])$. So we have the following commutative diagram for some $F_2 \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty , 0])$. 
$$ \xymatrixcolsep{3pc} \xymatrixrowsep{2.2pc} \xymatrix{ & 0 & 0 & & \\ 0 \ar[r] & F_1 \ar[r]\ar[u] & F_2 \ar[r]\ar[u] & E_1 \ar[r] & 0 \\ 0 \ar[r] & \widehat{\Psi}^1(F) \ar[r] \ar[u] & \widehat{\Psi}^2(T) \ar[r] \ar[u] & E_1 \ar[r] \ar@{=}[u] & 0 \\ & T_2 \ar[u] \ar@{=}[r] & T_2 \ar[u] & & \\ & 0 \ar[u] & 0 \ar[u] & & } $$ By Proposition~\ref{prop:FMT-bridge-result}--(ii), $\ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(F_2) \le 0$. Therefore, $$ \ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(\widehat{\Psi}^2(T)) = \ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(T_2) + \ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2^{-D_{\scriptscriptstyle X}}(F_2) \le 0. $$ So from Theorem \ref{prop:antidiagonal-rep-cohom-FMT}, $\ell_{\scriptscriptstyle{Y}} \mathop{\rm ch}\nolimits_2^{D_{\scriptscriptstyle Y}}(T) \le 0$; but this is not possible as we have \eqref{eqn:local-ch1-bound}. This is the required contradiction. \\ \noindent (ii) \ Let $\widetilde{\Psi}:= \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^\vee}$. Since $E^* \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , D_{\scriptscriptstyle X}}((-\infty,0])$, from (i) $\widetilde{\Psi}^1(E^*) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, -D_{\scriptscriptstyle Y}}((-\infty,0])$. By the co-convergence of the ``Duality'' Spectral Sequence~\ref{Spec-Seq-Dual} for $E$, we have $(\Psi^2(E))^* \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, -D_{\scriptscriptstyle Y}}((-\infty,0])$. So $\Psi^2(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}([0,+\infty])$ as required. \end{proof} \begin{prop} \label{prop:slope-bound-FMT-T-2} Let $E \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((0, + \infty])$. 
Then $\Psi^2(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((0,+\infty])$. \end{prop} \begin{proof} From the Harder-Narasimhan filtration of $\Psi^2(E)$, there exist $T \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((0,+\infty])$ and $F \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty, 0])$ such that $0 \to T \to \Psi^2(E) \to F \to 0$ is a short exact sequence in $\mathop{\rm Coh}\nolimits(Y)$. Now we need to show $F = 0$. Apply the Fourier-Mukai transform $\widehat{\Psi}$ and consider the long exact sequence of $\mathop{\rm Coh}\nolimits(X)$-cohomologies. So we have $F \in V^{\widehat{\Psi}}_{\mathop{\rm Coh}\nolimits(X)}(1)$ and $$ 0 \to \widehat{\Psi}^1(T) \to \widehat{\Psi}^1 \Psi^2(E) \to \widehat{\Psi}^1(F) \to \widehat{\Psi}^2(T) \to 0 $$ is a long exact sequence in $\mathop{\rm Coh}\nolimits(X)$. From the convergence of the Mukai Spectral Sequence~\ref{Spec-Seq-Mukai} for $E$, we have the short exact sequence $$ 0 \to Q \to \widehat{\Psi}^1 \Psi^2(E) \to \widehat{\Psi}^3 \Psi^1(E) \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$, where $Q$ is a quotient of $E$. Then $Q\in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((0, + \infty])$ and by Proposition~\ref{prop:slope-bound-torsion-sheaf}, $\widehat{\Psi}^3 \Psi^1(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((0, + \infty])$; so $\widehat{\Psi}^1 \Psi^2(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((0, + \infty])$. On the other hand, by Proposition~\ref{prop:slope-bound-FMT-F-1}--(i), $\widehat{\Psi}^1(F) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty,0])$. So the map $ \widehat{\Psi}^1 \Psi^2(E) \to \widehat{\Psi}^1(F) $ is zero and $\widehat{\Psi}^1(F) \cong \widehat{\Psi}^2(T)$. 
Hence, $F \cong \Psi^2 \widehat{\Psi}^1(F) \cong \Psi^2 \widehat{\Psi}^2(T) = 0$ as required. \end{proof} \section{Further Properties of Slope Stability under FM Transforms} \label{sec:further-FMT-sheaves-abelian-threefolds} \subsection{Some slope bounds of the FM transformed sheaves} \label{sec:FMT-0-special-bound} Recall that $\Psi$ is the Fourier-Mukai transform $\Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}}:D^b(X) \to D^b(Y)$ between the abelian threefolds such that $$ \mathop{\rm ch}\nolimits(\mathcal{E}_{\{x\} \times Y}) = r \, e^{D_{\scriptscriptstyle Y}}, \ \text{and} \ \mathop{\rm ch}\nolimits(\mathcal{E}_{X \times \{y\}} ) = r \, e^{D_{\scriptscriptstyle X}}. $$ Also $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X), \ell_{\scriptscriptstyle{Y}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(Y)$ are some ample classes such that $$ e^{- D_{\scriptscriptstyle Y}} \, \Phi_{\mathcal{E}}^{\operatorname{\scriptscriptstyle{H}}} \, e^{-D_{\scriptscriptstyle X}} ( e^{\ell_{\scriptscriptstyle X}}) = (r \, {\ell_{\scriptscriptstyle{X}} ^3}/{3!}) \, e^{-\ell_{\scriptscriptstyle Y}}, $$ with $({\ell_{\scriptscriptstyle{X}} ^3}/3!)({\ell_{\scriptscriptstyle{Y}}^3}/3!)= 1/r^2$. Moreover, Theorem \ref{prop:antidiagonal-rep-cohom-FMT} says, if we consider $v^{-D_{\scriptscriptstyle X} ,\ell_{\scriptscriptstyle{X}} }, v^{D_{\scriptscriptstyle Y},\ell_{\scriptscriptstyle{Y}}}$ as column vectors, then $$ v^{D_{\scriptscriptstyle Y},\ell_{\scriptscriptstyle{Y}}}\left(\Phi_\mathcal{E}^{\scriptscriptstyle X \to \scriptscriptstyle Y}(E) \right) = \frac{3!}{r \, \ell_{\scriptscriptstyle X}^3} \, \operatorname{Adiag}\left(1,-1,1,-1\right) \ v^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(E). 
$$ Here the vector $v^{B,{\ell_{\scriptscriptstyle{X}} }}(E)$ is defined by \begin{equation*} v^{B,{\ell_{\scriptscriptstyle{X}} }}(E) = \left( v^{B,{\ell_{\scriptscriptstyle{X}} }}_0(E) , v^{B,{\ell_{\scriptscriptstyle{X}} }}_1(E) , v^{B,{\ell_{\scriptscriptstyle{X}} }}_2(E) , v^{B,{\ell_{\scriptscriptstyle{X}} }}_3(E) \right). \end{equation*} \begin{prop} \label{prop:special-slope-bound-FMT-0-3} For $\lambda \in \mathbb{Q}_{> 0}$, \begin{enumerate}[label=(\roman*)] \item if $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((0,\lambda])$ then $\Psi^0(E) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}((-\infty, -\frac{1}{\lambda}])$, \item if $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}([-\lambda,0])$ then $\Psi^3(E) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}([\frac{1}{ \lambda }, +\infty])$. \end{enumerate} \end{prop} \begin{proof} (i) \ Let $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}((0,\lambda])$. Let $Z$ be the fine moduli space of simple semihomogeneous bundles $E$ on $X$ with $c_1(E)/\mathop{\rm rk}\nolimits(E) =D_{\scriptscriptstyle X} - \lambda \ell_{\scriptscriptstyle{X}} $. Then there is some fixed $r' \in\mathbb{Z}_{>0}$ such that $$ r' = \mathop{\rm rk}\nolimits(E) $$ for such $E$. Due to Mukai and Orlov, $Z$ is an abelian threefold. Let $\mathcal{F}$ be the associated universal bundle on $Z \times X$; so by Lemma \ref{prop:semihomo-numerical}--(1) we have $$ \mathop{\rm ch}\nolimits(\mathcal{F}_{ \{z\} \times X }) = r' \, e^{ D_{\scriptscriptstyle X} - \lambda \ell_{\scriptscriptstyle{X}} }. $$ Let $$ \Pi: = \Phi_\mathcal{F}^{\scriptscriptstyle X \to \scriptscriptstyle Z} : D^b(X) \to D^b(Z) $$ be the corresponding Fourier-Mukai transform from $D^b(X)$ to $D^b(Z)$ with kernel $\mathcal{F}$. 
Then its quasi inverse is given by $\Phi_{\mathcal{F}^\vee}^{\scriptscriptstyle Z \to \scriptscriptstyle X}[3]$. Again, by Lemma \ref{prop:semihomo-numerical}--(2) $$ \mathop{\rm ch}\nolimits(\mathcal{F}_{Z \times \{x\}} ) = r' \, e^{D_{\scriptscriptstyle Z}} $$ for some $D_{\scriptscriptstyle Z} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(Z)$. Similar to the Fourier-Mukai transform $\Psi$ in Section \ref{sec:cohomological-FMT}, there exists an ample class $\ell_{\scriptscriptstyle{Z}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(Z)$ such that $$ e^{- D_{\scriptscriptstyle Z}} \, \Pi^{\operatorname{\scriptscriptstyle{H}}} \, e^{-D_{\scriptscriptstyle X}+\lambda \ell_{\scriptscriptstyle{X}} } ( e^{\ell_{\scriptscriptstyle X}}) = (r' \, {\ell_{\scriptscriptstyle{X}} ^3}/{3!}) \, e^{-\ell_{\scriptscriptstyle Z}}, $$ with $({\ell_{\scriptscriptstyle{X}} ^3}/3!)({\ell_{\scriptscriptstyle{Z}}^3}/3!)= 1/r'^2$ (Theorem \ref{prop:general-cohomo-FMT}). Moreover, Theorem \ref{prop:antidiagonal-rep-cohom-FMT} says, \begin{equation*} v^{D_{\scriptscriptstyle Z},\ell_{\scriptscriptstyle{Z}}}\left(\Pi(E) \right) = \frac{3!}{r' \, \ell_{\scriptscriptstyle X}^3} \, \operatorname{Adiag}\left(1,-1,1,-1\right) \ v^{-D_{\scriptscriptstyle X}+\lambda \ell_{\scriptscriptstyle{X}} , \ell_{\scriptscriptstyle{X}} }(E). \end{equation*} Let $\Xi: D^b(Y) \to D^b(Z)$ be the Fourier-Mukai transform defined by $$ \Xi := \Pi \circ \widehat{\Psi}[3]. $$ We have $\widehat{\Psi}(\mathcal{O}_y) = \mathcal{E}^*_{X \times \{y\}}$ is a stable semihomogeneous bundle in $\operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}(0) = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}+\lambda \ell_{\scriptscriptstyle{X}} }(-\lambda)$. Therefore the image $ \Xi(\mathcal{O}_y) = \Pi ( \mathcal{E}^*_{X \times \{y\}}[3]) $ of the skyscraper sheaf $\mathcal{O}_y$ is also a stable semihomogeneous bundle on $Z$. 
Hence, $\Xi$ is a Fourier-Mukai transform $\Phi^{\scriptscriptstyle Y \to \scriptscriptstyle Z}_{\mathcal{G}}$ with kernel $\mathcal{G}$ on $Y \times Z$ such that $$ \mathcal{G}_{\{y\} \times Z} = \Xi(\mathcal{O}_y) = \Pi( \mathcal{E}^*_{X \times \{y\}}[3]). $$ From Theorem \ref{prop:general-cohomo-FMT} and Lemma \ref{prop:reflexive-sheaf-results}, there is $r''>0$ such that \begin{align*} & \mathop{\rm ch}\nolimits( \mathcal{G}_{\{y\} \times Z}) = r'' e^{D_{\scriptscriptstyle Z} + \frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}}, \\ & \mathop{\rm ch}\nolimits( \mathcal{G}^*_{Y \times \{z\}}) = r'' e^{D_{\scriptscriptstyle Y} - \frac{1}{\lambda}\ell_{\scriptscriptstyle{Y}}}. \end{align*} The isomorphism $\Xi \circ \Psi \cong \Pi$ gives us the convergence of the spectral sequence: \begin{equation} \label{Spec-Seq-Local-FMT-0-bound} E_2^{p,q} = \Xi^p \Psi^q (E) \Longrightarrow \Pi^{p+q}(E) \end{equation} for $E$. Since $E \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \lambda \ell_{\scriptscriptstyle{X}} }((-\lambda,0])$, from Proposition~\ref{prop:FMT-0-g-cohomo-vanishing}--(iii), \begin{equation*} \Pi^0 (E) = 0. \end{equation*} Now from the convergence of the above spectral sequence \eqref{Spec-Seq-Local-FMT-0-bound}, $\Xi^0 \Psi^0 (E) = 0$ and \begin{equation*} \Xi^1 \Psi^0 (E) \hookrightarrow \Pi ^1(E). \end{equation*} By Proposition~\ref{prop:slope-bound-FMT-F-1}--(i), $\Pi^1(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Z}}, D_{\scriptscriptstyle Z}}((-\infty, 0])$. 
Since we have $ \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Z}}, D_{\scriptscriptstyle Z}}((-\infty, 0]) \subset \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Z}}, D_{\scriptscriptstyle Z}+\frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}}((-\infty, 0])$, \begin{equation} \label{firstbound} \Xi^1 \Psi^0 (E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Z}}, D_{\scriptscriptstyle Z}+\frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}}((-\infty, 0]). \end{equation} From the Harder-Narasimhan filtration property, $\Psi^0(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty, 0])$ fits into the short exact sequence \begin{equation} \label{mu-ses} 0 \to F \to \Psi^0(E) \to G \to 0 \end{equation} in $\mathop{\rm Coh}\nolimits(Y)$ for some $F \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\frac{1}{ \lambda },0])$ and $G \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty , -\frac{1}{ \lambda}])$. Assume $F \ne 0$ for a contradiction. Then we can write $v^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}}(F) = (a_0, \mu a_0, a_2, a_3)$ with \begin{equation*} 0 \ge \mu > - \frac{1}{ \lambda}. \end{equation*} By applying the Fourier-Mukai transform $\widehat{\Psi}$ to short exact sequence \eqref{mu-ses} we have the following exact sequence in $\mathop{\rm Coh}\nolimits(X)$: $$ 0 \to \widehat{\Psi}^1(G) \to \widehat{\Psi}^2(F) \to \widehat{\Psi}^2\Psi^0(E) \to \cdots. $$ By Mukai Spectral Sequence \ref{Spec-Seq-Mukai}, $\widehat{\Psi}^2 \Psi^0(E) \cong \widehat{\Psi}^0 \Psi^1(E)$ and so by Proposition~\ref{prop:slope-bound-FMT-0-g}--(i), it is in $\operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty, 0])$. Also by Proposition~\ref{prop:slope-bound-FMT-F-1}--(i), $\widehat{\Psi}^1(G) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty, 0])$. 
Therefore, $\widehat{\Psi}^2(F) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty, 0])$. By Proposition~\ref{prop:slope-bound-torsion-sheaf}, $\widehat{\Psi}^3(F) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((0, +\infty])$. Therefore, we have $v_1^{-D_{\scriptscriptstyle X},\ell_{\scriptscriptstyle{X}} }(\widehat{\Psi}(F)) = \ell_{\scriptscriptstyle{X}} ^2 \mathop{\rm ch}\nolimits_1^{-D_{\scriptscriptstyle X}}(\widehat{\Psi}(F)) \le 0$, and so from Theorem \ref{prop:antidiagonal-rep-cohom-FMT} \begin{equation*} a_2 \ge 0. \end{equation*} Since $\Xi^0(F) \hookrightarrow \Xi^0 \Psi^0(E) = 0$, we have $\Xi^0(F) =0$. Moreover, since $$ F \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\frac{1}{ \lambda },0])= \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}-\frac{1}{\lambda}\ell_{\scriptscriptstyle{Y}}}((0, \frac{1}{\lambda}]), $$ from Proposition \ref{prop:FMT-0-g-cohomo-vanishing}--(i) we have \begin{equation*} \Xi^3(F) = 0. \end{equation*} Apply the Fourier-Mukai transform $\Xi$ to short exact sequence \eqref{mu-ses} and consider the long exact sequence of $\mathop{\rm Coh}\nolimits(Z)$-cohomologies: $$ 0 \to \Xi^0(G) \to \Xi^1(F) \to \Xi^1 \Psi^0 (E) \to \cdots. $$ By \eqref{firstbound}, $\Xi^1 \Psi^0 (E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Z}}, D_{\scriptscriptstyle Z}+\frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}}((-\infty, 0])$, and by Proposition~\ref{prop:slope-bound-FMT-0-g}--(i), $\Xi^0(G) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Z}}, D_{\scriptscriptstyle Z}+\frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}}((-\infty, 0])$. Therefore, $\Xi^1(F) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Z}}, D_{\scriptscriptstyle Z}+\frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}}((-\infty, 0])$. 
By Proposition~\ref{prop:slope-bound-FMT-T-2}, $\Xi^2(F) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Z}}, D_{\scriptscriptstyle Z}+\frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}}((0,+\infty])$. So \begin{equation} \label{first-v1-bound} v_1^{D_{\scriptscriptstyle Z}+\frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}, \ell_{\scriptscriptstyle{Z}}} (\Xi(F)) \ge 0. \end{equation} On the other hand, we have \begin{align*} & v^{D_{\scriptscriptstyle Z}+\frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}, \ell_{\scriptscriptstyle{Z}}} (\Xi(F)) \\ & \quad \quad \quad = \frac{3!}{r''\ell_{\scriptscriptstyle{Y}}^3} \begin{pmatrix} & & & 1 \\ & & -1 & \\ &1 & & \\ -1 & & & \end{pmatrix} \ v^{D_{\scriptscriptstyle Y}-\frac{1}{\lambda}\ell_{\scriptscriptstyle{Y}}, \ell_{\scriptscriptstyle{Y}}}(F) \\ & \quad \quad \quad = \frac{3!}{r''\ell_{\scriptscriptstyle{Y}}^3} \begin{pmatrix} & & & 1 \\ & & -1 & \\ &1 & & \\ -1 & & & \end{pmatrix} \begin{pmatrix} 1 & & & \\ \frac{1}{\lambda} & 1 & & \\ \frac{1}{\lambda^2}&\frac{1}{\lambda} & 1 & \\ \frac{1}{\lambda^3} & \frac{1}{\lambda^2} & \frac{1}{\lambda} & 1 \end{pmatrix} \ v^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}}(F) \\ & \quad \quad \quad = \frac{3!}{r''\ell_{\scriptscriptstyle{Y}}^3} \begin{pmatrix} *& * & * & * \\ -1/\lambda^2 & -1/\lambda & -1 & 0\\ *& * & * & * \\ *& * & * & * \end{pmatrix} \begin{pmatrix} a_0 \\ \mu a_0 \\ a_2\\ a_3 \end{pmatrix} \\ & \quad \quad \quad = \frac{3!}{r''\ell_{\scriptscriptstyle{Y}}^3} \ \left(*, - \frac{a_0}{\lambda}\left( \mu+ \frac{1}{\lambda}\right) - a_2, *, * \right). \end{align*} Here $a_0 >0$, $ \left( \mu+ \frac{1}{ \lambda} \right)>0$, $ a_2 \ge 0$ and so $v_1^{D_{\scriptscriptstyle Z}+\frac{1}{\lambda}\ell_{\scriptscriptstyle{Z}}, \ell_{\scriptscriptstyle{Z}}} (\Xi(F)) < 0$. This contradicts with \eqref{first-v1-bound}. 
\\ \noindent (ii) \ Let $E \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}^{\mu}([-\lambda,0])$ for some $\lambda \in \mathbb{Q}_{> 0}$. Let $\widetilde{\Psi}:= \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}^\vee}$. From the co-convergence of the ``Duality'' Spectral Sequence \ref{Spec-Seq-Dual} for $E$ we have $$ \left( \Psi ^3(E) \right)^* \cong \widetilde{\Psi}^0(E^*). $$ We have $E^{*} \in \operatorname{HN}_{\ell_{\scriptscriptstyle{X}} , D_{\scriptscriptstyle X}}^{\mu}([0,\lambda])$. So by Proposition~\ref{prop:FMT-0-g-cohomo-vanishing}--(iii) and part (i), we have $\widetilde{\Psi}^0(E^*)\in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, -D_{\scriptscriptstyle Y}}^{\mu}((-\infty, -\frac{1}{ \lambda }])$. Therefore, $\Psi^3(E) \in \operatorname{HN}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}^{\mu}([\frac{1}{ \lambda }, +\infty])$ as required. \end{proof} \subsection{Images of the first tilted hearts under the FM transforms} Let us recall the first tilting associated to numerical parameters of Theorem \ref{prop:equivalence-hearts-abelian-threefolds} involving the Fourier-Mukai transform $\Psi : D^b(X) \to D^b(Y)$. 
\begin{nota} The subcategories \begin{align*} & \mathcal{F}^{\scriptscriptstyle X}_1= \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{\lambda \ell_{\scriptscriptstyle{X}} }{2}}((-\infty, 0]) = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} }((-\infty, \frac{\lambda }{2}]), \\ & \mathcal{T}^{\scriptscriptstyle X}_1= \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{\lambda \ell_{\scriptscriptstyle{X}} }{2}}((0, +\infty]) = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} }(( \frac{\lambda }{2} , \infty]) \end{align*} of $\mathop{\rm Coh}\nolimits(X)$ form a torsion pair, and the corresponding tilted category is $$ \mathcal{B}^{\scriptscriptstyle X} = \langle \mathcal{F}^{\scriptscriptstyle X}_1[1] , \mathcal{T}^{\scriptscriptstyle X}_1 \rangle. $$ Similarly, the subcategories \begin{align*} & \mathcal{F}^{\scriptscriptstyle Y}_1= \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{\ell_{\scriptscriptstyle{Y}}}{2\lambda}}((-\infty, 0]) = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty, - \frac{1}{2\lambda}]), \\ & \mathcal{T}^{\scriptscriptstyle Y}_1= \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{ \ell_{\scriptscriptstyle{Y}}}{2 \lambda}}((0, +\infty]) = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\frac{1}{2\lambda}, +\infty]) \end{align*} of $\mathop{\rm Coh}\nolimits(Y)$ form a torsion pair, and the corresponding tilted category is $$ \mathcal{B}^{\scriptscriptstyle Y} = \langle \mathcal{F}^{\scriptscriptstyle Y}_1[1] , \mathcal{T}^{\scriptscriptstyle Y}_1 \rangle. 
$$ \end{nota} \begin{thm} \label{prop:image-B-under-FMT} We have the following: \begin{itemize} \item[(i)] $\Psi \left(\mathcal{B}^{\scriptscriptstyle X} \right) \subset \langle \mathcal{B}^{\scriptscriptstyle Y} , \mathcal{B}^{\scriptscriptstyle Y}[-1], \mathcal{B}^{\scriptscriptstyle Y}[-2] \rangle$, and \item[(ii)] $\widehat{\Psi}[1] \left(\mathcal{B}^{\scriptscriptstyle Y} \right) \subset \langle \mathcal{B}^{\scriptscriptstyle X} , \mathcal{B}^{\scriptscriptstyle X}[-1], \mathcal{B}^{\scriptscriptstyle X}[-2] \rangle$. \end{itemize} \end{thm} \begin{proof} (i) \ We can visualize $\mathcal{B}^{\scriptscriptstyle X}$ and $\mathcal{B}^{\scriptscriptstyle Y}$ as follows: $$ \begin{tikzpicture}[scale=1.2] \draw[style=dashed] (0.5,0) grid (3.5,1); \fill[lightgray] (1,0) -- (2,0) to[out=25,in=-115] (3,1) -- (2,1) to[out=-115,in=25] (1,0); \draw[style=thick] (1,0) -- (2,0) to[out=25,in=-115] (3,1)-- (2,1) to[out=-115,in=25] (1,0); \draw[style=thick] (2,0) -- (2,1); \draw (1.75,0.25) node {$\scriptscriptstyle B$}; \draw (2.3,0.7) node {$\scriptscriptstyle A$}; \draw (1.5,- 0.3) node {$\scriptstyle{-1}$}; \draw (2.5,-0.3) node {$\scriptstyle{0}$}; \draw (-1,0.5) node {$\mathcal{B}^{\scriptscriptstyle X} = \langle \mathcal{F}^{\scriptscriptstyle X}_1[1] , \mathcal{T}^{\scriptscriptstyle X}_1 \rangle : $}; \draw (5.5,0.8) node {$\scriptstyle A \in \mathcal{T}^{\scriptscriptstyle X}_1 = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} }(( \frac{\lambda }{2} , +\infty])$ }; \draw (5.5,0.2) node {$\scriptstyle B \in \mathcal{F}^{\scriptscriptstyle X}_1 = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} }(( -\infty, \frac{\lambda }{2}])$ }; \end{tikzpicture} $$ $$ \begin{tikzpicture}[scale=1.2] \draw[style=dashed] (0.5,0) grid (3.5,1); \fill[lightgray] (1,0) -- (2,0) to[out=65,in=-155] (3,1) -- (2,1) to[out=-155,in=65] (1,0); \draw[style=thick] (1,0) -- (2,0) to[out=65,in=-155] (3,1)-- (2,1) to[out=-155,in=65] 
(1,0); \draw[style=thick] (2,0) -- (2,1); \draw (1.6,0.4) node {$\scriptscriptstyle D$}; \draw (2.25,0.75) node {$\scriptscriptstyle C$}; \draw (1.5,- 0.3) node {$\scriptstyle{-1}$}; \draw (2.5,-0.3) node {$\scriptstyle{0}$}; \draw (-1.2,0.5) node {$\mathcal{B}^{\scriptscriptstyle Y} = \langle \mathcal{F}^{\scriptscriptstyle Y}_1[1] , \mathcal{T}^{\scriptscriptstyle Y}_1 \rangle : $}; \draw (5.5,0.8) node {$\scriptstyle C \in \mathcal{T}^{\scriptscriptstyle Y}_1= \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\frac{1}{2\lambda}, +\infty])$ }; \draw (5.5,0.2) node {$\scriptstyle D \in \mathcal{F}^{\scriptscriptstyle Y}_1= \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty, - \frac{1}{2\lambda}])$ }; \end{tikzpicture} $$ If $E \in \mathcal{F}^{\scriptscriptstyle X}_1 = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} }(( -\infty, \frac{\lambda }{2}])$ then by Propositions \ref{prop:FMT-0-g-cohomo-vanishing}--(iii) and \ref{prop:special-slope-bound-FMT-0-3}--(i), $\Psi^0(E) \in \mathcal{F}^{\scriptscriptstyle Y}_1$. Also by Proposition \ref{prop:slope-bound-torsion-sheaf}, $\Psi^3(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((0, +\infty]) \subset \mathcal{T}^{\scriptscriptstyle Y}_1$. Therefore, $\Psi(E)$ has $\mathcal{B}^{\scriptscriptstyle Y}$-cohomologies in 1,2,3 positions. That is $$ \Psi\left(\mathcal{F}^{\scriptscriptstyle X}_1 [1]\right) \subset \langle \mathcal{B}^{\scriptscriptstyle Y}, \mathcal{B}^{\scriptscriptstyle Y}[-1], \mathcal{B}^{\scriptscriptstyle Y}[-2] \rangle. 
$$ $$ \begin{tikzpicture}[scale=1.2] \draw[style=dashed] (0.5,-2) grid (6.5,-1); \fill[lightgray] (1,-2) -- (4,-2) to[out=65,in=-155] (5,-1) -- (2,-1) to[out=-155,in=65] (1,-2); \draw[style=thick] (1,-2) -- (4,-2) to[out=65,in=-155] (5,-1) -- (2,-1) to[out=-155,in=65] (1,-2); \draw[style=dashed] (2,-2) -- (2,-1) ; \draw[style=dashed] (3,-2) -- (3,-1) ; \draw[style=dashed] (4,-2) -- (4,-1) ; \draw (1.6,-1.7) node {$\scriptscriptstyle \Psi^0(B) $}; \draw (2.5,-1.5) node {$\scriptscriptstyle \Psi^1(B) $}; \draw (3.5,-1.5) node {$\scriptscriptstyle \Psi^2(B)$}; \draw (4.32,-1.2) node {$\scriptscriptstyle \Psi^3(B)$}; \draw (1.5,- 2.3) node {$\scriptscriptstyle{-1}$}; \draw (2.5,-2.3) node {$\scriptscriptstyle{0}$}; \draw (3.5,- 2.3) node {$\scriptscriptstyle{1}$}; \draw (4.5,-2.3) node {$\scriptscriptstyle{2}$}; \draw (5.5,-2.3) node {$\scriptscriptstyle{3}$}; \draw[style=dashed] (-3 ,-2) grid (-1,-1); \fill[lightgray] (-3,-2) -- (-2,-2) -- (-2,-1) to[out=-115,in=25] (-3,-2); \draw[style=thick] (-3,-2) -- (-2, -2) to[out=25,in=-115] (-1, -1) -- (-2, -1) to[out=-115,in=25] (-3, -2); \draw[style=thick] (-2,-2) -- (-2, -1); \draw (-2.25,-1.75) node {$\scriptscriptstyle B$}; \draw (-2.5,- 2.3) node {$\scriptscriptstyle{-1}$}; \draw (-1.5,-2.3) node {$\scriptscriptstyle{0}$}; \draw (-3.2,-2.1) to [out=120,in=240] (-3.2,-0.9); \draw (-0.8,-2.1) to [out=60,in=300] (-0.8,-0.9); \draw (-3.7,-1.5) node {$\Psi$}; \draw (0,-1.5) node {$=$}; \end{tikzpicture} $$ On the other hand, if $E \in \mathcal{T}^{\scriptscriptstyle X}_1 = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} }(( \frac{\lambda }{2} , +\infty])$ then by Proposition~\ref{prop:FMT-0-g-cohomo-vanishing}--(i), $\Psi^3(E) =0$, and by Proposition \ref{prop:slope-bound-FMT-T-2}, $\Psi^2 (E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((0, +\infty]) \subset \mathcal{T}^{\scriptscriptstyle Y}_1$. 
So $\Psi(E)$ has $\mathcal{B}^{\scriptscriptstyle Y}$-cohomologies in positions 0,1,2 only. That is $$ \Psi\left(\mathcal{T}^{\scriptscriptstyle X}_1 \right) \subset \langle \mathcal{B}^{\scriptscriptstyle Y}, \mathcal{B}^{\scriptscriptstyle Y}[-1], \mathcal{B}^{\scriptscriptstyle Y}[-2] \rangle. $$ $$ \begin{tikzpicture}[scale=1.2] \draw[style=dashed] (0.5,0) grid (6.5,1); \fill[lightgray] (2,0) -- (4,0)to[out=65,in=-155] (5,1) -- (2,1) --(2,0); \draw[style=thick] (2,0) -- (4,0) to[out=65,in=-155] (5,1) -- (2,1) -- (2,0); \draw[style=dashed] (3,0) -- (3,1) ; \draw[style=dashed] (4,0) -- (4,1) ; \draw (2.5,0.5) node {$\scriptscriptstyle \Psi^0(A)$}; \draw (3.5,0.5) node {$\scriptscriptstyle \Psi^1(A)$}; \draw (4.32,0.8) node {$\scriptscriptstyle \Psi^2(A)$}; \draw (1.5,- 0.3) node {$\scriptscriptstyle{-1}$}; \draw (2.5,-0.3) node {$\scriptscriptstyle{0}$}; \draw (3.5,- 0.3) node {$\scriptscriptstyle{1}$}; \draw (4.5,-0.3) node {$\scriptscriptstyle{2}$}; \draw (5.5,-0.3) node {$\scriptscriptstyle{3}$}; \draw[style=dashed] (-3 ,0) grid (-1,1); \fill[lightgray] (-2,0) to[out=25,in=-115] (-1,1) -- (-2,1) -- (-2,0); \draw[style=thick] (-3,0) -- (-2, 0) to[out=25,in=-115] (-1, 1) -- (-2, 1) to[out=-115,in=25] (-3, 0); \draw[style=thick] (-2,0) -- (-2, 1); \draw (-1.7,0.7) node {$\scriptscriptstyle A$}; \draw (-2.5,- 0.3) node {$\scriptscriptstyle{-1}$}; \draw (-1.5,-0.3) node {$\scriptscriptstyle{0}$}; \draw (-3.2,-0.1) to [out=120,in=240] (-3.2,1.1); \draw (-0.8,-0.1) to [out=60,in=300] (-0.8,1.1); \draw (-3.7,0.5) node {$\Psi$}; \draw (0,0.5) node {$=$}; \end{tikzpicture} $$ Hence, $\Psi\left(\mathcal{B}^{\scriptscriptstyle X} \right) \subset \langle \mathcal{B}^{\scriptscriptstyle Y}, \mathcal{B}^{\scriptscriptstyle Y}[-1], \mathcal{B}^{\scriptscriptstyle Y}[-2] \rangle $ as $\mathcal{B}^{\scriptscriptstyle X} = \langle \mathcal{F}^{\scriptscriptstyle X}_1[1] , \mathcal{T}^{\scriptscriptstyle X}_1 \rangle$. 
\\ \noindent (ii) \ We can use Propositions \ref{prop:FMT-0-g-cohomo-vanishing}--(iii), \ref{prop:slope-bound-FMT-0-g}--(i), \ref{prop:FMT-0-g-cohomo-vanishing}--(i) and \ref{prop:special-slope-bound-FMT-0-3}--(ii) in a similar way to the above proof. \end{proof} \section{Some Stable Reflexive Sheaves on Abelian Threefolds} \label{sec:special-stable-reflexive-sheaves-abelain-threefolds} In this section we shall consider slope semistable sheaves with vanishing first and second parts of the twisted Chern characters. Such sheaves arise as the $\mathop{\rm Coh}\nolimits(X)$-cohomology of some of the tilt-stable objects on $X$; see Proposition \ref{prop:trivial-discriminant-tilt-stable-objects}. \begin{nota} Let $X$ be an abelian threefold. Let $\mathcal{P}$ be the Poincar\'e bundle on $X \times \widehat{X}$. We simply write \begin{align*} &\Phi = \Phi^{\scriptscriptstyle X \to \scriptscriptstyle \widehat{X} }_{\mathcal{P}}: D^b(X) \to D^b(\widehat{X}), \\ & \widehat{\Phi} = \Phi^{\scriptscriptstyle \widehat{X} \to \scriptscriptstyle X}_{\mathcal{P}^\vee}: D^b(\widehat{X}) \to D^b(X). \end{align*} Let $\ell_{\scriptscriptstyle{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$ and $ \ell_{\scriptscriptstyle \widehat{X}} \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(\widehat{X})$ be the ample classes as in Lemma \ref{classicalcohomoFMT} (or equivalently Theorem \ref{prop:general-cohomo-FMT}). \end{nota} First we prove the following: \begin{lem} \label{prop:trivial-discriminant-reflexive-sheaves} Let $E$ be a slope semistable sheaf on $X$ with respect to $\ell_{\scriptscriptstyle{X}} $ such that $\mathop{\rm ch}\nolimits_k(E)=0$ for $k=1,2$. Then $E^{**}$ is a homogeneous bundle, that is $E^{**}$ is filtered with quotients from $\mathop{\rm Pic}\nolimits^0(X)$. \end{lem} \begin{proof} Any torsion free sheaf $E$ fits into the short exact sequence $0 \to E \to E^{**} \to Q \to 0$ in $\mathop{\rm Coh}\nolimits(X)$ for some $Q \in \mathop{\rm Coh}\nolimits_{\le 1}(X)$. 
If $\mathop{\rm ch}\nolimits_k(E)=0$ for $k=1,2$ then $\ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2(E^{**}) \ge 0$ where the equality holds when $Q \in \mathop{\rm Coh}\nolimits_0(X)$. If $E$ is slope semistable then $E^{**}$ is also slope semistable, and so by the usual Bogomolov-Gieseker inequality $\ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2(E^{**}) = 0$. Hence, $\ell_{\scriptscriptstyle{X}} \mathop{\rm ch}\nolimits_2(Q) =0$, and so $\mathop{\rm ch}\nolimits_2(Q) =0$; that is $\mathop{\rm ch}\nolimits_2(E^{**}) =0$. Assume the opposite for a contradiction. Then there exists a semistable reflexive sheaf $E$ with $\mathop{\rm ch}\nolimits_k(E) = 0$ for $k=1,2$, and $H^k(X, E \otimes \mathcal{P}_{X \times \{\widehat{x}\}}) = 0$ for $k=0,3$ and any $\widehat{x} \in \widehat{X}$. So we have $\Phi^0(E) = \Phi^3(E) =0$. By a result of Simpson (see Lemma~\ref{prop:Simpson-result-trivial-disciminant}), we have $\mathop{\rm ch}\nolimits_3(E) = 0$. Therefore, $\mathop{\rm ch}\nolimits(E) = (r, 0 , 0, 0)$ for some positive integer $r$. By Proposition~\ref{prop:slope-bound-FMT-F-1}, $\Phi^1(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle \widehat{X}} , 0}((-\infty, 0])$, and $\Phi^2(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle \widehat{X}} , 0}([0,+\infty])$. So we have $\ell_{\scriptscriptstyle \widehat{X}} ^2 \mathop{\rm ch}\nolimits_1(\Phi^1(E)) \le 0$ and $ \ell_{\scriptscriptstyle \widehat{X}} ^2 \mathop{\rm ch}\nolimits_1(\Phi^2(E)) \ge 0$. Therefore, $\ell_{\scriptscriptstyle \widehat{X}} ^2 \mathop{\rm ch}\nolimits_1(\Phi(E)) \ge 0$. Moreover, since $\mathop{\rm ch}\nolimits_2(E) =0$, from Theorem \ref{prop:antidiagonal-rep-cohom-FMT}, we obtain $ \ell_{\scriptscriptstyle \widehat{X}} ^2 \mathop{\rm ch}\nolimits_1(\Phi(E))= 0$. Hence, $\ell_{\scriptscriptstyle \widehat{X}} ^2 \mathop{\rm ch}\nolimits_1(\Phi^1(E)) =\ell_{\scriptscriptstyle \widehat{X}} ^2 \mathop{\rm ch}\nolimits_1(\Phi^2(E)) =0$. 
So we have \begin{align*} \mathop{\rm ch}\nolimits(\Phi^1(E)) = (a, D , -C, d), \ \ \mathop{\rm ch}\nolimits(\Phi^2(E)) = (a, D, -C, -r+d), \end{align*} for some $a>0$, $D \in \mathop{\rm NS}\nolimits(\widehat{X})$, $C \in H^4_{\operatorname{\scriptstyle{alg}}}(\widehat{X},\mathbb{Q})$ such that $\ell_{\scriptscriptstyle \widehat{X}} ^2 D=0$ and $\ell_{\scriptscriptstyle \widehat{X}} C \ge 0$. Moreover, we have $\Phi^1(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle \widehat{X}} , 0}(0)$. If $\widehat{\Phi}^3 \Phi^1(E) \ne 0$ then $\Phi^1(E)$ fits into a short exact sequence $0 \to K_1 \to \Phi^1(E) \to \mathcal{P}_{\{x_1\}\times \widehat{X}} \mathcal{I}_{C_1} \to 0$ in $\mathop{\rm Coh}\nolimits(\widehat{X})$ for some $x_1 \in X$ and $C_1 \in H_2(\widehat{X}, \mathbb{Z})$. Then $K_1 \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle \widehat{X}} , 0}(0)$ and we have the following exact sequence $$ \cdots \to \widehat{\Phi}^3(K_1) \to \widehat{\Phi}^3 \Phi^1(E) \to \mathcal{O}_{x_1} \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$. If $\widehat{\Phi}^3(K_1) \ne 0$ then $K_1$ fits into a short exact sequence $0 \to K_2 \to K_1 \to \mathcal{P}_{\{x_2\}\times \widehat{X}} \mathcal{I}_{C_2} \to 0$ in $\mathop{\rm Coh}\nolimits(\widehat{X})$. Then $K_2 \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle \widehat{X}} , 0}(0)$ and we have the following exact sequence $$ \cdots \to\widehat{\Phi}^3(K_2) \to \widehat{\Phi}^3(K_1) \to \mathcal{O}_{x_2} \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$. We can continue this process for only a finite number of steps since $\mathop{\rm rk}\nolimits (\Phi^1(E)) < +\infty$, and hence $\widehat{\Phi}^3 \Phi^1(E) $ is filtered by skyscraper sheaves. 
Moreover, from the convergence of Mukai Spectral Sequence~\ref{Spec-Seq-Mukai} for $E$, we have the short exact sequence $$ 0 \to \widehat{\Phi}^0 \Phi^2(E) \to \widehat{\Phi}^2 \Phi^1(E) \to Q \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$, where $Q$ is a subsheaf of $E$ and so $Q \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , 0}((-\infty, 0])$. By Proposition~\ref{prop:slope-bound-FMT-0-g}--(i), $\widehat{\Phi}^0 \Phi^2(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , 0}((-\infty, 0])$. This implies $\widehat{\Phi}^2 \Phi^1(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , 0}((-\infty, 0])$. Therefore, we have $\ell_{\scriptscriptstyle{X}} ^2 \mathop{\rm ch}\nolimits_1(\widehat{\Phi}(\Phi^1(E))) \le 0$, and so $\ell_{\scriptscriptstyle \widehat{X}} C \le 0$. Hence, $\ell_{\scriptscriptstyle \widehat{X}} C = 0$. By Proposition~\ref{prop:FMT-F-1-reflexivity}, $\Phi^1(E)$ is a reflexive sheaf and since $\Phi^1(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle \widehat{X}} , 0}(0)$ it is slope semistable. So by Lemma~\ref{prop:Simpson-result-trivial-disciminant}, we have $D=0$, $C=0$ and $d= \mathop{\rm ch}\nolimits_3(\Phi^1(E)) =0$. Therefore, $\mathop{\rm ch}\nolimits(\widehat{\Phi}(\Phi^1(E))) = (0,0,0,-a)$. Since $\widehat{\Phi}^3 \Phi^1(E) \in \mathop{\rm Coh}\nolimits_0(X)$, we have $\mathop{\rm ch}\nolimits_k(\widehat{\Phi}^2 \Phi^1(E)) = 0$ for $k=0,1,2$. So $\widehat{\Phi}^2 \Phi^1(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , 0}((0, +\infty])$. Therefore, $\widehat{\Phi}^2 \Phi^1(E) = 0$ and we have the short exact sequence $$ 0 \to E \to \widehat{\Phi}^1 \Phi^2 (E) \to \widehat{\Phi}^3 \Phi^1(E) \to 0 $$ in $\mathop{\rm Coh}\nolimits(X)$. Since $\widehat{\Phi}^3 \Phi^1(E) \in \mathop{\rm Coh}\nolimits_0(X)$ and $E$ is locally free, $\mathop{\rm Ext}\nolimits^1_{\scriptscriptstyle X}(\widehat{\Phi}^3 \Phi^1(E), E) = 0$. 
Therefore, $\widehat{\Phi}^1 \Phi^2(E) \cong E \oplus \widehat{\Phi}^3 \Phi^1(E)$. Since $\widehat{\Phi}^1 \Phi^2(E) \in V^{\Phi}_{\mathop{\rm Coh}\nolimits(Y)}(2)$, we have $\widehat{\Phi}^3 \Phi^1(E) = 0$ and so $E \in V^{\Phi}_{\mathop{\rm Coh}\nolimits(Y)}(2)$. Therefore, $\mathop{\rm ch}\nolimits(\Phi^2(E)) = (0,0,0,-r)$. But it is not possible to have $-r > 0$ and this is the required contradiction to complete the proof. \end{proof} Then we show the following. \begin{thm} \label{prop4.15} Let $E$ be a slope stable torsion free sheaf of rank $r$ with $\mathop{\rm ch}\nolimits^{B}_k(E) = 0$, $k=1,2$ for some $B \in \mathop{\rm NS}\nolimits_{\mathbb{Q}}(X)$. Then $E^{**}$ is a slope stable semihomogeneous bundle with $\mathop{\rm ch}\nolimits(E^{**}) = r e^B$. \end{thm} \begin{proof} The slope stable torsion free sheaf $E$ fits into the short exact sequence $ 0 \to E \to E^{**} \to T \to 0 $ for some $T \in \mathop{\rm Coh}\nolimits_{\le 1}(X)$. Now $E^{**}$ is also slope stable and so by the usual Bogomolov-Gieseker inequality $\mathop{\rm ch}\nolimits_k^{B}(E^{**}) = 0$ for $k=1,2$. By Lemma \ref{prop:trivial-discriminant-reflexive-sheaves}, $\operatorname{\mathcal{E}\textit{nd}}(E^{**})$ is a homogeneous bundle. Therefore, by Lemma \ref{prop:Mukai-semihomognoeus-properties}, $E^{**}$ is a slope stable semihomogeneous bundle, and so from Lemma \ref{prop:semihomo-numerical}--(i) $\mathop{\rm ch}\nolimits(E^{**})= r e^B$. \end{proof} \section{Equivalences of Stability Condition Hearts on Abelian Threefolds} \label{sec:FMT-tilt-stability} Let us recall the second tilting associated to numerical parameters of Theorem \ref{prop:equivalence-hearts-abelian-threefolds} involving the Fourier-Mukai transform $\Psi : D^b(X) \to D^b(Y)$. 
\begin{nota} The subcategories \begin{align*} & \mathcal{F}^{\scriptscriptstyle X}_2= \operatorname{HN}^{\nu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{\lambda \ell_{\scriptscriptstyle{X}} }{2}, \frac{\lambda }{2}}((-\infty, 0]) , \\ & \mathcal{T}^{\scriptscriptstyle X}_2= \operatorname{HN}^{\nu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{\lambda \ell_{\scriptscriptstyle{X}} }{2} , \frac{\lambda }{2}}((0, +\infty]) \end{align*} of $\mathcal{B}^{\scriptscriptstyle X}$ forms a torsion pair, and the corresponding tilt is $$ \mathcal{A}^{\scriptscriptstyle X} = \langle \mathcal{F}^{\scriptscriptstyle X}_2[1] , \mathcal{T}^{\scriptscriptstyle X}_2 \rangle. $$ Similarly, \begin{align*} & \mathcal{F}^{\scriptscriptstyle Y}_2= \operatorname{HN}^{\nu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{\ell_{\scriptscriptstyle{Y}}}{2\lambda}, \frac{1}{2\lambda}}((-\infty, 0]), \\ & \mathcal{T}^{\scriptscriptstyle Y}_2= \operatorname{HN}^{\nu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{ \ell_{\scriptscriptstyle{Y}}}{2 \lambda}, \frac{1}{2\lambda}}((0, +\infty]) \end{align*} defines a torsion pair of $\mathcal{B}^{\scriptscriptstyle Y}$, and the corresponding tilt is $$ \mathcal{A}^{\scriptscriptstyle Y} = \langle \mathcal{F}^{\scriptscriptstyle Y}_2[1] , \mathcal{T}^{\scriptscriptstyle Y}_2 \rangle. $$ Let us write the complexified ample classes by \begin{align*} &\Omega = \left( -D_{\scriptscriptstyle X} + \lambda \ell_{\scriptscriptstyle{X}} /2 \right) + i \sqrt{3} \lambda \ell_{\scriptscriptstyle{X}} /2, \\ & \Omega' = \left( D_{\scriptscriptstyle Y} - \ell_{\scriptscriptstyle{Y}}/(2 \lambda) \right) + i \sqrt{3} \ell_{\scriptscriptstyle{Y}}/(2 \lambda). 
\end{align*} We write the corresponding central charge functions simply by \begin{align*} & Z_{\Omega} = Z_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X} + \frac{\lambda \ell_{\scriptscriptstyle{X}} }{2}, \frac{\lambda }{2}}, \\ & Z_{\Omega'} = Z_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} - \frac{ \ell_{\scriptscriptstyle{Y}}}{2 \lambda}, \frac{1}{2\lambda}}. \end{align*} \end{nota} It will be convenient to abbreviate the Fourier-Mukai transforms $\Psi$ and $\widehat{\Psi}[1]$ by $\Gamma$ and $\widehat{\Gamma}$ respectively. That is, \begin{align*} & \Gamma := \Psi = \Phi^{\scriptscriptstyle X \to \scriptscriptstyle Y}_{\mathcal{E}} : D^b(X) \to D^b(Y), \\ & \widehat{\Gamma} := \widehat{\Psi} [1]= \Phi^{\scriptscriptstyle Y \to \scriptscriptstyle X}_{\mathcal{E}^\vee} [1] : D^b(Y) \to D^b(X). \end{align*} Then by Theorem~\ref{prop:image-B-under-FMT}, the images of an object from $\mathcal{B}^{\scriptscriptstyle X}$ (and $\mathcal{B}^{\scriptscriptstyle Y}$) under $\Gamma$ (and $\widehat{\Gamma}$) are complexes whose cohomologies with respect to $\mathcal{B}^{\scriptscriptstyle Y}$ (and $\mathcal{B}^{\scriptscriptstyle X}$) can only be non-zero in $0,1,2$ positions. \begin{nota} In the rest of the paper we write \begin{align*} & \Gamma^i_{\mathcal{B}} (-) = H^{i}_{\mathcal{B}^{\scriptscriptstyle Y}}(\Gamma(-)),\\ & \widehat{\Gamma}^i_{\mathcal{B}} (-) = H^{i}_{\mathcal{B}^{\scriptscriptstyle X}}(\widehat{\Gamma}(-)). \end{align*} \end{nota} We have $\Gamma \circ \widehat{\Gamma} \cong [-2]$ and $\widehat{\Gamma} \circ \Gamma \cong [-2]$. This gives us the following convergence of spectral sequences. 
\begin{specseq} \label{Spec-Seq-B} \begin{enumerate}[label=(\arabic*)] \item[] \item $E_2^{p,q} = \widehat{\Gamma}^p_{\mathcal{B}} \Gamma^q_{\mathcal{B}} (E) \Longrightarrow H^{p+q-2}_{\mathcal{B}^{\scriptscriptstyle X}} (E)$, and \item$ E_2^{p,q} = \Gamma^p_{\mathcal{B}} \widehat{\Gamma}^q_{\mathcal{B}} (E) \Longrightarrow H^{p+q-2}_{\mathcal{B}^{\scriptscriptstyle Y}} (E)$. \end{enumerate} \end{specseq} Such convergence of the spectral sequences for $E \in \mathcal{B}^{\scriptscriptstyle X}$ and $E \in \mathcal{B}^{\scriptscriptstyle Y}$ behave in the same way as the convergence of the Mukai Spectral Sequence \ref{Spec-Seq-Mukai} for coherent sheaves on an abelian surface. The following diagram describes the convergence of Spectral Sequence~\ref{Spec-Seq-B}--(1) for $E \in \mathcal{B}^{\scriptscriptstyle X}$. \begin{center} \begin{tikzpicture}[scale=1.80] \draw[gray,very thin] (0,0) grid (3,3); \draw[->,thick] (2.75,0.25) -- (3.5, 0.25) node[above] {$p$}; \draw[->,thick] (0.25,2.75) -- (0.25,3.5) node[left] {$q$}; \draw (2.5,0.5) node(a) {$ \widehat{\Gamma}^2_{\mathcal{B}} \Gamma^0_{\mathcal{B}}(E)$}; \draw (0.5,1.5) node(b) {$\widehat{\Gamma}^0_{\mathcal{B}} \Gamma^1_{\mathcal{B}}(E)$}; \draw[>->,thick] (b) -- node[above] {$ $} (a); \draw (2.5,1.5) node(d) {$\widehat{\Gamma}^2_{\mathcal{B}} \Gamma^1_{\mathcal{B}}(E)$}; \draw (0.5,2.5) node(c) {$\widehat{\Gamma}^0_{\mathcal{B}} \Gamma^2_{\mathcal{B}}(E)$}; \draw[->>,thick] (c) -- node[above] {$ $} (d); \draw (1.5,1.5) node {$\widehat{\Gamma}^1_{\mathcal{B}} \Gamma^1_{\mathcal{B}}(E)$}; \end{tikzpicture} \end{center} \begin{prop} \label{prop:FMT-B-cat-bounds-Imginary-Z} We have the following: \begin{enumerate}[label=(\arabic*)] \item For $ E \in \mathcal{T}_{2}^{\scriptscriptstyle Y}$, (i) $\mathcal{H}^0(\widehat{\Gamma}^2_{\mathcal{B}}(E)) = 0$, and (ii) if $\widehat{\Gamma}^2_{\mathcal{B}}(E) \ne 0$ then $\operatorname{Im} Z_{\Omega}(\widehat{\Gamma}^2_{\mathcal{B}}(E)) > 0$. 
\item For $E \in \mathcal{F}_{2}^{\scriptscriptstyle Y}$, (i) $ \mathcal{H}^{-1}(\widehat{\Gamma}^0_{\mathcal{B}}(E)) = 0$, and (ii) if $\widehat{\Gamma}^0_{\mathcal{B}}(E) \ne 0$ then $\operatorname{Im} Z_{\Omega}(\widehat{\Gamma}^0_{\mathcal{B}}(E)) < 0$. \item For $E \in \mathcal{T}_{2}^{\scriptscriptstyle X}$, (i) $ \mathcal{H}^0(\Gamma^2_{\mathcal{B}}(E)) = 0$, and (ii) if $ \Gamma^2_{\mathcal{B}}(E) \ne 0$ then $\operatorname{Im} Z_{\Omega'}(\Gamma^2_{\mathcal{B}}(E)) > 0$. \item For $E \in \mathcal{F}_{2}^{\scriptscriptstyle X}$, (i) $\mathcal{H}^{-1} (\Gamma^0_{\mathcal{B}}(E)) = 0$, and (ii) if $\Gamma^0_{\mathcal{B}}(E) \ne 0$ then $\operatorname{Im} Z_{\Omega'}(\Gamma^0_{\mathcal{B}}(E)) < 0$. \end{enumerate} \end{prop} \begin{proof} (1) \ Let $E \in \mathcal{T}_{2}^{\scriptscriptstyle Y}$. \\ \noindent (i) \ For any $x \in X$, \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} ( \widehat{\Gamma}^2_{\mathcal{B}}(E) , \mathcal{O}_x ) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} ( \widehat{\Gamma}^2_{\mathcal{B}}(E) , \widehat{\Gamma}^2_{\mathcal{B}}(\mathcal{E}_{\{x\} \times Y} ) ) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} ( \widehat{\Gamma}(E) , \widehat{\Gamma} (\mathcal{E}_{\{x\} \times Y} ) ) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} ( E, \mathcal{E}_{\{x\} \times Y} ) = 0, \end{align*} since $E \in \mathcal{T}^{\scriptscriptstyle Y}$ and $ \mathcal{E}_{\{x\} \times Y} \in \mathcal{F}^{\scriptscriptstyle Y}$. Therefore, $\mathcal{H}^0(\widehat{\Gamma}^2_{\mathcal{B}}(E)) = 0$ as required. \\ \noindent (ii) \ From (1)(i), we have $\widehat{\Gamma}^2_{\mathcal{B}}(E) \cong A[1]$ for some $0 \neq A \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty, \frac{\lambda}{2}])$. Consider the convergence of the spectral sequence: $$ E^{p,q}_2=\widehat{\Gamma}^{p} (\mathcal{H}^{q} (E)) \Longrightarrow \widehat{\Gamma}^{p+q} (E) $$ for $E$. 
By Proposition~\ref{prop:slope-bounds}--(2), we have $\mathcal{H}^0(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((0, +\infty])$ and so by Propositions \ref{prop:slope-bound-FMT-T-2} and \ref{prop:slope-bound-torsion-sheaf}, $$ \widehat{\Psi}^2 (\mathcal{H}^0(E)), \widehat{\Psi}^3 (\mathcal{H}^{-1}(E)) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((0,+\infty]). $$ Therefore, from the convergence of the above spectral sequence for $E$, we have $$ A \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty, \frac{\lambda}{2}]) \cap \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((0,+\infty]) = \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((0, \frac{\lambda}{2}]). $$ Let $v^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} } (A)= (a_0, a_1, a_2, a_3)$. From the usual Bogomolov-Gieseker inequalities for all the Harder-Narasimhan semistable factors of $A$ we have $\frac{\lambda}{2}a_1-a_2 \ge 0$ and so by Proposition \ref{prop:imgainary-part-central-charge}--(1), $$ \operatorname{Im} Z_{\Omega}(\widehat{\Gamma}^2_{\mathcal{B}}(E)) = \operatorname{Im} Z_{\Omega}(A[1]) = \frac{ \sqrt{3} \lambda}{4}(\lambda a_1-a_2) > 0 $$ as required. \\ \noindent (2) \ Let $E \in \mathcal{F}^{\scriptscriptstyle Y}_2$. 
\\ \noindent (i) \ For any $x \in X$ we have \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} ( \widehat{\Gamma}^0_{\mathcal{B}}(E) , \mathcal{O}_x[1] ) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} (\Gamma \widehat{\Gamma}^0_{\mathcal{B}}(E) , \Gamma (\mathcal{O}_x[1]))\\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} ( \Gamma^2_{\mathcal{B}} \widehat{\Gamma}^0_{\mathcal{B}}(E)[-2] , \mathcal{E}_{\{x\} \times Y}[1] ) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} ( \Gamma^2_{\mathcal{B}} \widehat{\Gamma}^0_{\mathcal{B}}(E) , \mathcal{E}_{\{x\} \times Y}[3] ) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} ( \mathcal{E}_{\{x\} \times Y} , \Gamma^2_{\mathcal{B}} \widehat{\Gamma}^0_{\mathcal{B}}(E) )^\vee . \end{align*} From the convergence of the Spectral Sequence~\ref{Spec-Seq-B} for $E$, we have the short exact sequence $$ 0 \to \Gamma^0_{\mathcal{B}} \widehat{\Gamma}^1_{\mathcal{B}}(E) \to \Gamma^2_{\mathcal{B}} \widehat{\Gamma}^0_{\mathcal{B}}(E) \to F \to 0 $$ in $\mathcal{B}^{\scriptscriptstyle Y}$, where $F$ is a subobject of $E$ and so $F \in \mathcal{F}^{\scriptscriptstyle Y}$. Moreover, by the Harder-Narasimhan filtration, $F$ fits into the following short exact sequence in $\mathcal{B}^{\scriptscriptstyle Y}$: $$ 0 \to F_0 \to F \to F_1 \to 0, $$ where $F_0 \in \operatorname{HN}^{\nu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}- \frac{1}{2 \lambda}\ell_{\scriptscriptstyle{Y}}, \frac{1}{2 \lambda}}(0)$ and $F_1 \in \operatorname{HN}^{\nu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}- \frac{1}{2 \lambda} \ell_{\scriptscriptstyle{Y}}, \frac{1}{2 \lambda}}((-\infty, 0))$. Since $\mathcal{E}_{\{x\} \times Y} \in \operatorname{HN}^{\nu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}- \frac{1}{2 \lambda}\ell_{\scriptscriptstyle{Y}}, \frac{1}{2 \lambda}}(0)$, $$ \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} (\mathcal{E}_{\{x\} \times Y} , F_1) =0. 
$$ Moreover, $F_0$ fits into a filtration with quotients of $\nu_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}- \frac{1}{2 \lambda}\ell_{\scriptscriptstyle{Y}}, \frac{1}{2 \lambda}}$-stable objects $F_{0,i}$ with $\nu_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}- \frac{1}{2 \lambda}\ell_{\scriptscriptstyle{Y}}, \frac{1}{2 \lambda}}(F_{0,i}) = 0$. By Proposition~\ref{prop:reduction-BG-ineq-class}, each $F_{0,i}$ fits into a non-splitting short exact sequence $$ 0 \to F_{0,i} \to M_i \to T_i \to 0 $$ in $\mathcal{B}^{\scriptscriptstyle Y}$ for some $T_i \in \mathop{\rm Coh}\nolimits_0(Y)$ such that $M_i[1] \in \mathcal{A}^{\scriptscriptstyle Y}$ is a minimal object. Moreover, $\mathcal{E}_{\{x\} \times Y}[1] \in \mathcal{A}^{\scriptscriptstyle Y}$ is a minimal object. So only for finitely many $x \in X$ can we have $\mathcal{E}_{\{x\} \times Y} \cong M_i$ for some $i$. So for generic $x \in X$, $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\mathcal{E}_{\{x\} \times Y}, M_i) = 0$ and so $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\mathcal{E}_{\{x\} \times Y} , F_{0,i}) = 0$, which implies $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\mathcal{E}_{\{x\} \times Y} , F_0) = 0$. Therefore, for generic $x \in X$, $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} ( \mathcal{E}_{\{x\} \times Y} , F ) = 0$. On the other hand, \begin{align*} \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} ( \mathcal{E}_{\{x\} \times Y} , \Gamma^0_{\mathcal{B}} \widehat{\Gamma}^1_{\mathcal{B}} (E)) & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} ( \Gamma^0_{\mathcal{B}} (\mathcal{O}_x) , \Gamma^0_{\mathcal{B}} \widehat{\Gamma}^1_{\mathcal{B}} (E) )\\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} ( \Gamma (\mathcal{O}_x) , \Gamma \widehat{\Gamma}^1_{\mathcal{B}} (E) ) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} (\mathcal{O}_x , \widehat{\Gamma}^1_{\mathcal{B}} (E) ). 
\end{align*} Here $\widehat{\Gamma}^1_{\mathcal{B}} (E)$ fits into the short exact sequence $$ 0 \to \mathcal{H}^{-1}(\widehat{\Gamma}^1_{\mathcal{B}}(E))[1] \to \widehat{\Gamma}^1_{\mathcal{B}}(E) \to \mathcal{H}^0(\widehat{\Gamma}^1_{\mathcal{B}} (E)) \to 0 $$ in $\mathcal{B}^{\scriptscriptstyle X}$, where $\mathcal{H}^{-1}(\widehat{\Gamma}^1_{\mathcal{B}} (E))$ is torsion free and $\mathcal{H}^0(\widehat{\Gamma}^1_{\mathcal{B}} (E))$ can have torsion supported on a 0-subscheme of finite length. Hence, for generic $x \in X$, $ \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} (\mathcal{O}_x , \widehat{\Gamma}^1_{\mathcal{B}} (E)) = 0$. Therefore, for generic $x \in X$, we have $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} (\mathcal{E}_{\{x\} \times Y} , \Gamma^0_{\mathcal{B}} \widehat{\Gamma}^1_{\mathcal{B}} (E)) =\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}( \mathcal{E}_{\{x\} \times Y} , F ) =0$. So $\mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y} (\mathcal{E}_{\{x\} \times Y}, \Gamma^2_{\mathcal{B}} \widehat{\Gamma}^0_{\mathcal{B}}(E)) =0$. Hence, for generic $x \in X$ $$ \mathop{\rm Hom}\nolimits_{\scriptscriptstyle X} (\widehat{\Gamma}^0_{\mathcal{B}}(E) , \mathcal{O}_x[1]) = 0. $$ But $\mathcal{H}^{-1}(\widehat{\Gamma}^0_{\mathcal{B}}(E))$ is torsion free and so $\mathcal{H}^{-1}(\widehat{\Gamma}^0_{\mathcal{B}}(E)) = 0$ as required. \\ \noindent (ii) \ From (2)(i) we have $\widehat{\Gamma}^0_{\mathcal{B}}(E) \cong A$ for some non-trivial coherent sheaf $A \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((\frac{\lambda}{2} , + \infty])$. 
For any $x \in X$ we have \begin{align*} \mathop{\rm Ext}\nolimits^1_{\scriptscriptstyle X}(\mathcal{O}_x, A) & \cong \mathop{\rm Ext}\nolimits^1_{\scriptscriptstyle X} (\mathcal{O}_x, \widehat{\Gamma}^0_{\mathcal{B}}(E)) \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\Gamma(\mathcal{O}_x), \Gamma \widehat{\Gamma}^0_{\mathcal{B}}(E) [1]) \\ & \cong \mathop{\rm Hom}\nolimits_{\scriptscriptstyle Y}(\mathcal{E}_{\{x\} \times Y}, \Gamma^2_{\mathcal{B}} \widehat{\Gamma}^0_{\mathcal{B}}(E) [-1]) = 0. \end{align*} So $A \in \mathop{\rm Coh}\nolimits_{\ge 2}(X)$, and if $v^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} } (A) = (a_0, a_1, a_2, a_3)$ then we have $a_1 > 0$. Apply the Fourier-Mukai transform $\Gamma$ to $\widehat{\Gamma}^0_{\mathcal{B}}(E)$. Since $\widehat{\Gamma}^0_{\mathcal{B}}(E) \in V^{\Gamma}_{\mathcal{B}^{\scriptscriptstyle Y}}(2)$, $\Gamma^2_{\mathcal{B}} \widehat{\Gamma}^0_{\mathcal{B}}(E) \in \mathcal{B}^{\scriptscriptstyle Y}$ has $\mathop{\rm Coh}\nolimits(Y)$-cohomologies: \begin{itemize} \item $\Psi^1 (A)$ in position $-1$, and \item $\Psi^2 (A)$ in position $0$. \end{itemize} So we have $A \in V^{\Psi}_{\mathop{\rm Coh}\nolimits(Y)}(1,2)$, $\Psi^1 (A) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} }((-\infty, - \frac{1}{2 \lambda}])$, and by Proposition~\ref{prop:slope-bound-FMT-T-2}, $\Psi^2 (A) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y} }((0, +\infty])$. Therefore, $v_1^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}} (\Psi^1 (A)) \le 0$, $v_1^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}} (\Psi^2 (A)) \ge 0$, and so $v_1^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}} (\Psi (A)) \ge 0$. Hence, by Theorem \ref{prop:antidiagonal-rep-cohom-FMT}, \begin{align*} a_2 & = v_2^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} } (A) \le 0. 
\end{align*} So $$ \operatorname{Im} Z_{\Omega}(\widehat{\Gamma}^0_{\mathcal{B}}(E)) = \operatorname{Im} Z_{\Omega}(A) = \frac{ \sqrt{3}\lambda}{4}(a_2 - \lambda a_1) < 0 $$ as required. \\ \noindent (3) \ Let $E \in \mathcal{T}^{\scriptscriptstyle X}_2$. \\ \noindent (i) \ Similar to the proof of (1)(i). \\ \noindent (ii) \ From (3)(i), we have $\Gamma^2_{\mathcal{B}}(E) \cong A[1]$ for some coherent sheaf $0 \ne A \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty, -\frac{1}{2\lambda}])$. Let $v^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}}(A) = (a_0, a_1, a_2, a_3)$. So $a_1 < 0$. Apply the Fourier-Mukai transform $\widehat{\Gamma}$ to $\Gamma^2_{\mathcal{B}}(E)$. Since $\Gamma^2_{\mathcal{B}}(E) \in V^{\widehat{\Gamma}}_{\mathcal{B}^{\scriptscriptstyle X}}(0)$, $\widehat{\Gamma}^0_{\mathcal{B}} \Gamma^2_{\mathcal{B}}(E) \in \mathcal{B}^{\scriptscriptstyle X}$ has $\mathop{\rm Coh}\nolimits(X)$-cohomologies: \begin{itemize} \item $\widehat{\Psi}^1 (A)$ in position $-1$, and \item $\widehat{\Psi}^2(A)$ in position $0$. \end{itemize} So we have $A \in V^{\widehat{\Psi}}_{\mathop{\rm Coh}\nolimits(X)}(1,2)$, $\widehat{\Psi}^2 (A) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}([\frac{\lambda}{2}, +\infty])$, and by Proposition~\ref{prop:slope-bound-FMT-F-1}--(i), $\widehat{\Psi}^1(A) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty, 0])$. Therefore, $v_1^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(\widehat{\Psi}^1(A))\le 0$ and $v_1^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(\widehat{\Psi}^2(A)) \ge 0$. So $v_1^{-D_{\scriptscriptstyle X}, \ell_{\scriptscriptstyle{X}} }(\widehat{\Psi}(A)) \ge 0$, and hence, from Theorem \ref{prop:antidiagonal-rep-cohom-FMT}, $a_2 \le 0$. 
Therefore, \begin{align*} \operatorname{Im} Z_{\Omega'}(\Gamma^2_{\mathcal{B}}(E)) = \operatorname{Im} Z_{\Omega'}(A[1]) = \frac{\sqrt{3}}{4 \lambda}\left(-a_2 - \frac{1}{\lambda}a_1\right) > 0 \end{align*} as required. \\ \noindent (4) \ Let $E \in \mathcal{F}^{\scriptscriptstyle X}_2 $. \\ \noindent (i) \ Similar to the proof of (2)(i). \\ \noindent (ii) \ From (4)(i) we have $\Gamma^0_{\mathcal{B}}(E) \cong A$ for some non-trivial coherent sheaf $A \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\frac{1}{2 \lambda},+\infty])$. Consider the convergence of the spectral sequence for $E$: $$ E^{p,q}_2=\Gamma^{p} \mathcal{H}^{q}(E) \Longrightarrow \Gamma^{p+q}(E). $$ By Proposition~\ref{prop:slope-bounds}--(i), we have $\mathcal{H}^{-1}(E) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{X}} , -D_{\scriptscriptstyle X}}((-\infty, 0])$, and so by Propositions \ref{prop:slope-bound-FMT-F-1}-(i) and \ref{prop:slope-bound-FMT-0-g}--(i), $$ \Psi^1(\mathcal{H}^{-1}(E)) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty,0]), \text{ and } \Psi^0 (\mathcal{H}^{0}(E)) \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty,0]). $$ Therefore, from the convergence of the above spectral sequence for $E$, we have $$ A \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\frac{1}{2 \lambda },+\infty]) \cap \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\infty,0]) =\operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}((-\frac{1}{2 \lambda},0]). $$ Also by Propositions \ref{prop:FMT-F-1-reflexivity} and \ref{prop:FMT-0-cohomology-reflexive}--(ii), $\Psi^1 (\mathcal{H}^{-1}(E))$ and $\Psi^0 (\mathcal{H}^{0}(E))$ are reflexive sheaves, and so $A$ is reflexive. Let $v^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}} (A)= (a_0, a_1, a_2, a_3)$. 
By the usual Bogomolov-Gieseker inequalities for all the Harder-Narasimhan semistable factors of $A$, we obtain $a_2 + \frac{1}{2\lambda } a_1 \le 0$. So we have $$ \operatorname{Im} Z_{\Omega'}(\Gamma^0_{\mathcal{B}}(E)) = \operatorname{Im} Z_{\Omega'}(A) = \frac{ \sqrt{3}}{4 \lambda }\left(a_2 + \frac{1}{\lambda} a_1 \right) \le 0. $$ Equality holds when $A \in \operatorname{HN}^{\mu}_{\ell_{\scriptscriptstyle{Y}}, D_{\scriptscriptstyle Y}}(0)$ with $v^{D_{\scriptscriptstyle Y}, \ell_{\scriptscriptstyle{Y}}}(A) = (a_0, 0, 0, *)$. By considering a Jordan{-}H\"{o}lder filtration for $A$ together with Theorem~\ref{prop4.15}, $A$ is filtered with quotients of sheaves $K_i$, each of which fits into the short exact sequence $$ 0 \to K_i \to \mathcal{E}_{\{x_i\} \times Y} \to \mathcal{O}_{Z_i} \to 0 $$ in $\mathop{\rm Coh}\nolimits(Y)$ for some 0-subschemes $Z_i \subset Y$. Here $\Gamma^0_{\mathcal{B}}(E) \cong A \in V^{\widehat{\Gamma}}_{\mathcal{B}^{\scriptscriptstyle X}}(2)$ implies $A \in V^{\widehat{\Psi}}_{\mathop{\rm Coh}\nolimits(X)}(2,3)$. An easy induction on the number of $K_i$ in $A$ shows that $A \in V^{\widehat{\Psi}}_{\mathop{\rm Coh}\nolimits(X)}(1,3)$ and so $A \in V^{\widehat{\Psi}}_{\mathop{\rm Coh}\nolimits(X)}(3)$. Therefore, $Z_i =\emptyset$ for all $i$ and so $\widehat{\Gamma}^2_{\mathcal{B}} \Gamma^0_{\mathcal{B}}(E) \in \mathop{\rm Coh}\nolimits_0(X)$. Now consider the convergence of the Spectral Sequence~\ref{Spec-Seq-B} for $E$. We have the short exact sequence $$ 0 \to \widehat{\Gamma}^0_{\mathcal{B}}\Gamma^1_{\mathcal{B}}(E) \to \widehat{\Gamma}^2_{\mathcal{B}}\Gamma^0_{\mathcal{B}}(E) \to G \to 0 $$ in $\mathcal{B}^{\scriptscriptstyle X}$, where $G$ is a subobject of $E$ and so $G \in \mathcal{F}^{\scriptscriptstyle X}_2$. 
Now $\widehat{\Gamma}^2_{\mathcal{B}} \Gamma^0_{\mathcal{B}}(E) \in \mathop{\rm Coh}\nolimits_0(X) \subset \mathcal{T}^{\scriptscriptstyle X}_2$ implies $G =0$ and so $\widehat{\Gamma}^0_{\mathcal{B}}\Gamma^1_{\mathcal{B}}(E) \cong \widehat{\Gamma}^2_{\mathcal{B}}\Gamma^0_{\mathcal{B}}(E)$. Then we have $\Gamma^0_{\mathcal{B}}(E) \cong \Gamma^0_{\mathcal{B}} \widehat{\Gamma}^0_{\mathcal{B}}\Gamma^1_{\mathcal{B}}(E) =0$. This is not possible as $\Gamma^0_{\mathcal{B}}(E) \ne 0$. Therefore, we have the strict inequality $\operatorname{Im} Z_{\Omega'}(\Gamma^0_{\mathcal{B}}(E)) <0$ as required. \end{proof} \begin{lem} \label{prop:B-cohomo-vanishing-FMT-0-2} We have the following: \begin{enumerate}[label=(\arabic*)] \item if $E \in \mathcal{T}^{\scriptscriptstyle Y}_2$ then $\widehat{\Gamma}^2_{\mathcal{B}}(E) =0$, \item if $E \in \mathcal{F}^{\scriptscriptstyle Y}_2 $ then $\widehat{\Gamma}^0_{\mathcal{B}}(E) =0$, \item if $E \in \mathcal{T}^{\scriptscriptstyle X}_2$ then $\Gamma^2_{\mathcal{B}}(E) =0$, and \item if $E \in \mathcal{F}^{\scriptscriptstyle X}_2$ then $\Gamma^0_{\mathcal{B}}(E) =0$. \end{enumerate} \end{lem} \begin{proof} First let us prove (1). Let $E \in \mathcal{T}^{\scriptscriptstyle Y}_2$. From the convergence of the Spectral Sequence~\ref{Spec-Seq-B} for $E$, we have the short exact sequence $$ 0 \to Q \to \Gamma^0_{\mathcal{B}} \widehat{\Gamma}^2_{\mathcal{B}}(E) \to \Gamma^2_{\mathcal{B}} \widehat{\Gamma}^1_{\mathcal{B}}(E) \to 0 $$ in $\mathcal{B}^{\scriptscriptstyle Y}$. Here $Q$ is a quotient of $E$ and so $Q \in \mathcal{T}^{\scriptscriptstyle Y}_2$. From the Harder-Narasimhan filtration property $\Gamma^0_{\mathcal{B}} \widehat{\Gamma}^2_{\mathcal{B}}(E)$ fits into the short exact sequence $$ 0 \to T \to \Gamma^0_{\mathcal{B}} \widehat{\Gamma}^2_{\mathcal{B}}(E) \to F \to 0 $$ in $\mathcal{B}^{\scriptscriptstyle Y}$ for some $T \in \mathcal{T}^{\scriptscriptstyle Y}_2$ and $F \in \mathcal{F}^{\scriptscriptstyle Y}_2$. 
Now apply the Fourier-Mukai transform $\widehat{\Gamma}$ and consider the long exact sequence of $\mathcal{B}^{\scriptscriptstyle X}$-cohomologies. Then we have $\widehat{\Gamma}^0_{\mathcal{B}}(T) =0$, $\widehat{\Gamma}^1_{\mathcal{B}}(T) \cong \widehat{\Gamma}^0_{\mathcal{B}}(F)$. By Proposition \ref{prop:FMT-B-cat-bounds-Imginary-Z}--(2)(ii), $\operatorname{Im} Z_{\Omega}(\widehat{\Gamma}^0_{\mathcal{B}}(F)) \le 0$ and by Proposition \ref{prop:FMT-B-cat-bounds-Imginary-Z}--(1)(ii), $\operatorname{Im} Z_{\Omega}(\widehat{\Gamma}^2_{\mathcal{B}}(T)) \ge 0$. So $\operatorname{Im} Z_{\Omega}(\widehat{\Gamma}(T)) \ge 0$, and by Proposition~\ref{prop:imgainary-part-central-charge}--(2), $\operatorname{Im} Z_{\Omega'}(T) \le 0$. Since $T \in \mathcal{T}^{\scriptscriptstyle Y}_2$, we have $\operatorname{Im} Z_{\Omega'}(T) = 0$ and $v_1^{D_{\scriptscriptstyle Y} - \frac{1}{2 \lambda}\ell_{\scriptscriptstyle{Y}}, \ell_{\scriptscriptstyle{Y}}}(T)=0$. From Lemma~\ref{prop:first-tilt-behaves-like-sheaves-surfaces}, $T \cong T_0$ for some $T_0 \in \mathop{\rm Coh}\nolimits_0(Y)$. But $\mathop{\rm Coh}\nolimits_0(Y) \subset V^{\widehat{\Gamma}}_{\mathcal{B}^{\scriptscriptstyle X}}(0)$. Hence, $T =0$ and so $Q =0$. Then $\Gamma^0_{\mathcal{B}} \widehat{\Gamma}^2_{\mathcal{B}}(E) \cong \Gamma^2_{\mathcal{B}} \widehat{\Gamma}^1_{\mathcal{B}}(E) $ and so we have $\widehat{\Gamma}^2_{\mathcal{B}}(E) \cong \widehat{\Gamma}^2_{\mathcal{B}} \Gamma^2_{\mathcal{B}} \widehat{\Gamma}^1_{\mathcal{B}}(E) = 0$ as required. \medskip \noindent Proofs of (2),(3) and (4) are similar to that of (1). 
\end{proof} \begin{prop} \label{prop:tilt-bounds-FMT-B-cat-0-2} We have the following: \begin{enumerate}[label=(\arabic*)] \item if $E \in \mathcal{B}^{\scriptscriptstyle Y}$ then (i) $\widehat{\Gamma}^2_{\mathcal{B}}(E) \in \mathcal{T}^{\scriptscriptstyle X}_2 $, and (ii) $\widehat{\Gamma}^0_{\mathcal{B}}(E) \in \mathcal{F}^{\scriptscriptstyle X}_2$; \item if $E \in \mathcal{B}^{\scriptscriptstyle X}$ then (i) $\Gamma^2_{\mathcal{B}}(E) \in \mathcal{T}^{\scriptscriptstyle Y}_2$, and (ii) $\Gamma^0_{\mathcal{B}}(E) \in \mathcal{F}^{\scriptscriptstyle Y}_2$. \end{enumerate} \end{prop} \begin{proof} (1) \ Let $E \in \mathcal{B}^{\scriptscriptstyle Y}$. By the definition of torsion theory $\widehat{\Gamma}^2_{\mathcal{B}}(E)$ fits into the short exact sequence $$ 0 \to T \to \widehat{\Gamma}^2_{\mathcal{B}}(E) \to F \to 0 $$ in $\mathcal{B}^{\scriptscriptstyle X}$ for some $T \in \mathcal{T}^{\scriptscriptstyle X}_2$ and $F \in \mathcal{F}^{\scriptscriptstyle X}_2$. Now apply the Fourier-Mukai transform $\Gamma$ and consider the long exact sequence of $\mathcal{B}^{\scriptscriptstyle Y}$-cohomologies. By Lemma~\ref{prop:B-cohomo-vanishing-FMT-0-2}, $\Gamma^{i}_{\mathcal{B}}(F)=0$ for all $i$, and so $F=0$ as required. Similarly one can prove $\widehat{\Gamma}^0_{\mathcal{B}}(E) \in \mathcal{F}^{\scriptscriptstyle X}_2$. \\ \noindent (2) \ Similar to the proofs in (1). 
\end{proof} \begin{prop} \label{prop:tilt-bounds-FMT-B-1} We have the following: \begin{enumerate}[label=(\arabic*)] \item if $E \in \mathcal{F}^{\scriptscriptstyle Y}_2 $ then $\widehat{\Gamma}^1_{\mathcal{B}}(E) \in \mathcal{F}^{\scriptscriptstyle X}_2$, \item if $E \in \mathcal{T}^{\scriptscriptstyle Y}_2$ then $\widehat{\Gamma}^1_{\mathcal{B}}(E) \in \mathcal{T}^{\scriptscriptstyle X}_2$, \item if $E \in \mathcal{F}^{\scriptscriptstyle X}_2$ then $\Gamma^1_{\mathcal{B}}(E) \in \mathcal{F}^{\scriptscriptstyle Y}_2$, and \item if $E \in \mathcal{T}^{\scriptscriptstyle X}_2$ then $\Gamma^1_{\mathcal{B}}(E) \in \mathcal{T}^{\scriptscriptstyle Y}_2$. \end{enumerate} \end{prop} \begin{proof} Let us prove (1). Let $E \in \mathcal{F}^{\scriptscriptstyle Y}_2$. By the definition of torsion theory $\widehat{\Gamma}^1_{\mathcal{B}}(E)$ fits into the short exact sequence $$ 0 \to T \to \widehat{\Gamma}^1_{\mathcal{B}}(E) \to F \to 0 $$ in $\mathcal{B}^{\scriptscriptstyle X}$ for some $T \in \mathcal{T}^{\scriptscriptstyle X}_2$ and $F \in \mathcal{F}^{\scriptscriptstyle X}_2$. Now we need to show $T=0$. Apply the Fourier-Mukai transform $\Gamma$ and consider the long exact sequence of $\mathcal{B}^{\scriptscriptstyle Y}$-cohomologies. We get $\Gamma^1_{\mathcal{B}}(T) \hookrightarrow \Gamma^1_{\mathcal{B}} \widehat{\Gamma}^1_{\mathcal{B}}(E)$ and $T \in V^{\Gamma}_{\mathcal{B}^{\scriptscriptstyle Y}}(1)$. Also by the convergence of the Spectral Sequence~\ref{Spec-Seq-B} for $E \in \mathcal{F}^{\scriptscriptstyle Y}_2$, $\Gamma^1_{\mathcal{B}} \widehat{\Gamma}^1_{\mathcal{B}}(E)$ is a subobject of $E$. Hence, $\Gamma^1_{\mathcal{B}}(T) \in \mathcal{F}^{\scriptscriptstyle Y}_2$ implies $\operatorname{Im} Z_{\Omega'}(\Gamma^1_{\mathcal{B}}(T)) \le 0$. 
On the other hand, by Proposition \ref{prop:imgainary-part-central-charge}, $ \operatorname{Im} Z_{\Omega'}(\Gamma^1_{\mathcal{B}}(T)) = \frac{3!}{r \lambda^3 \ell_{\scriptscriptstyle{X}} ^3}\operatorname{Im} Z_{\Omega}(T) \ge 0$ as $T \in \mathcal{T}^{\scriptscriptstyle X}_2$. Hence, $\operatorname{Im} Z_{\Omega}(T) = 0$ and $T \in \mathcal{T}^{\scriptscriptstyle X}_2$ implies $v_1^{-D_{\scriptscriptstyle X}+ \frac{\lambda}{2}\ell_{\scriptscriptstyle{X}} , \ell_{\scriptscriptstyle{X}} } (T) =0$. So by Proposition \ref{prop:first-tilt-behaves-like-sheaves-surfaces}, $T \cong T_0$ for some $T_0 \in \mathop{\rm Coh}\nolimits_0(X)$. Since any object from $\mathop{\rm Coh}\nolimits_0(X)$ belongs to $V^{\Gamma}_{\mathcal{B}^{\scriptscriptstyle Y}}(0)$, $\Gamma^1_{\mathcal{B}}(T)= 0$. So $T=0$ as required. \medskip \noindent Proofs of (2), (3) and (4) are similar to that of (1). \end{proof} From Lemma \ref{prop:B-cohomo-vanishing-FMT-0-2}, Propositions \ref{prop:tilt-bounds-FMT-B-cat-0-2} and \ref{prop:tilt-bounds-FMT-B-1} we have \begin{equation*} \left.\begin{aligned} & \Gamma (\mathcal{T}^{\scriptscriptstyle X}_2) \subset \langle \mathcal{F}^{\scriptscriptstyle Y}_2 , \mathcal{T}^{\scriptscriptstyle Y}_2[-1] \rangle = \mathcal{A}^{\scriptscriptstyle Y}[-1]\\ & \Gamma(\mathcal{F}^{\scriptscriptstyle X}_2[1]) \subset \langle \mathcal{F}^{\scriptscriptstyle Y}_2 , \mathcal{T}^{\scriptscriptstyle Y}_2[-1] \rangle = \mathcal{A}^{\scriptscriptstyle Y}[-1] \end{aligned} \ \right\}, \end{equation*} and \begin{equation*} \left.\begin{aligned} & \widehat{\Gamma} (\mathcal{T}^{\scriptscriptstyle Y}_2) \subset \langle \mathcal{F}^{\scriptscriptstyle X}_2 , \mathcal{T}^{\scriptscriptstyle X}_2[-1] \rangle = \mathcal{A}^{\scriptscriptstyle X}[-1]\\ & \widehat{\Gamma}(\mathcal{F}^{\scriptscriptstyle Y}_2[1]) \subset \langle \mathcal{F}^{\scriptscriptstyle X}_2 , \mathcal{T}^{\scriptscriptstyle X}_2[-1] \rangle = \mathcal{A}^{\scriptscriptstyle X}[-1] \end{aligned} \ \right\}. 
\end{equation*} Since $\mathcal{A}^{\scriptscriptstyle X} = \langle \mathcal{F}^{\scriptscriptstyle X}[1] , \mathcal{T}^{\scriptscriptstyle X} \rangle$ and $\mathcal{A}^{\scriptscriptstyle Y} = \langle \mathcal{F}^{\scriptscriptstyle Y}[1] , \mathcal{T}^{\scriptscriptstyle Y} \rangle$, we have $\Gamma [1] (\mathcal{A}^{\scriptscriptstyle X} ) \subset \mathcal{A}^{\scriptscriptstyle Y}$ and $\widehat{\Gamma} [1] (\mathcal{A}^{\scriptscriptstyle Y} ) \subset \mathcal{A}^{\scriptscriptstyle X}$. Since we have the isomorphisms $\widehat{\Gamma}[1] \circ \Gamma[1] \cong \textrm{id}_{D^b(X)}$ and $\Gamma[1] \circ \widehat{\Gamma}[1] \cong \textrm{id}_{D^b(Y)}$, we deduce the following. \begin{thm} \label{prop:equivalence-stab-hearts-threefolds} The Fourier-Mukai transforms $\Gamma, \widehat{\Gamma}$ give the equivalences of the double tilted hearts: $$ \Gamma[1]\left(\mathcal{A}^{\scriptscriptstyle X} \right) \cong \mathcal{A}^{\scriptscriptstyle Y}, \ \text{ and } \ \widehat{\Gamma}[1]\left(\mathcal{A}^{\scriptscriptstyle Y}\right) \cong \mathcal{A}^{\scriptscriptstyle X}. $$ \end{thm} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR } \providecommand{\MRhref}[2]{% \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2}
1,108,101,565,911
arxiv
\section{Introduction} \input{sec-intro.tex} \section{Related Work} \input{sec-related.tex} \section{Models} \input{sec-models.tex} \section{Experiments} \input{sec-exps.tex} \section{Conclusions and Further Work} \input{sec-conc.tex} \section*{Acknowledgments} This research was supported in part by DFG grant RI-2221/2-1 ``Grounding Statistical Machine Translation in Perception and Action'', and by an Amazon Academic Research Award (AARA) ``Multimodal Pivots for Low Resource Machine Translation in E-Commerce Localization''. \subsection{Bilingual Image-Caption Data} We constructed a German-English parallel dataset based on the MS COCO image corpus \cite{mscoco}. 1,000 images were selected at random from the 2014 training section\footnote{We constructed our parallel dataset using only the training rather than the validation section of MS COCO so as to keep the latter pristine for future work based on this research.} and, in a second step, one of their five English captions was chosen randomly. This caption was then translated into German by a native German speaker. Note that our experiments were performed with German as the source and English as the target language, therefore, our reference data was not produced by a single speaker but reflects the heterogeneity of the MS COCO dataset at large. The data was split into a development set of 250 captions, a development test set of 250 captions for testing work in progress, and a test set of 500 captions. For our retrieval experiments, we used only the images and captions that were not included in the development, development test or test data, a total of 81,822 images with 5 English captions per image. All data was tokenized and converted to lower case using the {\tt cdec}\footnote{\url{https://github.com/redpony/cdec}} utilities {\tt tokenized-anything.pl} and {\tt lowercase.pl}. 
For the German data, we performed compound-splitting using the method described by \newcite{Dyer2009}, as implemented by the {\tt cdec} utility {\tt compound-split.pl}. Table \ref{table:img_data} gives an overview of the dataset. Our parallel development, development test and test data is publicly available.\footnote{\url{www.cl.uni-heidelberg.de/decoco/}} \begin{table}[t] \begin{center} \resizebox{\columnwidth}{!}{ \begin{tabular}{lccc} \toprule Section & Images & Captions & Languages \\ \midrule DEV & 250 & 250 & DE-EN \\ DEVTEST & 250 & 250 & DE-EN \\ TEST & 500 & 500 & DE-EN \\ RETRIEVAL ($C$) & 81,822 & 409,110 & EN \\ \bottomrule \end{tabular} } \end{center} \caption{\label{table:img_data} Number of images and sentences in MS COCO image and caption data used in experiments.} \end{table} \subsection{Translation Baselines} \label{section:translation_system} We compare our approach to two baseline machine translation systems, one trained on out-of-domain data exclusively and one domain-adapted system. Table \ref{table:mt_data} gives an overview of the training data for the machine translation systems. \paragraph*{Out-of-Domain Baseline.} Our baseline SMT framework is hierarchical phrase-based translation using synchronous context free grammars \cite{Chiang2007}, as implemented by the {\tt cdec} decoder \cite{Dyer2010}. Data from the Europarl \cite{Koehn2005}, News Commentary and Common Crawl corpora \cite{Smith2013} as provided for the WMT15 workshop was used to train the translation model, with German as source and English as target language. Like the retrieval dataset, training, development and test data was tokenized and converted to lower case, using the same {\tt cdec} tools. Sentences with lengths over 80 words in either the source or the target language were discarded before training. Source text compound splitting was performed using {\tt compound-split.pl}. 
Alignments were extracted bidirectionally using the {\tt fast-align} utility of {\tt cdec} and symmetrized with the {\tt atools} utility (also part of {\tt cdec}) using the {\tt grow-diag-final-and} symmetrization heuristic. The alignments were then used by the {\tt cdec} grammar extractor to extract a synchronous context free grammar from the parallel data. The target language model was trained on monolingual data from Europarl, as well as the News Crawl and News Discussions English datasets provided for the WMT15 workshop (the same data as was used for estimating term frequencies for the retrieval models) with the {\tt KenLM} toolkit \cite{Heafield-estimate,Heafield-kenlm}.\footnote{\url{https://kheafield.com/code/kenlm/}} We optimized the parameters of the translation system for translation quality as measured by IBM BLEU \cite{Papineni2002} using the Margin Infused Relaxed Algorithm (MIRA) \cite{Crammer2003}. For tuning the translation models used for extraction of the hypothesis lists for final evaluation, MIRA was run for 20 iterations on the development set, and the best run was chosen for final testing. \paragraph*{In-Domain Baseline.} We also compared our models to a domain-adapted machine translation system. The domain-adapted system was identical to the out-of-domain system, except that it was supplied with additional parallel training data from the image caption domain. For this purpose, we used 29,000 parallel German-English image captions as provided for the WMT16 shared task on multimodal machine translation. The English captions in this dataset belong to the Flickr30k corpus \cite{Flickr30k} and are very similar to those of the MS COCO corpus. The German captions are expert translations. The English captions were also used as additional training data for the target-side language model. We generated $k_n$- and $k_r$-best lists of translation candidates using this in-domain baseline system. 
\begin{table}[t] \begin{center} \resizebox{\columnwidth}{!}{ \begin{tabular}{lccc} \toprule Corpus & Sentences & Languages & System\\ \midrule Europarl & 1,920,209 & DE-EN & O/I \\ News Commentary & 216,190 & DE-EN & O/I \\ Common Crawl & 2,399,123 & DE-EN & O/I \\ Flickr30k WMT16 & 29,000 & DE-EN & I \\ \midrule Europarl & 2,218,201 & EN & O/I \\ News Crawl & 28,127,448 & EN & O/I\\ News Discussions & 57,803,684 & EN & O/I\\ Flickr30k WMT16 & 29,000 & EN & I \\ \bottomrule \end{tabular} } \end{center} \caption{\label{table:mt_data} Parallel and monolingual data used for training machine translation systems. Sentence counts are given for raw data without pre-processing. O/I: both out-of-domain and in-domain system, I: in-domain system only.} \end{table} \subsection{Optimization of TSR Hyperparameters} \label{section:retrieval_tuning} For each of our retrieval models, we performed a step-wise exhaustive search of the hyperparameter space over the four system hyperparameters for IBM BLEU on the development set: The length of the $k_n$-best list the entries of which are used as queries for retrieval; the number of $k_m$-best-matching captions retrieved; the length of the final $k_r$-best list used in reranking; the interpolation weight $\lambda$ of the relevance score $F$ relative to the translation hypothesis log probability returned by the decoder. The parameter ranges to be explored were determined manually, by examining system output for prototypical examples. Table \ref{table:tuning} gives an overview over the hyperparameter values obtained. For TSR-CNN, we initially set the cutoff distance $d$ to 90.0, after manually inspecting sets of nearest neighbors returned for various maximum distance values. After optimization of retrieval parameters, we performed an exhaustive search from $d=80.0$ to $d=100.0$, with step size 1.0 on the development set, while keeping all other hyperparameters fixed, which confirmed our initial choice of $d=90.0$ as the optimal value. 
Explored parameter spaces were identical for all models and each model was evaluated on the test set using its own optimal configuration of hyperparameters. \begin{table}[t] \begin{center} \begin{tabular}{lcccc} \toprule Model & $k_n$ & $k_m$ & $k_r$ &$\lambda$ \\ \midrule TSR-TXT & $300$ & $500$ & $5$ & $5 \cdot 10^{4}$ \\ TSR-CNN & $300$ & $300$ & $5$ & $70 \cdot 10^{4}$ \\ TSR-HCA & $300$ & $500$ & $5$ & $10 \cdot 10^{4}$ \\ \bottomrule \end{tabular} \end{center} \caption{\label{table:tuning} Optimized hyperparameter values used in final evaluation.} \end{table} \subsection{Significance Testing} \label{section:sigtesting} Significance tests on the differences in translation quality were performed using the approximate randomization technique for measuring performance differences of machine translation systems described in \newcite{Riezler2005} and implemented by \newcite{Clark2011} as part of the {\tt Multeval} toolkit.\footnote{\url{https://github.com/jhclark/multeval}} \subsection{Experimental Results} Table \ref{tab:scores} summarizes the results for all models on an unseen test set of 500 captions. Domain adaptation led to a considerable improvement of +4.1 BLEU and large improvements in terms of METEOR and Translation Edit Rate (TER). We found that the target-side retrieval models enhanced with multimodal pivots from a deep convolutional neural network, TSR-CNN and TSR-HCA, consistently outperformed both the domain-adapted {\tt cdec} baseline, as well as the text-based target side retrieval model TSR-TXT. These models therefore achieve a performance gain which goes beyond the effect of generic domain-adaptation. The gain in performance for TSR-CNN and TSR-HCA was significant at $p<0.05$ for BLEU, METEOR, and TER. For all evaluation metrics, the difference between TSR-CNN and TSR-HCA was not significant, demonstrating that retrieval using our CNN-derived distance metric could match retrieval based on the human object category annotations. 
\begin{table}[t] \begin{center} \resizebox{\columnwidth}{!}{ \begin{tabular}{llllll} \toprule \bf System & BLEU $\uparrow$ & \bf $p_{c}$ & \bf $p_t$ & \bf $p_d$ & \bf $p_o$ \\ \midrule {\tt cdec} out-dom. & 25.5 & & & & \\ {\tt cdec} in-dom. & 29.6 & & & & 0.00 \\ TSR-TXT & 29.7 & & & 0.45 & 0.00 \\ TSR-CNN & \textbf{30.6} & & 0.04 & 0.02 & 0.00 \\ TSR-HCA & \textbf{30.3} & 0.42 & 0.01 & 0.00 & 0.00 \\ \midrule \bf System & METEOR $\uparrow$ & \bf $p_{c}$ & \bf $p_t$ & \bf $p_d$ & \bf $p_o$ \\ \midrule {\tt cdec} out-dom. & 31.7 & & & & \\ {\tt cdec} in-dom. & 34.0 & & & & 0.00 \\ TSR-TXT & 34.1 & & & 0.41 & 0.00 \\ TSR-CNN & \textbf{34.7} & & 0.00 & 0.00 & 0.00 \\ TSR-HCA & \textbf{34.4} & 0.09 & 0.00 & 0.00 & 0.00 \\ \midrule \bf System & TER $\downarrow$ & \bf $p_{c}$ & \bf $p_t$ & \bf $p_d$ & \bf $p_o$ \\ \midrule {\tt cdec} out-dom. & 49.3 & & & & \\ {\tt cdec} in-dom. & 46.1 & & & & 0.00 \\ TSR-TXT & 45.8 & & & 0.12 & 0.00 \\ TSR-CNN & \textbf{45.1} & & 0.03 & 0.00 & 0.00 \\ TSR-HCA & \textbf{45.3} & 0.34 & 0.02 & 0.00 & 0.00 \\ \bottomrule \end{tabular} } \end{center} \caption{\label{tab:scores} Metric scores for all systems and their significance levels as reported by {\tt Multeval}. $p_o$-values are relative to the {\tt cdec} out-of-domain baseline, $p_d$-values are relative to the {\tt cdec} in-domain baseline, $p_t$-values are relative to TSR-TXT and $p_c$-values are relative to TSR-CNN. Best results are reported in \textbf{bold} face.\footnotemark } \end{table} \footnotetext{A baseline for which a random hypothesis was chosen from the top-5 candidates of the in-domain system lies between the other two baseline systems: 27.5\,/\,33.3\,/\,47.7 (BLEU\,/\,METEOR\,/\,TER). } The text-based retrieval baseline TSR-TXT never significantly outperformed the in-domain {\tt cdec} baseline, but there were slight nominal improvements in terms of BLEU, METEOR and TER. 
This finding is actually consistent with \newcite{Waeschle2015} who report performance gains for text-based, target side retrieval models only on highly technical, narrow-domain corpora and even report performance degradation on medium-diversity corpora such as Europarl. Our experiments show that it is the addition of visual similarity information by incorporation of multimodal pivots into the image-enhanced models TSR-CNN and TSR-HCA which makes such techniques effective on MS COCO, thus upholding our hypothesis that visual information can be exploited for improvement of caption translation. \begin{figure}[t] \centering \includegraphics[scale=.6]{blindtest.pdf} \caption{\label{figure:blindtest} Results of the human pairwise preference ranking experiment, given as the joint distribution of both rankings: $a+$ denotes preference for TSR-CNN in terms of accuracy, $f+$ in terms of fluency; $a-$ denotes preference for the in-domain baseline in terms of accuracy, $f-$ in terms of fluency. } \end{figure} \subsection{Human Evaluation} The in-domain baseline and TSR-CNN differed in their output in 169 out of 500 cases on the test set. These 169 cases were presented to a human judge alongside the German source captions in a double-blinded pairwise preference ranking experiment. The order of presentation was randomized for the two systems. The judge was asked to rank fluency and accuracy of the translations independently. The results are given in Figure \ref{figure:blindtest}. Overall, there was a clear preference for the output of TSR-CNN. \subsection{Examples} Table \ref{tab:examples} shows example translations produced by both {\tt cdec} baselines, TSR-TXT, TSR-CNN, and TSR-HCA, together with source caption, image, and reference translation. 
The visual information induced by target side captions of pivot images allows a disambiguation of translation alternatives such as ``skirt'' versus ``rock (music)'' for the German ``Rock'', ``pole'' versus ``mast'' for the German ``Masten'', and is able to repair mistranslations such as ``foot'' instead of ``mouth'' for the German ``Maul''. \begin{table} \begin{center} \begin{footnotesize} \begin{tabular}{|p{2cm} |p{4.8cm}|} \hline Image: & \vspace{1pt} \includegraphics[scale=0.18]{example3.jpg}\vspace{3pt} \\ \hline Source:& Eine Person in einem Anzug und Krawatte und einem Rock.\\ \hline {\tt cdec} out-dom: & a person in a suit and tie and a rock . \\ \hline {\tt cdec} in-dom: & a person in a suit and tie and a rock .\\ \hline TSR-TXT:& a person in a suit and tie and a rock . \\ \hline TSR-CNN: & a person in a suit and tie and a skirt .\\ \hline TSR-HCA:& a person in a suit and tie and a rock . \\ \hline Reference:& a person wearing a suit and tie and a skirt\\ \hline \hline Image: &\vspace{1pt} \includegraphics[scale=0.17]{example4.jpg}\vspace{3pt}\\ \hline Source:& Ein Masten mit zwei Ampeln f\"ur Autofahrer. \\ \hline {\tt cdec} out-dom: & a mast with two lights for drivers . \\ \hline {\tt cdec} in-dom: & a mast with two lights for drivers . \\ \hline TSR-TXT:& a mast with two lights for drivers . \\ \hline TSR-CNN: & a pole with two lights for drivers . \\ \hline TSR-HCA:& a pole with two lights for drivers . \\ \hline Reference:& a pole has two street lights on it for drivers . \\ \hline \hline Image: &\vspace{1pt} \includegraphics[scale=0.18]{example1.jpg}\vspace{3pt}\\ \hline Source:& Ein Hund auf einer Wiese mit einem Frisbee im Maul.\\ \hline {\tt cdec} out-dom: & a dog on a lawn with a frisbee in the foot .\\ \hline {\tt cdec} in-dom: & a dog with a frisbee in a grassy field . \\ \hline TSR-TXT:& a dog with a frisbee in a grassy field . \\ \hline TSR-CNN: & a dog in a grassy field with a frisbee in its mouth . 
\\ \hline TSR-HCA:& a dog with a frisbee in a grassy field . \\ \hline Reference:& a dog in a field with a frisbee in its mouth \\ \hline \end{tabular} \end{footnotesize} \end{center} \caption{\label{tab:examples} Examples for improved caption translation by multimodal feedback.} \end{table} \subsection{Overview} \begin{figure}[t] \centering \includegraphics[scale=0.23]{pipeline_multimodal_noborder.pdf} \caption{\label{figure:pipeline} Overview of model architecture.} \end{figure} Following the basic approach set out by \newcite{Waeschle2015}, we use a crosslingual retrieval model to find sentences in a target language document collection $C$, and use these to rerank target language translations $e$ of a source caption $f$. The systems described in our work differ from that of \newcite{Waeschle2015} in a number of aspects. Instead of a two-step architecture of coarse-grained and fine-grained retrieval, our system uses relevance scoring functions for retrieval of matches in the document collection $C$, and for reranking of translation candidates that are based on inverse document frequency of terms \cite{SpaerckJones1972} and represent variants of the popular TF-IDF relevance measure. A schematic overview of our approach is given in Figure \ref{figure:pipeline}. It consists of the following components: \begin{description} \item[Input:] Source caption $f_i$, image $i$, target-side collection $C$ of image-caption pairs \item[Translation:] Generate unique list $N_{f_i}$ of $k_n$-best translations, generate unique list $R_{f_i}$ of $k_r$-best translations\footnote{In practice, the first hypothesis list may be reused. 
We distinguish between the two hypothesis lists $N_{f_i}$ and $R_{f_i}$ for notational clarity since in general, the two hypothesis lists need not be of equal length.} using MT decoder \item[Multimodal retrieval:] For list of translations $N_{f_i}$, find set $M_{f_i}$ of $k_m$-most relevant pairs of images and captions in a target-side collection $C$, using a heuristic relevance scoring function $S(m, N_{f_i},i), m \in C$ \item[Crosslingual reranking:] Use list $M_{f_i}$ of image-caption pairs to rerank list of translations $R_{f_i}$, applying relevance scoring function $F(r, M_{f_i})$ to all $r \in R_{f_i}$ \item[Output:] Determine best translation hypothesis $\hat{e}_i$ by interpolating decoder score $d_r$ for a hypothesis $r \in R_{f_i}$ with its relevance score $F(r, M_{f_i})$ with weight $\lambda$ s.t. $$ \hat{e}_i = \operatornamewithlimits{argmax}_{r \in R_{f_i}} d_r + \lambda \cdot F(r, M_{f_i})$$ \end{description} The central concept is the scoring function $S(m, N_{f_i},i)$ which defines three variants of target-side retrieval (TSR), all of which make use of the procedure outlined above. In the baseline text-based reranking model (TSR-TXT), we use relevance scoring function $S_{TXT}$. This function is purely text-based and does not make use of multimodal context information (as such, it comes closest to the models used for target-side retrieval in \newcite{Waeschle2015}). In the retrieval model enhanced by visual information from a deep convolutional neural network (TSR-CNN), the scoring function $S_{CNN}$ incorporates a textual relevance score with visual similarity information extracted from the neural network. Finally, we evaluate these models against a relevance score based on human object-category annotations (TSR-HCA), using the scoring function $S_{HCA}$. This function makes use of the object annotations available for the MS COCO corpus \cite{mscoco} to give an indication of the effectiveness of our automatically extracted visual similarity metric. 
The three models are discussed in detail below. \subsection{Target Side Retrieval Models} \paragraph*{Text-Based Target Side Retrieval.} \label{section:text_retrieval} In the TSR-TXT retrieval scenario, a match candidate $m \in C$ is scored in the following way: \begin{align*} &S_{TXT}(m, N_{f_i}) = \\ & Z_m\sum_{n \in N_{f_i}}\sum_{w_n \in tok(n)}\sum_{w_m \in typ(m)} \delta(w_m,w_n) idf(w_m), \\ \end{align*} where $\delta$ is the Kronecker $\delta$-function, $N_{f_i}$ is the set of the $k_n$-best translation hypotheses for a source caption $f_i$ of image $i$ by decoder score, $typ(a)$ is a function yielding the set of types (unique tokens) contained in a caption $a$,\footnote{The choice for per-type scoring of reference captions was primarily driven by performance considerations. Since captions rarely contain repetitions of low-frequency terms, this has very little effect in practice, other than to mitigate the influence of stopwords.} $tok(a)$ is a function yielding the tokens of caption $a$, $idf(w)$ is the inverse document frequency \cite{SpaerckJones1972} of term $w$, and $Z_m = \frac{1}{|typ(m)|}$ is a normalization term introduced in order to avoid biasing the system towards long match candidates containing many low-frequency terms. Term frequencies were computed on monolingual data from Europarl \cite{Koehn2005} and the News Commentary and News Discussions English datasets provided for the WMT15 workshop.\footnote{\url{http://www.statmt.org/wmt15/translation-task.html}} Note that in this model, information from the image $i$ is not used. \paragraph*{Multimodal Target Side Retrieval using CNNs.} In the TSR-CNN scenario, we supplement the textual target-side TSR model with visual similarity information from a deep convolutional neural network. We formalize this by introduction of the positive-semidefinite distance function $v(i_x, i_y) \rightarrow [0,\infty)$ for images $i_x$, $i_y$ (smaller values indicating more similar images). 
The relevance scoring function $S_{CNN}$ used in this model takes the following form: \begin{align*} &S_{CNN}(m, N_{f_i}, i) \\ &= \begin{cases} S_{TXT}(m, N_{f_i}) e^{ - bv(i_m,i)}, & v(i_m,i) < d \\ 0 & otherwise,\\ \end{cases} \end{align*} where $i_m$ is the image to which the caption $m$ refers and $d$ is a cutoff maximum distance, above which match candidates are considered irrelevant, and $b$ is a weight term which controls the impact of the visual distance score $v(i_m, i)$ on the overall score.\footnote{The value of $b =0.01$ was found on development data and kept constant throughout the experiments.} Our visual distance measure $v$ was computed using the VGG16 deep convolutional model of \newcite{SimonyanZisserman:15}, which was pre-trained on ImageNet \cite{DBLP:journals/corr/RussakovskyDSKSMHKKBBF14}. We extracted feature values for all input and reference images from the penultimate fully-connected layer ({\tt fc7}) of the model and computed the Euclidean distance between feature vectors of images. If no neighboring images fell within distance $d$, the text-based retrieval procedure $S_{TXT}$ was used as a fallback strategy, which occurred 47 out of 500 times on our test data. \paragraph*{Target Side Retrieval by Human Category Annotations.} \label{section:image_retrieval_human} For contrastive purposes, we evaluated a TSR-HCA retrieval model which makes use of the human object category annotations for MS COCO. Each image in the MS COCO corpus is annotated with object polygons classified into 91 categories of common objects. In this scenario, a match candidate $m$ is scored in the following way: \begin{align*} &S_{HCA}(m, N_{f_i}, i) \\ &= \delta(cat(i_m),cat(i)) S_{TXT}(m, N_{f_i}), \end{align*} where $cat(i)$ returns the set of object categories with which image $i$ is annotated. 
This amounts to enforcing a strict match between the category annotations of $i$ and the reference image $i_m$, thus pre-filtering the $S_{TXT}$ scoring to captions for images with strict category match.\footnote{Attempts to relax this strict matching criterion led to strong performance degradation on the development test set.} In cases where $i$ was annotated with a unique set of object categories and thus no match candidates with nonzero scores were returned by $S_{HCA}$, $S_{TXT}$ was used as a fallback strategy, which occurred 77 out of 500 times on our test data. \subsection{Translation Candidate Re-scoring} \label{section:rescoring} The relevance score $F(r, M_{f_i})$ used in the reranking model was computed in the following way for all three models: \begin{align*} &F(r, M_{f_i}) =\\ & Z_{M_{f_i}} \sum_{m \in M_{f_i}}\sum_{w_m \in typ(m)}\sum_{w_r \in tok(r)}\delta(w_m, w_r)idf(w_m) \end{align*} with normalization term \[ Z_{M_{f_i}} = (\sum_{m \in M_{f_i}}|tok(m)|)^{-1}, \] where $r$ is a translation candidate and $M_{f_i}$ is a list of $k_m$-top target side retrieval matches. Because the model should return a score that is reflective of the relevance of $r$ with respect to $M_{f_i}$, irrespective of the length of $M_{f_i}$, normalization with respect to the token count of $M_{f_i}$ is necessary. The term $Z_{M_{f_i}}$ serves this purpose.
1,108,101,565,912
arxiv
\section{Introduction} Leptoquarks are hypothetical particles that couple both to leptons and quarks and appear in several extensions of the Standard Model (SM) \cite{Carpentier:2010ue,Buchmuller:1986zs,Dorsner:2017ufx,Leurer:1993ap}. They are color triplets and were initially predicted in the Pati-Salam model \cite{Pati:1974yy} and other unified theories \cite{Georgi:1974sy,Georgi:1974yf,Fritzsch:1974nn}. Leptoquark vertices can violate the lepton universality in the SM and introduce generation changing interactions. In recent years the observation of the $B$-meson anomalies in $R_{(D^{\ast})}$ and $R_{(K)}$ measurements \cite{Aaij:2014ora, Aaij:2017vbb} has raised an interest in both vector (spin 1) and scalar (spin 0) leptoquarks. Indeed leptoquarks have become prominent candidates responsible for these observed deviations from the SM. Other authors have used leptoquarks as a possible solution to the long standing problem of the $(g-2)_{\mu}$ anomaly \cite{Davier:2010nc,Cheung:2001ip,Chakraverty:2001yg,Bauer:2015knc}. In several of the theoretical models that try to fit the anomalies the predicted leptoquark particles are a vector $U_1^{\mu}$ and two scalars $S_1$ and $S_3$, where $S_1$ is a singlet under $SU(2)_L$ and $S_3$ transforms as a triplet in $SU(2)_L$ \cite{Cox:2016epl,Calibbi:2017qbu,Blanke:2018sro,Buttazzo:2017ixm,Greljo:2018tuh,Barbieri:2015yvd,Bordone:2018nbg, Hiller:2017bzc,Sakaki:2013bfa,Angelescu:2018tyl,Bauer:2015knc,Crivellin:2017zlb,Davidson:1993qk}. \par Current searches for leptoquark pair production at LHC at $\sqrt{s}=13$ TeV have set a lower mass limit at around $1.7$ TeV \cite{Sirunyan:2018kzh,Aad:2020iuy}. Considering also the future upgrade of the LHC, if leptoquarks exist, in principle it would be possible to have one of these particles produced on-shell. Then the next natural step would be to study their properties in an effective field theory (EFT) approach. 
Soft collinear effective field theory (SCET) offers a consistent framework to describe the decay rates of these particles. \begin{comment} In the conventional EFT approach local operators of dimension $D\geqslant 5$ are added to the SM Lagrangian to build an EFT of the SM, the so called SMEFT\cite{Buchmuller:1985jz,Weinberg:1979sa,Grzadkowski:2010es}. These operators describe in a model independent way the low energy physics when off-shell higher degrees of freedom have been integrated out. The higher dimension operators are suppressed by ratios of the scales present in the problem, namely $v/\Lambda$, where $v\sim 245 GeV$ is the electroweak scale and $\Lambda$ is the higher scale of $\mathcal{O}(TeV)$. Instead in the scenario we are considering the on-shell discovered leptoquark with mass $M\sim\Lambda$ would be only one of a few other heavy particles all with a mass range around the large scale $\Lambda$. Then the argument that higher dimension operators would be suppressed by $\mathcal{O}(1/{M^{D-4}})$ is not valid any more and a local EFT would fail to systematically describe the decay rates of this particle. The decay products will carry large momentum fraction of the order of the mass of the heavy leptoquark and therefore operators containing higher order derivatives would still contribute at the order $\mathcal{O}({M}/{\Lambda})\sim 1$ even though naively they are of higher dimensions. To have a consistent description of the problem it requires not only disentangling the scales but also being able to include all these non-suppressed contributions from higher order operators. One framework that successfully achieves both of these tasks is the soft collinear effective field theory (SCET). \end{comment} SCET is a non-local EFT that quantitatively describes the decays of heavy particles into light and energetic ones, initially developed to study the $B$-meson decays \cite{Bauer:2000yr,Bauer:2001yt,Bauer:2000ew}. 
The approach of using SCET to analyse the decays of beyond the SM particles was initially introduced in \cite{Alte:2018nbn} as the SCET-BSM framework for the decays of a heavy singlet and was later applied to a model with vector-like quark mediators in \cite{Alte:2019iug}. In this work we use the SCET formalism to build the effective Lagrangians that describe the decays of the leptoquarks $U_1^{\mu}$, $S_1$ and $S_3$. We construct the operator basis for two and three body final states at leading and subleading order in the power counting parameter $\lambda$. The parameter $\lambda$ is of the order $ v/\Lambda$ where $\Lambda$ is some large scale, $v$ is the electroweak scale and $\Lambda \gg v$. In addition we use renormalization group techniques to resum the QCD and electroweak large logarithms in the Wilson coefficients of leading order operators. \par Here we assume that a leptoquark can couple to different families of leptons and quarks at the same time, which is different from the original assumption on leptoquark couplings in the Buchmüller-Rückl-Wyler model \cite{Buchmuller:1986zs}. The final states consist of SM particles and, to make the discussion more interesting, we also allow for the existence of a light right handed neutrino in the particle spectrum. This is a singlet under the SM gauge group $\nu_R(1,1,0)$ and enters in several models with neutrino mass generation \cite{Khalil:2006yi,Abbas:2007ag}. \par We begin in Section \ref{BasicSCET} with a short introduction on the basic elements of SCET relevant for this work. In Section \ref{HSEFT} we introduce heavy field effective theory that we need to describe gauge boson interactions of charged heavy scalars and vectors. Then in Sections \ref{sections1}, \ref{sections3} and \ref{sectionU1} we construct the operator basis for the leptoquarks $S_1$, $S_3$ and $U_1^{\mu}$ at $\mathcal{O}(\lambda^2)$ and $\mathcal{O}(\lambda^3)$. 
In Section \ref{secwilsoncoeff} we show the running of the Wilson coefficients of the operators and sum their large logarithms using renormalization group equations. Lastly in Section \ref{sectionmatching} we present a tree level matching for certain extensions of the SM and we finally conclude our results in Section \ref{conclusions}. We collect some of the expressions concerning technical details in the appendices. \section{Basic elements of SCET} \label{BasicSCET} The central idea of SCET lies in identifying the leading order momentum regions with respect to the parameter $\lambda$ for any given process and assigning those momentum regions to quantum fields. The relevant momenta for the on-shell decays of a heavy particle are the collinear momenta carried by the energetic decay products, the hard momenta that have been integrated out and the soft momenta. The large energy flow in the final states defines the so called collinear directions $ \vec{n}_i$. For each such direction we define two vectors $n_i^{\mu}=\lbrace 1, \vec{n}_i \rbrace$ and $\bar{n}_i^{\mu}=\lbrace 1, - \vec{n}_i \rbrace$ such that $n_i\cdot \bar{n}_i=2$. The freedom to rescale these light-like reference vectors leads to the so called reparametrization invariance in SCET \cite{Manohar:2002fd}, which is a remnant of Lorentz invariance. All the operators of a SCET Lagrangian must be reparametrization invariant. \par The four momentum of a particle moving in the $n_i$ collinear direction is written in terms of the reference vectors \begin{equation} p^{\mu}_{i}=p_i\cdot\bar{n}_i\,\frac{n_i^{\mu}}{2}+p_i\cdot n_i\,\frac{\bar{n}_i^{\mu}}{2}+{p_i}_{\perp}^{\mu}\,. \end{equation} The components of the collinear momenta scale as $(p_i\cdot n_i,p_i\cdot \bar{n}_i,{p_i}_{\perp})\sim M(\lambda^2,1,\lambda)$, where $M$ is the mass of the decaying leptoquark. 
In fact $M$ represents the scale of a whole physics sector that has been integrated out and in principle could contain other heavy particles with masses around the scale $M$. The soft momentum components are defined such that they all vanish in the limit where $\lambda\rightarrow 0$. The exact $\lambda$- scaling depends on the specific process but for most cases they are either ultra-soft, where $k_{us}\sim M(\lambda^2,\lambda^2,\lambda^2)$ or soft, where $k_{s}\sim M(\lambda,\lambda,\lambda)$. \par The fields in SCET have a well defined scaling with respect to the power counting parameter $\lambda$. Acting with an arbitrary number of anti-collinear derivatives on a collinear field leaves its $\lambda$-scaling unchanged. Then a collinear field $\psi_{n_i}(x)$ can contain a series of such derivatives. To account for this effect operators built out of collinear fields are non-local along the light like direction. For instance: \begin{equation} \psi_{n_i}(x+t\bar{n}_i)=\sum_{k=0}^{\infty}\,\frac{t^{k}}{k!}\,(\bar{n}_{i}\cdot\partial)^{k}\,\psi_{n_i}(x)\,, \label{collinearfield} \end{equation} where $t$ is the displacement in the anti-collinear direction. In the Lagrangian operators built out of such collinear fields always appear multiplied by Wilson coefficients that also depend on the $t$ parameters, and these products are integrated over the variables $t$. In this way an arbitrary dependence on the large derivatives $\bar{n}_{i}\cdot\partial$ is allowed. 
This property of collinear fields makes SCET a non local EFT and it is therefore necessary to introduce Wilson lines in each collinear direction $n_i$ defined as \cite{Bauer:2002nz,Eichten:1989zv}: \begin{equation} W^{(A)}_{n_i}(x)\,=P\, \exp\left[i\, g_{A}\,t^{a}_{A}\,\int_{-\infty}^0 ds\, \bar{n}_i \cdot A\,^{a}_{n_i}\,(x+s\bar{n}_i)\right]\,, \label{Wilsonline} \end{equation} where $A_{n_i}$ is a collinear gauge field and the $t^{a}$ is the corresponding group generator in the representation of the field where the Wilson line is acting on. The $g_{A}$ here is the gauge coupling constant corresponding to the gauge field $A$. For the gauge group $U(1)_{Y}$ the gauge field $A^{a}$ is replaced by the gauge field $B$ and the generators are the hypercharge $Y$ of the field. \par In SCET collinear spinor fields in $n_i$ direction are defined with the help of a projector operator $P_{n_i}=\frac{\slashed{n_i}\slashed{\bar{n_i}}}{4}$, such that this operator projects out only the large momentum component of the spinor and $P_{n_i}^2=P_{n_i}$. Then at leading order in $\lambda$ a collinear SM fermion scales as $\mathcal{O}(\lambda)$ and it is defined as: \begin{equation} \Psi_{n_i}(x)=\frac{\slashed{n}_i\slashed{\bar{n}}_i}{4}\,W_{n_i}^{\dagger}(x)\,\psi(x)\,. \label{fielddef} \end{equation} Here the Wilson line $W_{n_i}$ without the superscript $(A)$ is a product of Wilson lines $W^{(A)}_{n_i}$, one for each gauge group where the field $\psi (x)$ is transformed. The fermionic field $\Psi_{n_i}(x)$ obeys the constraint: \begin{equation} \slashed{\bar{n}}_i\Psi_{n_i}(x)=0 \end{equation} A SM collinear scalar field is also dressed in Wilson lines such that: \begin{align} \label{fielddef} \Phi_{n_i}(x)&=W_{n_i}^{\dagger }(x)\,\phi(x)\,, \end{align} and it scales $\sim \lambda$. These collinear fields in SCET defined in terms of Wilson lines are referred to as collinear gauge invariant building blocks. 
The gauge invariant building block for a boson $\mathcal{A}_{n_i}^{\mu}$ is defined as the corresponding strength tensor sandwiched between two Wilson lines \cite{Bauer:2001yt,Hill:2002vw}: \begin{equation} \mathcal{A}_{n_i}^{\mu}(x)=g_A\,\int^0_{-\infty} ds\, \bar{n}_{i,\nu} \left[W_{n_i}^{(A)^{\dagger}}\,F_{n_i}^{\nu\mu}\,W_{n_i}^{(A)}\right](x+s\bar{n}_i) \label{scetgaugeboson} \end{equation} For an Abelian gauge group such as $U(1)_Y$ the definition simplifies to: \begin{equation} \mathcal{B}_{n_i}^{\mu}(x)=g_{B}\,Y\,\int_{-\infty}^{0} ds\, \bar{n}_{i\nu}\,B^{\nu\mu}_{n_i}\,(x+s\bar{n}_i)\,, \label{Bgaugeinv} \end{equation} The different components of a vector field in SCET follow the same scaling as the corresponding momentum component. Then from the definition of the Wilson line in (\ref{Wilsonline}) a collinear field can emit any number of gauge bosons along its collinear direction suppressed only by powers of the coupling constant. In addition since $n_i^{\mu}$ in equation (\ref{scetgaugeboson}) is a light-like vector, the gauge invariant building block for a gauge field obeys the following constraint: \begin{equation} \bar{n}_i\cdot \mathcal{A}_{n_i}=0\,. \label{gauge fixing} \end{equation} The remaining components of a collinear gauge field with momentum $p\sim M(\lambda^2,1,\lambda)$ will scale as the corresponding momentum component \cite{Becher:2014oda}: \begin{equation} \mathcal{A}_{n_i\,,\perp}^{\mu}\, \sim\, \lambda ,\hspace{3.6cm} n_i\,\cdot\,\mathcal{A}_{n_i}\,\sim\, \lambda^2\ \,. \label{gaugescaling} \end{equation} where $\mathcal{A}_{n_i\,,\perp}^{\mu}\,$ is defined: \begin{equation} \mathcal{A}_{n_i\,,\perp}^{\mu}=\mathcal{A}_{n_i}^{\mu}-n_i\,\cdot\mathcal{A}_{n_i}\,\frac{\bar{n}_i^{\mu}}{2}\,. \end{equation} The component $n_i\cdot\mathcal{A}_{n_i}$ is power suppressed and in fact it can always be eliminated using a field redefinition \cite{Marcantonini:2008qn}. 
This implies that only the transverse component of a collinear gauge boson will be produced in the decay of a heavy particle. \par In principle it is possible to introduce SCET fields to describe also SM particles carrying soft momenta and these fields have well defined scaling with respect to $\lambda$, similarly to the collinear fields. In this work though we are interested in analysing two and three particle final states, at leading and subleading order in $\lambda$. Operators with soft fields are further suppressed as they appear starting from $\mathcal{O}(\lambda^3)$. The additional particle we allow as a final state, the right handed neutrino, is represented by a collinear field $\nu_R(x)$. Contrary to the SM fields this collinear field is not dressed in Wilson lines since it transforms trivially under the gauge group. For a heavy particle charged under the SM such as leptoquarks a consistent description requires them to be treated within a heavy particle effective theory framework, similarly to the heavy quark effective theory (HQET) \cite{Eichten:1989zv,Georgi:1990um,Neubert:1993mb}. In the next section we present a short overview of the heavy particle effective theory, which we will apply for all three leptoquarks. \section{Heavy Particle Effective Theory} \label{HSEFT} In our framework of describing the decays of leptoquarks into SM particles we are integrating out the heavy degrees of freedom around the scale of the leptoquark mass. This restricts the interactions of these heavy particles only through soft momentum transfer $k\sim M(\lambda^2,\lambda^2,\lambda^2)$ such that the leptoquarks would still remain on shell via such interactions. The four momentum of such a particle with mass $M$ can be written: \begin{equation} p^{\mu}=Mv^{\mu}+k^{\mu}\,, \end{equation} where $v^{\mu}=(1,0,0,0)$, $k^{\mu}$ is some residual momentum of the order of the EW scale in this case. 
In the on shell limit the heavy scalar can be described by a quantum field $S_v(x)$ such that it admits the field redefinition where $S(x)\rightarrow e^{-iM_{S}v\cdot x}{S}_v(x)$. Inserting this expression in the Lagrangian of a complex scalar field: \begin{equation} \mathcal{L}_{\text{scalar}}=(D^{\mu}S)^{\dagger} \,D_{\mu}S-M^2\,S^{\dagger}S, \end{equation} we get: \begin{equation} \label{LHSET} \mathcal{L}_{\text{scalar}}=2M_{S}\,{S}_v^{\dagger}\,\left( i v\cdot D{S}_v\right) + (D^{\mu}{S}_v)^{\dagger}D_{\mu}{S}_v, \end{equation} where $D_{\mu}=\partial _{\mu}-igG_{\mu}^at^a-ig_2W_{\mu}^a\tau^a-ig_1YB_{\mu}$. The covariant derivative in this case will pick up only the soft component of the momentum $p^{\mu}$ since $S_{v}(x)=e^{-ix\cdot k}S_v(k)$. The second term in (\ref{LHSET}) is suppressed by $(1/M_{S})$ relative to the first term. At leading power then the scalar Lagrangian becomes the effective Lagrangian $\mathcal{L}_{\text{HSET}}$ describing the propagation of the heavy field $S_{v}$ such that: \begin{equation} \label{LHSETfinal} \mathcal{L}_{\text{HSET}}=2M_{S}\,\Bigg[{S}_v^{\dagger}\,\left( i v\cdot D{S}_v\right) +\mathcal{O}\left(\frac{1}{M_{S}}\right) \Bigg] \end{equation} The physical quantities described by $\mathcal{L}_{\text{HSET}}$ are mass independent at leading order. This observation is similar to HQET and the accidental symmetries that arise there \cite{Neubert:1993mb}. This is due to the fact that the exchange of an ultra-soft gauge boson cannot probe the quantum numbers of the particle, instead one would need a hard momentum exchange. In the analysis for the two scalar leptoquarks $S_1$ and $S_3$ we will neglect the second term in (\ref{LHSET}). \par In a similar fashion, a consistent description of the decay rates of a heavy vector particle such as $U_1^{\mu}$ requires a heavy vector effective theory (HVET) that separates the leading power contribution from the vector field $U_1^{\mu}(x)$. 
We separate the transverse and longitudinal components of the vector $U_1^{\mu}$ using the projector operators defined in terms of the reference vector $v^{\mu}$: \begin{equation} T^{\mu\nu}_{\parallel}={v^{\mu}v^{\nu}}\,\,,\hspace{2cm} T^{\mu\nu}_{\perp}=g^{\mu\nu}-{v^{\mu}v^{\nu}} \end{equation} where $v^{\mu}$ is a reference vector in the direction of the four momentum of $U_1^{\mu}$. With this definition of $v^{\mu}$ we have $ v^2=1$. Then: \begin{equation} U_{1\parallel}^{\mu}={T_{ \parallel}}_{ \mu\nu} U_{1}^{\nu}\,\,,\hspace{2cm} U_{1\perp}^{\mu}={T_{\perp}}_{\mu\nu}U_{1}^{\nu}\,, \end{equation} where $U_{1\parallel}^{\mu}$ is the component parallel to the direction of four momentum of the leptoquark and $U_{1\perp}^{\mu}$ is the component perpendicular to its four momentum. Looking at the two point correlation functions \cite{Becher:2014oda} for these two fields it is not difficult to show that the $U_{1\parallel}^{\mu}$ is power suppressed compared to $U_{1\perp}^{\mu}$. This means at leading power the $U_{1}^{\mu}$ is produced perpendicularly polarized and $v\cdot U_{1}=0$. Its longitudinal component is integrated out in the effective Lagrangian. We derive this Lagrangian starting from the most general gauge invariant Lagrangian for a massive vector field $U_1^{\mu}(x)$\footnote{The minus sign in front of the second term is necessary to get the equation $D_{\mu}U^{\mu}_1=0$ in the massless limit.}: \begin{equation} \mathcal{L}=(D^{\mu}{U_1}^{\nu})^{\dagger}\,D_{\mu}{U_1}_{\nu}-(D^{\nu}{U_1}^{\mu})^{\dagger}\,D_{\mu}{U_1}_{\nu}-{M^2}\,{{U_1}^{\mu}}^{\dagger}{U_1}_{\mu} \label{KinetictermU1} \end{equation} with $D_{\mu}=\partial_{\mu}-igG_{\mu}^{a}t^{a}-\frac{2}{3}i g_1B_{\mu} $. We perform a field transformation on $U_1^{\mu}(x)$ such that: $U_1^{\mu}(x)\rightarrow e^{-i\,M_{U_1}v\cdot x}U_{1v}^{\mu}(x)$, where $U_{1v}^{\mu}(x)$ contains only the soft momentum fluctuations. 
Then the Lagrangian that describes the interactions of the heavy field $U_{1v}^{\mu}(x)$ reads: \begin{equation} \mathcal{L}=2 M_{U_{1}}\,{U_{1v}^{\mu}}^{\dagger}\left(i v\cdot D {U_{1v}}_{\mu}\right)+(D_{\mu}{U_{1v}}_{\nu})^{\dagger}(D^{\mu}{U_{1v}}^{\nu}) -(D^{\nu}{U_{1v}}^{\mu})^{\dagger}(D_{\mu}{U_{1v}}_{\nu})+\mathcal{L}\left(v\cdot U_{1}\right)\,, \label{HVEFT} \end{equation} where the second and the third term are suppressed relative to the first one by $1/M_{U_1}$. The $\mathcal{L}\left(v\cdot U_{1}\right)$ contains power suppressed terms that are integrated out. We then define the HVET Lagrangian: \begin{equation} \begin{aligned} \mathcal{L}_{\text{HVET}}&=2 M_{U_{1}}\,\Bigg[ {U_{1\perp v}^{\mu}}^{\dagger}\left(i v\cdot D {U_{1 v\perp}}_{\mu}\right)+\mathcal{O}\left(\frac{1}{M_{U_1}}\right)\Bigg]\,\\ & \equiv 2 M_{U_{1}} \Bigg[{T_{\perp}}_{\mu\nu}\Big[\,{U_{1 v}^{\mu}}^{\dagger}\left(i v\cdot D U_{1 v}^{\nu}\right)\Big]+\mathcal{O}\left(\frac{1}{M_{U_1}}\right)\Bigg]\,, \end{aligned} \end{equation} where ${U_{1\perp v}}_{\mu}$ is the perpendicular component of the field ${U_{1v}}_{\mu}$. The leading term in the above Lagrangian is the same as the one in (\ref{LHSET}). In other words at leading power the renormalization of the heavy vector field $U_{1v}^{\mu}(x)$ and of its interactions with SM particles is similar to renormalizing the field and the interactions of a heavy scalar. \section{\boldmath SCET formalism for the scalar leptoquark $S_1(3,1,-\frac{1}{3})$} \label{sections1} The scalar leptoquark $S_1$ is a color triplet, an $SU(2)_L$ singlet and has hypercharge $Y=-1/3$. It couples to SM particles similarly to a right handed down type quark. This particular leptoquark has been studied as a viable solution both to the flavour anomalies and the $(g-2)_{\mu}$ anomaly. Note that its quantum numbers allow the $S_1$ to couple in operators that would induce proton decays though we neglect those operators here. 
In the literature they are usually suppressed assuming the realization of certain symmetries such as Peccei-Quinn symmetry or other discrete symmetries \cite{Bauer:2015knc,Bajc:2005zf,Cox:2016epl}. \subsection[Leading power two jet operators for $S_1$]{\boldmath Leading power two jet operators for $S_1$ }\label{Sec1} We start with the SCET Lagrangian that describes the decays of the $S_1$ at leading order in scale ratios $\lambda$. The decay products are all collinear gauge invariant building blocks presented in the introduction and the leptoquark is described within the heavy scalar effective theory by the heavy field ${S_1}_v(x)$. At lowest order in $\lambda$ the symmetries allow for $S_1$ to couple to two collinear fermions moving in opposite directions so the leading order operators are $\mathcal{O}(\lambda^2)$. We use the subscript ${n_i}$ to denote a collinear field in SCET moving in the $n_i$ direction. Then the SCET Lagrangian at $\mathcal{O}(\lambda^2)$ reads: \begin{equation} \begin{aligned} \label{Lagrangian1} \mathcal{L}_{S_1}^{(\lambda^2)}\,=&\,C^{ij}_{S_1^{\ast}u^{c}_{R}\ell_R}\, {\bar{u}}\,^{c,i}_{R,n_1}\, \ell^j_{R,n_2} \,S_{1v}^{\ast}\, +\,C^{ij}_{S_1^{\ast}Q^{c}_{L}L_L}\,\bar{Q}\,^{c,i}_{L,n_1}\,i\sigma_2\,L^j_{L,n_2}\,S_{1v}^{\ast} \,\\ &+ C^{ij}_{S_1^{\ast}d^{c}_{R}\nu_R}\,\bar{d}\,^{c,i}_{R,n_1}\, \nu_{R,n_2}^j\,S^{\ast}_{1v}\,+\,(n_1\leftrightarrow n_2)\, +\,\text{ h.c.}\,, \end{aligned} \end{equation} where $C^{ij}_{S_1f_1f_2}$ are the Wilson coefficients of the corresponding operators. We label the operators and their Wilson coefficient by their field content. The fields $Q_{L,n_i}$ and $L_{L,n_1}$ are the collinear quark and lepton doublets while $u_{R,n_i}$ and $d_{R,n_i}$, $\ell_{R,n_i}$ and $\nu_{R,n_i}$ stand for up and down type collinear quarks, right handed collinear lepton and right handed collinear neutrino respectively. The indices $i,j$ where $\lbrace i,j\rbrace\in \lbrace 1,2,3\rbrace$ label the fermion families. 
As mentioned before we are considering here the most general case where the leptoquark can decay into a quark and a lepton of different generations. This is the case which gives rise to flavour changing neutral currents (FCNC) in the model \cite{Mitsou:2004hm}. The fields that carry a superscript $c$ are the charge conjugate field defined as $\Psi^c=C\bar{\Psi}^T$ with $C$ being the charge conjugate operator. As a result all the operators in (\ref{Lagrangian1}) violate fermion number conservation by $\Delta F=2$. The above Lagrangian contains all the non-vanishing operators at $\mathcal{O}(\lambda^2)$ that are SM gauge invariant, Lorentz invariant and reparametrization invariant in SCET. For simplicity we keep the coordinate and scale dependence of the fields and the Wilson coefficients implicit but according to equation (\ref{collinearfield}) the above operator products should be understood as products of non-local fields. Considering for instance the first term in (\ref{Lagrangian1}) we would have: \begin{equation} C^{ij}_{S_1^{\ast}u^{c}_R\ell_R} \,\bar{u}^{c,i}_{R,n_1} \,\ell^j_{R,n_2} S_{1v}^{\ast}\equiv \int ds \,dt\, \bar{C}_{S_1^{\ast}u^{c}_{R}\ell_R}^{ij}\,(s,t,\Lambda,\mu)\,\bar{u}^{c,i}_{R,n_1}\,(x+s\bar{n}_1)\, \ell^j_{R,n_2}(x+t\bar{n}_2)\, S_{1v}^{\ast}(x)\,, \end{equation} where $\Lambda$ represents the large scale that has been integrated out and $\mu$ is the factorization scale of the operators. Inserting the exponential form of the series in (\ref{collinearfield}) and apply it on the Fourier transform of the fields we end up with the following: \begin{equation} \int\, ds\, dt \,\bar{C}_{S_1^{\ast}u^{c}_{R}\ell_R}^{ij}(s,t,\Lambda,\mu)\,e^{i t\bar{n}_1\cdot\mathcal{P}_1}\, e^{i s\bar{n}_2\cdot\mathcal{P}_2} \,\bar{u}^{c,i}_{R,n_1}(x)\, \ell^j_{R,n_2}(x) \,S_{1v}^{\ast}(x), \end{equation} where $\mathcal{P}_i$ is a momentum label. 
This is a generalization to the four momentum $p_i$ carried by the field with the index $i$ where now $\mathcal{P}_i$ denotes the total momentum carried by all the fields with the index $i$ and $\bar{n}_i\cdot \mathcal{P}_i$ will pick up the total large momentum component in the direction $n_i$. The Wilson coefficients appearing in the Lagrangian in (\ref{Lagrangian1}) are defined as the Fourier transform of the Wilson coefficients $\bar{C}(s,t,\Lambda,\mu)$ such that: \begin{equation} C\,\equiv\, C(\bar{n}_1\cdot\mathcal{P}_1,\bar{n}_2\cdot\mathcal{P}_2,\Lambda,\mu)\,=\,\int\, ds\, dt\, \bar{C}(s,t,\Lambda,\mu)\,e^{is\bar{n}_1\cdot\mathcal{P}_1}\,e^{it\bar{n}_2\cdot\mathcal{P}_2}\,. \label{Wilsoncoeff} \end{equation} Using arguments of Lorentz and reparametrization invariance it follows that at leading order the dependence of $C\,(\bar{n}_1\cdot\mathcal{P}_1,\bar{n}_2\cdot\mathcal{P}_2,\Lambda,\mu)$ on the momenta can only be proportional to the operator $\mathcal{P}^2$, where $\mathcal{P}$ is the operator carrying the total momentum of all the final states, whose eigenvalue is the mass of the leptoquark $M$ for the two jet operators \cite{Alte:2018nbn}. From now on it is implied that all the Wilson coefficients of the two jet operators are defined as in (\ref{Wilsoncoeff}). Because of reparametrization invariance the $\bar{n}_i\cdot \mathcal{P}$ scalar product can only depend on the leptoquark mass $M$ such that: \begin{equation} C\equiv C(\bar{n}_1\cdot\mathcal{P}_1,\bar{n}_2\cdot\mathcal{P}_2,\Lambda,\mu)\equiv C(\Lambda,M,\mu)\,. \end{equation} The Lagrangian in (\ref{Lagrangian1}) contains only dimension four operators and therefore the Wilson coefficients are dimensionless. 
We write the Lagrangian in a compact form: \begin{equation} \begin{aligned} \mathcal{L}_{S_1}^{(\lambda^2)}&=C^{ij}_{S^{\ast}_1u^{c}_R\ell_R}(\Lambda,M_{S_1},\mu)\, \mathcal{O}^{ij}_{S_1^{\ast}u^{c}_{R}\ell_R}(\mu)\, + \,C^{ij}_{S_1^{\ast}Q^{c}_{L}L_L}(\Lambda,M_{S_1},\mu)\,\mathcal{O}^{ij}_{S_1^{\ast}Q^{c}_{L}L_L}(\mu)\, \\ &+\,C_{S_1^{\ast}d^{c}_{R}\nu_R}^{ij}(\Lambda,M_{S_1},\mu)\, \mathcal{O}_{S_1^{\ast}d^{c}_{R}\nu_R}^{ij}(\mu)\, +\,\text{h.c.}\,, \end{aligned} \end{equation} with the operator basis: \begin{equation} \begin{aligned} \mathcal{O}^{ij}_{S_1^{\ast}u^{c}_{R}\ell_R}\,&=\,\bar{u}^{c,i}_{R,n_1}\, \ell^j_{R,n_2} S_{1v}^{\ast}+(n_1\leftrightarrow n_2)\,, \\ \mathcal{O}^{ij}_{S_1^{\ast}Q^{c}_{L}L_L}\,&=\,\bar{Q}^{c,i}_{L,n_1}\,i\,\sigma_2\, L^j_{L,n_2}\,S_{1v}^{\ast}+(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_1^{\ast}d^{c}_{R}\nu_R}^{ij}\,&= \,\bar{d}^{c,i}_{R,n_1}\, \nu^j_{R,n_2}\,S^{\ast}_{1v}+(n_1\leftrightarrow n_2)\,. \end{aligned} \label{S1operators} \end{equation} The first two operators in the above equation define a two jet final state while the decay into a $\nu_R$ is a mono jet signature plus missing energy. From this operator basis it is straightforward to calculate the tree level decay rates of the leptoquark $S_1$. In this case the SM fields and the Wilson coefficients are transformed from the weak basis to the mass basis. We collect the components $\mathrm{C}^{ij}$ of the Wilson coefficients in the mass basis in the matrix $\boldsymbol{\mathrm{C}}$, which transforms with the transformation matrix of the various fields in that operator. 
\par The two body decays at $\mathcal{O}(\lambda^2)$ are fixed by kinematics and in the limit of massless final states the total decay rates for the singlet $S_1$ are: \begin{equation} \begin{split} \Gamma(S_1\rightarrow {u_R^{c,j}}\bar{\ell}_{R}^{i})&=\,\frac{M_{S_1}}{16\pi}\,|\, \mathrm{C}^{ij}_{S_1^{\ast}u^{c}_{R}\ell_R} \,|^2 \,,\\ \Gamma(S_1\rightarrow Q_L^{c,i}\bar{L}_{L}^{j})&=\,\frac{M_{S_1}}{16\pi}\,|\,\mathrm{C}^{ij}_{S_1^{\ast}Q^{c}_LL_L}\,|^2\,, \\ \Gamma(S_1\rightarrow d^{c,i}_R \bar{\nu}^{j}_R )&=\,\frac{M_{S_1}}{16\pi}\,|\,\mathrm{C}^{ij}_{S_1^{\ast}d^c_R\nu_R}\,|^2 \,,\\ \end{split} \end{equation} where $M_{S_1}$ is the mass of $S_1$. For different final states the decay rates differ only by their Wilson coefficients. \subsection[Subleading power two jet operators for $S_1$]{\boldmath Subleading power two jet operators for $S_1$} \label{secs1sub} It is of interest to further explore the beyond SM SCET Lagrangian at $\mathcal{O}(\lambda^3)$. At subleading order in power counting $S_1$ decays into two and three jet final states. In the first case two collinear particles in $n_i$ direction share the total jet momentum $\mathcal{P}$ such that one of the particles will carry momentum $u\mathcal{P}$, with $0<u<1$ and the other momentum $(1-u)\mathcal{P}$. Since $u$ can have any value between $0$ and $1$ one has to integrate over this parameter space in the Lagrangian. 
Applying the same arguments of gauge invariance, Lorentz invariance and reparametrization invariance the two body decay Lagrangian for the leptoquark $S_1$ at subleading order reads: \begin{equation} \begin{aligned} \mathcal{L}_{S_1}^{(\lambda^3)}\bigg\rvert_{\text{2 jet}}&\,=\,\frac{1}{\Lambda}\,\Biggl[\, C_{S_1^{\ast}L_L\Phi d_R}^{(0)^{ij}}(\Lambda,M_{S_1},\mu)\,{\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(0)^{ij}}}(\mu)\,\\ &\hspace{1.9cm}+\,{C_{S_1Q_L\Phi \nu_R}^{(0)^{ij}}}(\Lambda,M_{S_1},\mu)\,\mathcal{O}_{S_1Q_L\Phi \nu_R}^{(0)^{ij}}(\mu)\,\Biggr]\,\\ &\,+\,\frac{1}{\Lambda}\,\Biggl [ \sum_{k=1,2}\int_{0}^{1}du\Biggl(\,{C_{S_1^{\ast}L_L\Phi d_R}^{(k)^{ij}}}(\Lambda,M_{S_1},\mu,u)\,{\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(k)^{ij} }}(\mu,u)\,\\ &\hspace{1.9cm}+\,{C_{S_1Q_L\Phi \nu_R}^{(k)^{ij}}}(\Lambda,M_{S_1},\mu,u)\,{\mathcal{O}_{S_1Q_L\Phi\nu_R}^{(k)^{ij}}}(\mu,u) \,\\ &\hspace{1.9cm}+\,{C_{S_1d_R B\nu_R}^{(k)^{ij}}}(\Lambda,M_{S_1},\mu,u)\,{\mathcal{O}_{S_1d_R B\nu_R}^{(k)^{ij}}} (\mu,u)\, \Biggr)\,+\,\text{h.c.}\, \Biggr]\,. \end{aligned} \label{subS1} \end{equation} We label the operators by their field content and $B$ is the $U(1)_Y$ gauge boson. To distinguish the two jet operators at $\mathcal{O}(\lambda^3)$ we use the superscript $(k)$ for $k=1\,,2$ which denotes the collinear direction in which the third field with momentum $u\mathcal{P}_i$ is emitted. The operators labelled by $(0)$ contain a zero momentum field $\Phi^{(0)}$ such that: \begin{equation} \begin{aligned} &{\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(0)^{ij}}}\,=\,\bar{L}^{i}_{L,n_1}\,\tilde{\Phi}^{(0)}\,d^{j}_{R,n_2} \,S_{1v}^{\ast}\, +\,(n_1\leftrightarrow n_2)\,,\\ &{\mathcal{O}_{S_1Q_L\Phi \,\nu_R}^{(0)^{ij}}}\,=\,\bar{Q}^{i}_{L,n_1}\,{\Phi}^{(0)}\nu_{R,n_2}^{j}\,S_{1v}\, +\, (n_1\leftrightarrow n_2)\,, \end{aligned} \label{Eq.1} \end{equation} where $\tilde{\Phi}^{(0)}=i\sigma_2{\Phi^{(0)}}^{\ast}$. 
The zero momentum field $ \Phi^{(0)} $ has the gauge quantum numbers of the Higgs doublet but it does not transform under gauge transformations in SCET. After electroweak symmetry breaking it can be rotated to: \begin{equation} \Phi^{(0)}\,=\,\frac{1}{\sqrt{2}}\,(0,v)^T\,. \label{phizero} \end{equation} These operators will give a non-vanishing contribution to the two body decay rates of $S_1$ at $\mathcal{O}(\lambda^3)$. The second equation in (\ref{Eq.1}) describes a mono-jet signature in the detector plus missing energy from the $\nu_R$. All the fields in the remaining operators in the Lagrangian (\ref{subS1}) carry momentum different from zero. \par The Wilson coefficients for the two jet Lagrangian at $\mathcal{O}(\lambda^3)$ depend on the parameter $u$ if a particle with non zero momentum is emitted within the same jet. The superscript $(u)$ on the field implies the presence of a $\delta$- function which is there to ensure that the large momentum component of the second particle in the $i^{th}$ jet is fixed by $u\left(\bar{n}_i\cdot\mathcal{P}_i\right)$. For an explicit derivation see \cite{Alte:2018nbn}. For example: \begin{equation} \begin{aligned} {\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(1){ij}}}(u)&\,=\,\bar{L}^{i}_{L,n_1}(x)\, \tilde{ \Phi}_{n_1}^{(u)}(x)\,d^{j}_{R,n_2}(x)\, S_{1v}^{\ast}(x)\,\\ &\,\equiv\, \bar{L}^{i}_{L,n_1}(x)\, \delta(u-\frac{\bar{n}_1\cdot\mathcal{P}_{\Phi}}{\bar{n}_1\cdot\mathcal{P}_1})\,\tilde{ \Phi}_{n_1}(x)\,d^{j}_{R,n_2}(x)\, S_{1v}^{\ast}(x)\,. 
\end{aligned} \end{equation} The other three operators contain only fermionic fields and the SM scalar $\Phi(x)$: \begin{equation} \begin{aligned} {\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(2)^{ij}}}(u)&\,=\,\bar{L}^{i}_{L,n_1}\, \tilde{ \Phi}_{n_2}^{(u)}\,d^{j}_{R,n_2}\, S_{1v}^{\ast}\, +\, (n_1 \leftrightarrow n_2)\,,\\ {\mathcal{O}_{S_1Q_L\Phi\nu_R}^{(1)^{ij}}}(u)\,&=\,\bar{Q}^{i}_{L,n_1}\,{\Phi}_{n_1}^{(u)}\,\nu^{j}_{R,n_2}\,S_{1v} \,+\, (n_1\leftrightarrow n_2)\,,\\ {\mathcal{O}_{S_1Q_L\Phi\nu_R}^{(2)^{ij}}}(u)&\,=\,\bar{Q}^{i}_{L,n_1}\,{\Phi}_{n_2}^{(u)}\,\nu^{j}_{R,n_2}\,S_{1v} \,+ \,(n_1\leftrightarrow n_2)\,. \end{aligned} \end{equation} Note that two fermionic fields cannot be emitted in the same $n_i$ direction since that would give a vanishing contribution due to $n^2=\bar{n}^2=0$. The last line in (\ref{subS1}) contains the same chirality operators built out of a down-type quark, a right handed neutrino and the $U(1)_Y$ gauge boson. To maintain the subleading order power counting we can only include the perpendicular component of the gauge invariant building block $\mathcal{B}_{\mu}^{\perp}\sim \mathcal{O}(\lambda)$ of the gauge field. 
The last two operators in (\ref{subS1}) then are: \begin{equation} \begin{aligned} {\mathcal{O}_{S_1d_R B\nu_R}^{(1)^{ij}}}(u)\,&=\,{\bar{d}}^{i}_{R,n_1}\, \slashed{\mathcal{B}}^{\perp,(u)}_{n_1}\,\nu^{j}_{R,n_2}\,S_{1v}\, +\, (n_1\leftrightarrow n_2)\,,\\ {\mathcal{O}_{S_1d_R B\nu_R}^{(2)^{ij}}}(u)\,&=\,{\bar{d}}^{i}_{R,n_1}\, \slashed{\mathcal{B}}^{\perp,(u)}_{n_2}\,\nu^{j}_{R,n_2}\,S_{1v}\, +\, (n_1\leftrightarrow n_2)\,, \end{aligned} \end{equation} where the $\slashed{\mathcal{B}}^{\perp}_n=\gamma_{\perp}^{\mu}\,\mathcal{B}^{\perp,\mu}_n$ such that the perp component of the $\mathcal{B}_{n}$ is defined as: \begin{equation} \mathcal{B}_{n}^{\perp,\mu}\,=\,\mathcal{B}_{n}^{\mu}\,-n\cdot\mathcal{B}\,\frac{\bar{n}^\mu}{2}\,, \label{vecperp} \end{equation} and \begin{equation} \gamma^{\mu}_{\perp}\,=\,\gamma^{\mu}\,-\,\frac{\slashed{n}_1}{n_1\cdot {n}_2}\,{n}_2^{\mu}\,-\,\frac{\slashed{n}_2}{n_1 \cdot n_2}\,n_1^{\mu}\,. \label{gamma} \end{equation} There are no charge conjugate fields arising at $ \mathcal{O}(\lambda^3)$ and therefore all the operators conserve the fermion number. Moreover this implies no mixing between the leading and sub-leading order operators for the $S_1(3,1,-\frac{1}{3})$. Since all the operators are of canonical dimension five we multiply each term by $1/\Lambda$ so that the Wilson coefficients are dimensionless. It is instructive to do so because effectively the Wilson coefficients play the role of coupling constants. \par From the operator basis at $\mathcal{O}(\lambda^3)$ it is not difficult to derive the two body decays of $S_1$ at subleading order: \begin{equation} \begin{aligned} \Gamma(S_1\rightarrow \bar{L}_{L}^{i}d^{j}_R)\,&=\,\frac{v^2}{2}\,\frac{M_{S_1}}{16\pi}\, \frac{ |\, \mathrm{C}_{S_1^{\ast}L_L\Phi d_R}^{(0)^{ij}} \,|^2}{\Lambda^2}\,, \\ \Gamma(S_1\rightarrow \bar{Q}_{L}^{i}\bar{\nu}^{j}_R )\,&=\,\frac{v^2}{2}\frac{M_{S_1}}{16\pi}\,\frac{ |\,\mathrm{C}_{S_1Q_L\Phi\nu_R}^{(0)^{ij}}\,|^2 }{\Lambda^2}\,. 
\end{aligned} \end{equation} They are both suppressed by a factor of $ {v^2}/{\Lambda^2}$ compared to the two body decay rates from $\mathcal{O}(\lambda^2)$ operators. \subsection[Leading power three body decays for $S_1$]{\boldmath Leading power three body decays for $S_1$} It is possible to have the same field content as in (\ref{subS1}) with each collinear field emitted in one separate collinear direction $n_i$. These operators describe the decays of the leptoquark $S_1$ into three jet final states though the phase space would be much smaller and the decay rates further suppressed. Then the $S_1$ three jet Lagrangian at $\mathcal{O}(\lambda^3)$ is: \begin{equation} \begin{split} \mathcal{L}_{S_{1v}}^{(\lambda^3)}\bigg\rvert_{\text{3 jet}}\,=\,\frac{1}{\Lambda}\,\Biggl [&\,C_{S_1^{\ast}L_L\Phi d_R}^{ij}(\Lambda,M_{S_1},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{ij} (\mu)\, \\ &\,\,+\,C^{ij}_{S_1Q_L\Phi\nu_R}(\Lambda,M_{S_1},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}^{ij}_{S_1Q_L\Phi\nu_R}(\mu)\, \\ & \,\,+\,C_{S_1d_R B\nu_R}^{ij}(\Lambda,M_{S_1},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_1d_RB\nu_R}^{ij} (\mu)\,+\,\text{h.c.}\, \Biggr]\,. \end{split} \label{3jetS1} \end{equation} The Wilson coefficients in this case depend also on the invariant mass $m^2_{k\ell}$ for any $(k,\ell)$ pair of final state particles, where $k\neq \ell\in \lbrace 1,2,3\rbrace$ . 
The operators read: \begin{equation} \begin{aligned} \mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{ij}\,&=\,\bar{L}^{i}_{L,n_1} \, \tilde{ \Phi}_{n_3}d^{j}_{R,n_2}\, S_{1v}^{\ast}\, +\, (n_{1}\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_1Q_L\Phi\nu_R}^{ij}\,&=\,\bar{Q}^{i}_{L,n_1}\,{\Phi}_{n_3}\nu^{j}_{R,n_2}\,S_{1v} \,+\, (n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_1d_R B \nu_R}^{ij}\,&=\,\bar{d}^{i}_{R,n_1}\, \slashed{\mathcal{B}}^{\perp}_{n_3}\,\nu^{j}_{R,n_2}\,S_{1v}\, +\, (n_1\leftrightarrow n_2)\,, \label{ZandgammadecayS1} \end{aligned} \end{equation} where $n_1, n_2, n_3$ are the three collinear directions each defining a jet signature in a possible event in the experiment. From the three jet Lagrangian in (\ref{3jetS1}) we compute the squared matrix element for a three body decay of the leptoquark $S_1$ into two fermions and a Higgs boson: \begin{equation} \begin{aligned} \mid\mathcal{M}(S_1\rightarrow \bar{L}_{L}^{i} h {d}^{j}_R)\mid^2\,&=\,\frac{|\, \mathrm{C}_{S_1^{\ast}L_L\Phi d_R}^{ij}\,| ^2}{4\Lambda^2}\,(n_1\cdot n_2)\,(\bar{n}_1\cdot p_1)\,(\bar{n}_2\cdot p_2)\,\\ &\approx\,\frac{| \mathrm{C}_{S_1L_L\Phi d_R}^{ij}| ^2}{2\Lambda^2}\, m_{Ld}^2\,,\\ \mid\mathcal{M}^2(S_1\rightarrow \bar{Q}_{L}^{i}h{\nu}^{j}_R)\mid^2\,&=\,\frac{|\, \mathrm{C}_{S_1Q_L\Phi\nu_R}^{ij}\,| ^2}{4\Lambda^2}\,(n_1\cdot n_2)\,(\bar{n}_1\cdot p_1)\,(\bar{n}_2\cdot p_2)\,\\ &\approx\,\frac{|\, \mathrm{C}_{S_1Q_L\Phi\nu_R}^{ij}\,| ^2}{2\Lambda^2}\, m_{Q\nu}^2\,, \end{aligned} \end{equation} where $m_{Ld}^2=(p_L+p_d)^2$, $m_{Q\nu}^2=(p_Q+p_{\nu})^2$ and we have used the following approximation: \begin{equation} \begin{aligned} m^{2}_{12}&=\frac{1}{2}(n_{1}\cdot n_{2})(\bar{n}_{1}\cdot p_1)(\bar{n}_2\cdot p_2)+\mathcal{O}(\lambda^2)\,\\ &\approx \frac{1}{2}(n_{1}\cdot n_{2})(\bar{n}_{1}\cdot p_1)(\bar{n}_2\cdot p_2) \end{aligned} \end{equation} Then the differential decay rates from the above contributions read: \begin{equation} \begin{aligned} 
\frac{d^2\Gamma(S_1\rightarrow\bar{L}^{i}h{d}^{j}_R)}{dm_{hd}^2\,dm_{Ld}^2}\,&=\,\frac{1}{512\pi^3}\,\frac{|\, \mathrm{C}_{S_1^{\ast}Ld\Phi}^{ij}\,| ^2}{\Lambda^2} \,\frac{m_{Ld}^2}{M_{S_1}^3}\,,\\ \frac{d^2\Gamma(S_1\rightarrow \bar{Q}^{i}h{\nu}^{j}_R)}{dm_{Qh}^2\,dm_{Q\nu}^2}\,&=\,\frac{1}{512\pi^3}\,\frac{|\, \mathrm{C}_{S_1Q\nu\Phi}^{ij}\,| ^2}{\Lambda^2} \,\frac{m_{Q\nu}^2}{M_{S_1}^3}\,. \end{aligned} \end{equation} From equation (\ref{ZandgammadecayS1}) we find the following differential decay rate for photon and $Z$ boson decays: \begin{equation} \begin{aligned} \frac{d^2\Gamma(S_1\rightarrow d_R^{i}\gamma\nu_R^{j})}{dm^2_{\gamma d}\,dm^2_{\gamma\nu}}\,&=\,\frac{\alpha_1}{64\pi^2}\,\frac{1}{\cos\theta_w}\,\frac{| \mathrm{C}_{S_1d_R B\nu_R}^{ij}| ^2\, |Y|^2}{\Lambda^2}\,\frac{m^2_{d\nu}}{M^3_{S_1}}\,\frac{(m_{d\gamma}^2)^2\,+\,(m^2_{\nu \gamma})^2}{(M^2_{S_1}-m^2_{d\nu})^2}\,,\\ \frac{d^2\Gamma(S_1\rightarrow d_R^{i}\nu_R^{j}Z)}{dm^2_{d\nu}\,dm^2_{Z\nu}}\,&=\frac{\alpha_1}{64\pi^2}\,\frac{1}{\sin\theta_w}\,\frac{| \mathrm{C}_{S_1d_R B\nu_R}^{ij}| ^2\, |Y|^2}{\Lambda^2}\,\frac{m^2_{d\nu}}{M^3_{S_1}}\,\frac{(m_{dZ}^2)^2\,+\,(m^2_{\nu Z})^2}{(M^2_{S_1}-m^2_{d\nu})^2}\,. \end{aligned} \label{3bodys12} \end{equation} In computing the squared matrix element we have summed over the two perp polarization vectors of the gauge bosons such that: \begin{equation} \sum^{2}_{i=1}\,\epsilon^{\mu}_{\perp}(p_3)\,\epsilon_{\perp}^{\star\nu}(p_3)\,=\,-g^{\mu\nu}_{\perp}, \end{equation} where \begin{equation} g^{\mu\nu}_{\perp}\,=\,g^{\mu\nu}\,-\,\frac{n_3^{\mu}\,\bar{n}_3^{\nu}}{2}\,-\,\frac{\bar{n}_3^{\mu}\,n_3^{\nu}}{2}\,. \end{equation} The gauge coupling and the $U(1)$ generator $Y$ dependence in the decay rates in the above equations follow from the Feynman rule for the physical $B_{\perp}^{\mu}$ derived from the corresponding SCET field $\mathcal{B}_{\perp}^{\mu}$ that is: $g^{\prime}Y\epsilon^{\star}_{\perp}(p)$. 
\begin{comment} \subsection{Tree level decay rates of the $S_1(3,1,-\frac{1}{3})$ } For a given operator basis it is straightforward to calculate the decay rates of the singlet LQ at tree level, with the final states transformed into the mass basis. In this case also the Wilson coefficients need to be transformed from the weak basis of the operators to the mass basis such that for a general operator of the form $C^{ij}\bar{\psi}^{i}\chi^{j}$ we will have: \begin{equation} C^{ij}\bar{\psi}^{i}\chi^{j}\rightarrow C^{ij}V^{\dagger}\bar{\psi}^{i}U\chi^{j}, \end{equation} where $V^{\dagger}\psi$ and $U\chi$ define the rotation of the these two collinear fields to their mass basis after EW symmetry breaking. We define the Wilson coefficients in the new basis as $\tilde{C}^{ij}$, such that: \begin{equation} \tilde{C}^{ij}=V^{\dagger}C^{ij}U \label{Wilsoncoef} \end{equation} For a two body decay everything is fixed by kinematics and we compute the total decay rate in the limit of massless final states. The contributions from the Lagrangian in (\ref{Lagrangian1}) then yields the following decay rates: \begin{equation} \begin{split} &\Gamma(S_1\rightarrow \bar{\ell_R}^{i}{u_R^c})=N_f\frac{M_{S_1}}{16\pi}| \tilde{C}^{ij}_{1R} |^2 \\ &\Gamma(S_1\rightarrow Q^{c,i}\bar{ L}^{j})=N_f\frac{M_{S_1}}{16\pi}|\tilde{C}^{ij}_{1L}|^2 \\ &\Gamma(S_1\rightarrow d^{c,i}_R \bar{\nu}^{j}_R )=N_f\frac{M_{S_1}}{16\pi}|\tilde{C}^{ij}_{1\nu}|^2 \\ \end{split} \end{equation} where the $N_f$ is the number of coloured fermions that can possibly appear in the final state in the respective decay process. Note that the above rates differ only by their Wilson coefficients. 
The order $\mathcal{O}(\lambda^3)$ operators in Eq.(\ref{Eq.2}) and Eq.(\ref{Eq.1}) also give a contribution to the two body decay rate from the two jet and mono jet signatures: \begin{equation} \begin{split} &\Gamma(S_1\rightarrow \bar{L}^{i}d^{j}_R)=N_f\frac{v^2}{2}\frac{M_{S_1}}{16\pi} \frac{ | {\tilde{C_1}}_{Ld}^{{(0)}ij} |^2}{M^2} \\ &\Gamma(S_1\rightarrow Q^{i}\bar{\nu}^{j}_R )=N_f\frac{v^2}{2}\frac{M_{S_1}}{16\pi}\frac{ |{\tilde{C_1}}_{Q\nu}^{(0)ij}|^2 }{M^2} \end{split} \end{equation} They are both suppressed by a factor of $ \frac{v^2}{M^2}$ compared to the two body decay rates from $\mathcal{O}(\lambda^2)$ operators. Lastly from the three jet Lagrangian in (\ref{3jetS1}) we compute the squared matrix element for a three body decay of $S_1(3,1,-\frac{1}{3})$ into two fermions and a Higgs boson: \begin{align} \mathcal{M}^2(S_1\rightarrow L^{i}h\bar{d}^{j}_R)&=N_f\frac{| {\tilde{C_1}}_{Ld}^{ij}| ^2}{4M^2}(n_1\cdot n_2)(\bar{n}_1\cdot p_1)(\bar{n}_2\cdot p_2)\sim N_f\frac{| {\tilde{C_1}}_{Ld}^{ij}| ^2}{M^2} (m_{Ld}^2)\\ \mathcal{M}^2(S_1\rightarrow Q^{i}h\bar{\nu}^{j}_R)&=N_f\frac{| {\tilde{C_1}}_{Q\nu}^{ij}| ^2}{4M^2}(n_1\cdot n_2)(\bar{n}_1\cdot p_1)(\bar{n}_2\cdot p_2)\sim N_f\frac{| {\tilde{C_1}}_{Q\nu}^{ij}| ^2}{M^2} (m_{Q\nu}^2) \end{align} where $m_{Ld}^2=(p_L+p_d)^2$ and $m_{Q\nu}^2=(p_Q+p_{\nu})^2$ and we are neglecting the masses of the other particles compared to the mass of the LQ. 
Then the differential decay rates from the above contributions read: \begin{align} \frac{d^2\Gamma(S_1\rightarrow L^{i}h\bar{d}^{j}_R)}{dm_{hd}^2dm_{Ld}^2}&=\frac{1}{512\pi^3}N_f\frac{| {\tilde{C_1}}_{Ld}^{ij}| ^2}{M^2} \frac{(m_{Ld}^2)}{M_{S_1}^3}\\ \frac{d^2\Gamma(S_1\rightarrow Q^{i}h\bar{\nu}^{j}_R)}{dm_{Qh}^2dm_{Q\nu}^2}&=\frac{1}{512\pi^3}N_f\frac{| {\tilde{C_1}}_{Q\nu}^{ij}| ^2}{M^2} \frac{(m_{Q\nu}^2)}{M_{S_1}^3} \end{align} Lastly from Eq.(\ref{ZandgammadecayS1}) we find the following decay rate of the $S_1(3,1,-\frac{1}{3})$ into $\gamma$ and $Z^0$: \begin{equation} \frac{d^2\Gamma(S_1\rightarrow d_R^{i}\gamma\nu_R^{j})}{dm^2_{\gamma d}dm^2_{B\nu}}=\frac{\alpha_1}{32\pi^2}\frac{N_f}{2\cos\theta_w}\frac{| {\tilde{C_1}}_{d\nu}^{ij}| ^2 |Y|^2}{M^2}\frac{(m^2_{d\nu})}{M^3_{S_1}}\frac{(m_{dZ}^2)^2+(m_{\nu Z})^2}{(M^2_{S_1}-m^2_{d\nu})^2} \label{3bodys11} \end{equation} \begin{equation} \frac{d^2\Gamma(S_1\rightarrow d_R^{i}\nu_R^{j}Z^{0})}{dm^2_{Zd}dm^2_{Z\nu}}=\frac{\alpha_1}{32\pi^2}\frac{N_f}{2\sin\theta_w}\frac{| {\tilde{C_1}}_{d\nu}^{ij}| ^2 |Y|^2}{M^2}\frac{(m^2_{d\nu})}{M^3_{S_1}}\frac{(m_{dZ}^2)^2+(m_{\nu Z})^2}{(M^2_{S_1}-m^2_{d\nu})^2} \label{3bodys12} \end{equation} In computing the squared matrix element we have summed over the two perp polarization vectors of the gauge bosons such that: \begin{equation} \sum^{2}_{i=1}\epsilon^{\mu}_{\perp}(p_3)\epsilon_{\perp}^{\star\nu}(p_3)=-g^{\mu\nu}_{\perp}, \end{equation} where \begin{equation} g^{\mu\nu}_{\perp}=g^{\mu\nu}-\frac{n_3^{\mu}\bar{n}_3^{\nu}}{2}-\frac{\bar{n}_3^{\mu}n_3^{\nu}}{2} \end{equation} The gauge coupling and the $U(1)$ generator $Y$ dependence in the decay rates in Eq.(\ref{3bodys11}) and Eq.(\ref{3bodys12}) follows from the Feynman rule for the physical $B_{\perp}^{\mu}$ derived from the corresponding SCET field $\mathcal{B}_{\perp}^{\mu}$ that is: $g^{\prime}Y\epsilon^{\star}_{\perp}(p)$. 
\color{black} \end{comment} \section{\boldmath{SCET formalism for the scalar leptoquark $S_3(3,3,-\frac{1}{3})$}} \label{sections3} There are several possible extensions to the SM that attempt to explain the observed anomalies in $B$-physics systems. Most of these theoretical models that use scalar leptoquarks as a viable explanation contain both the singlet $S_1$ and another scalar leptoquark $S_3$ that transforms as a triplet under $SU(2)_L$ with hypercharge $-1/3$ \cite{Buttazzo:2017ixm,Crivellin:2017zlb}. Such models seem to give a promising solution both to the $R_{(D^{\ast})}$ and to the neutral current process $b\rightarrow s\mu^{+}\mu^{-}$. It is therefore of interest to apply our framework to the triplet $S_3$ and find its tree level decay rates. We present in the following section the $S_3$ effective Lagrangian in SCET at $\mathcal{O}(\lambda^2)$ and $\mathcal{O}(\lambda^3)$ for two and three body final states. \subsection[Leading power two jet operators for the $S_3$]{\boldmath Leading power two jet operators for the $S_3$} \label{s3sec} We start by constructing the leading order Lagrangian which we refer to as $\mathcal{L}_{S_3}^{(\lambda^2)}$. Since $S_3$ is an $SU(2)$ triplet it should be understood as $S_3\equiv t^{a}S_3^{a}$, for $a=1,2,3$ and $t^{a}$ generators of the $SU(2)$. As a result gauge invariance constrains the operator basis a lot more in this case. Indeed we find only one operator that describes the decays of the $S_3$ into two energetic SM particles going into the collinear directions $n_1$ and $n_2$. 
It is a dimension four operator built out of a quark and a lepton doublet and the $S_3$ where the SM doublets couple to the $S_3$ triplet similarly to an $SU(2)$ gauge field: \begin{equation} \mathcal{L}_{S_3}^{(\lambda^2)}\,=\,C^{ij}_{S_3^{\ast}Q_L^cL_L}(\Lambda,M_{S_3},\mu)\,\mathcal{O}_{S_3^{\ast}Q_L^cL_L}^{ij}(\mu)\,+\,\text{h.c.}, \label{Ltripletleading} \end{equation} with \begin{equation} \mathcal{O}_{S_3^{\ast}Q_L^cL_L}^{ij}=\,{\bar{Q^i}}^{c,a}_{L,n_1} \, \epsilon^{ab} \,S_{3v}^{\ast bd}\,{L^{j,d}}_{L,n_2}\,+\,(n_1\leftrightarrow n_2)\,, \label{LOopS3} \end{equation} where ${i,j}$ are flavor indices and ${a,b,d}$ are $SU(2)$ indices. The heavy particle $S_3$ is treated within the HSEFT as described before where $S_{3v}(x)$ contains only the soft momentum fluctuations. The Wilson coefficients are defined in the same way as in equation (\ref{Wilsoncoeff}) and they are dimensionless. We notice that similarly to the $\mathcal{L}_{S_1}^{(\lambda^2)}$ the Lagrangian in (\ref{Ltripletleading}) violates fermion number conservation. The leading order two body decay rates of the leptoquark $S_3$ are governed by the matrix elements of the Lagrangian in (\ref{Ltripletleading}), which allows for a decay into a left handed quark and a left handed lepton. The total two body decay rate at $\mathcal{O}(\lambda^2)$ for $S_3$ evaluates to: \begin{equation} \begin{split} &\Gamma(S_3\rightarrow Q^{c,i}_{L}\bar{ L}_{L}^{j})\,=\,\frac{M_{S_3}}{32\pi}\,|\mathrm{C}_{S_3^{\ast}Q_L^{c}L_L}^{ij}|^2 \,.\\ \end{split} \end{equation} \subsection[Subleading power two jet operators for the $S_3$]{\boldmath Subleading power two jet operators for the $S_3$} At $\mathcal{O}(\lambda^3)$ the symmetries allow for a larger number of operators both for two and three collinear directions. 
For two jet final states we find six operators of mixed chirality and two operators of the same chirality fields such that: \begin{equation} \begin{aligned} \mathcal{L}_{S_3}^{(\lambda^3)}\bigg\rvert_{\text{2 jet}}&\,=\,\frac{1}{\Lambda}\,\Biggl[\,C_{S_3Q_L\Phi\nu_R}^{(0)^{ij}} (\Lambda,M_{S_3},\mu)\,\mathcal{O}_{S_3Q_L\Phi\nu_R}^{(0)^{ij}}(\mu)\,\\ &\,\hspace{1.7cm}+\,C_{S_3d_R\Phi L_L}^{(0)^{ij}}(\Lambda,M_{S_3},\mu)\,\mathcal{O}_{S_3d_R\Phi L_L}^{(0)^{ij}}(\mu)\,\Biggr]\,\\ &+\,\frac{1}{\Lambda}\,\Biggl[\,\sum_{k=1,2}\int_{0}^{1}\,du\,\Biggl( \,C_{S_3Q_L\Phi\nu_R}^{(k)^{ij}}(\Lambda,M_{S_3},\mu,u)\, \mathcal{O}_{S_3Q_L\Phi\nu_R}^{(k)^{ij}}(\mu,u) \,\\ &\hspace{1.9cm}+\,C_{S_3d_R\Phi L_L}^{(k)^{ij}}(\Lambda,M_{S_3},\mu,u)\, \mathcal{O}_{S_3d_R\Phi L_L}^{(k)^{ij}}(\mu,u)\,\\ &\hspace{1.9cm}+\,C_{S_3d_R W\nu_R}^{(k)^{ij}} (\Lambda,M_{S_3},\mu,u)\,\mathcal{O}_{S_3d_R W\nu_R}^{(k)^{ij}}(\mu,u)\, \Biggr)\,+\,\text{h.c.}\,\Biggr]\,. \end{aligned} \label{S32jetsub} \end{equation} We name the operators and the Wilson coefficients based on their field content, where $W$ here is the $SU(2)$ gauge boson. The operators in the first line in (\ref{S32jetsub}) contain the zero momentum field $\Phi^{(0)}(x)$ and $\tilde{\Phi}^{(0)}(x)$ with the following explicit structure: \begin{equation} \begin{aligned} &\mathcal{O}_{S_3Q_L\Phi\nu_R}^{(0)^{ij}} \,=\,\bar{Q}^{i}_{L,n_1}\,S_{3v} \,\Phi^{(0)}\,\nu^{j}_{R,n_2}\, +\,(n_1\leftrightarrow n_2)\,,\\ &\mathcal{O}_{S_3d_R\Phi L_L}^{(0)^{ij}}\,=\,\bar{d}^{i}_{R,n_1}\,\tilde{\Phi}^{(0)}\,S_{3v}\,L^{j}_{L,n_2} \,+\,(n_1\leftrightarrow n_2)\,, \label{2bodyS3sub2} \end{aligned} \end{equation} which are mono-jet detector signatures. 
The rest of the mixed chirality operators read as follows: \begin{equation} \begin{aligned} \mathcal{O}_{S_3Q_L\Phi\nu_R}^{(1)^{ij}} (u)\,&=\,\bar{Q}^{i}_{L,n_1}\,S_{3v}\, \Phi_{n_1}^{(u)}\,\nu^{j}_{R,n_2}\,+\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3Q_L\Phi\nu_R}^{(2)^{ij}} (u)\,&=\,\bar{Q}^{i}_{L,n_1}\,S_{3v}\, \Phi_{n_2}^{(u)}\,\nu^{j}_{R,n_2}\,+\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3d_R\Phi L_L}^{(1)^{ij}}(u)\,&=\,\bar{d}^{i}_{R,n_1}\,\tilde{\Phi}^{(u)}_{n_1}\,S_{3v}\,L^{j}_{L,n_2}\, +\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3d_R\Phi L_L}^{(2)^{ij}}(u)\,&=\,\bar{d}^{i}_{R,n_1}\,\tilde{\Phi}^{(u)}_{n_2}\,S_{3v}\,L^{j}_{L,n_2}\, +\,(n_1\leftrightarrow n_2)\,, \end{aligned} \end{equation} The triplet $S_3$ very similarly to $S_1$ will decay into right handed neutrinos, left handed quarks and SM Higgs and into left handed leptons together with right handed down-type quarks. Lastly the same chirality operators in equation (\ref{S32jetsub}) contain the perp component of a $SU(2)_L$ gauge boson, a down type quark and the right handed neutrino: \begin{equation} \begin{aligned} \mathcal{O}_{S_3d_R W \nu_R}^{(1)^{ij}}(u)\,&=\,\bar{d}^{i}_{R,n_1}\,S_{3v}\,\slashed{\mathcal{W}}^{\perp,(u)}_{n_1}\,\nu^{j}_{R,n_2}\, +\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3d_R W \nu_R}^{(2)^{ij}}(u)\,&=\,\bar{d}^{i}_{R,n_1}\,S_{3v}\,\slashed{\mathcal{W}}^{\perp,(u)}_{n_2}\,\nu^{j}_{R,n_2}\, +\,(n_1\leftrightarrow n_2)\,, \end{aligned} \end{equation} where the perp component of the gauge invariant building block $\mathcal{W}^{\mu}$ is defined: \begin{equation} \mathcal{W}_{n}^{\perp,\mu}\,=\,\mathcal{W}_{n}^{\mu}\,-n\cdot\mathcal{W}\,\frac{\bar{n}^\mu}{2}\,. 
\label{Wvecperp} \end{equation} Subleading operators with the zero momentum field $\Phi^{(0)}$ in (\ref{2bodyS3sub2}) will give a contribution to the two body decay rates such that: \begin{equation} \begin{aligned} \Gamma(S_3\rightarrow \bar{Q}_{L}^{i}{\nu}^{j}_R )\,&=\,\frac{v^2}{2}\,\frac{M_{S_3}}{16\pi}\,\frac{ |\mathrm{C}_{S_3Q_L\Phi\nu_R}^{(0)^{ij}}|^2 }{\Lambda^2}\,,\\ \Gamma(S_3\rightarrow \bar{d}^{i}_RL_{L}^{j} )\,&=\,\frac{v^2}{2}\frac{M_{S_3}}{16\pi}\,\frac{ |\mathrm{C}_{S_3d_R\Phi L_L}^{(0)^{ij}}|^2 }{\Lambda^2}\,. \end{aligned} \end{equation} \subsection[Leading power three body decays for $S_3$]{\boldmath Leading power three body decays for $S_3$} We can also explore the leading power three jet final states for this leptoquark where the Lagrangian in this case is: \begin{equation} \begin{split} \mathcal{L}_{S_3}^{(\lambda^3)}\bigg\rvert_{\text{3 jet}}\,=\,\frac{1}{\Lambda}\,\Biggl [&\,C_{S_3d_R\Phi L_L}^{ij}(\Lambda,M_{S_3},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_3d_R\Phi L_L}^{ij} (\mu) \,\\ &\,\,+\,C_{S_3Q_L\Phi\nu_R}^{ij}(\Lambda,M_{S_3},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_3Q_L\Phi\nu_R}^{ij}(\mu)\, \\ & \,\,+\,C_{S_3d_RW \nu_R}^{ij}(\Lambda,M_{S_3},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_3d_R W\nu_R}^{ij} (\mu)\,+\,\text{h.c.} \,\Biggr]\,. \end{split} \label{3jetS3} \end{equation} where the operators read: \begin{equation} \begin{aligned} \mathcal{O}_{S_3d_R\Phi L_L}^{ij}&\,=\,\bar{d}^{i}_{R,n_1}\,\tilde{\Phi}_{n_3}\,S_{3v}\,L^{j}_{L,n_2}\,\, +\,\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3Q_L\Phi \nu_R}^{ij}&\,=\,\bar{Q}^{i}_{L,n_1}\, \Phi_{n_3}\,S_{3v}\,\nu^{j}_{R,n_2}\, +\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3d_R W \nu_R}^{ij}&\,=\,\bar{d}^{i}_{R,n_1}\,\slashed{\mathcal{W}}^{\perp}_{n_3}\,S_{3v}\,\nu^{j}_{R,n_2}\,+\,(n_1\leftrightarrow n_2)\,. 
\label{3jetS3ops} \end{aligned} \end{equation} The field content of the above operators is the same as for the two jet decays, though experimentally they would have very different angular distributions. Three body decay rates for left-right chirality operators here are: \begin{equation} \begin{aligned} \frac{d^2\,\Gamma(\,S_3\rightarrow \bar{d}^{i}_R\,L^{j}_{L}\,h\,)}{dm_{hd}^2\,dm_{Ld}^2}&\,=\,\frac{1}{512\pi^3}\,\frac{| \mathrm{C}_{S_3d_R\Phi L_L}^{ij}| ^2}{\Lambda^2} \,\frac{m_{Ld}^2}{M_{S_3}^3}\,,\\ \frac{d^2\,\Gamma(\,S_3\rightarrow \bar{Q}^{i}_{L}\,h\,{\nu}^{j}_R\,)}{dm_{Qh}^2\,dm_{Q\nu}^2}&\,=\,\frac{1}{512\pi^3}\,\frac{| \mathrm{C}_{S_3Q_L\Phi \nu_R}^{ij}| ^2}{\Lambda^2} \,\frac{m_{Q\nu}^2}{M_{S_3}^3}\,, \end{aligned} \end{equation} where $m^2_{ij}$ is the invariant mass of the particle pair $(i,j)$ and for simplicity in notation we keep the field chirality labels implicit in the $m^2_{ij}$. The same chirality operator with the right handed neutrino in the final state yields a less trivial result: \begin{equation} \frac{d^2\Gamma(S_3\rightarrow \bar{d}_R^{i}\nu_R^{j}W)}{dm^2_{Wd}\,dm^2_{W\nu}}\,=\,\frac{\alpha_2}{32\pi^2}\,\frac{| \mathrm{C}_{S_3d_RW\nu_R}^{ij}| ^2\, |t^a|^2}{\Lambda^2}\,\frac{m^2_{d\nu}}{M^3_{S_3}}\,\frac{(m_{dW}^2)^2+(m^2_{\nu W})^2}{(M^2_{S_3}-m^2_{d\nu})^2}\,, \label{3bodyS3W} \end{equation} where $\mid t^a\mid^2$ is a color factor coming from the definition of the collinear field $\mathcal{W}$ in SCET defined in (\ref{scetgaugeboson}). \begin{comment} \subsection{Decay rates for the $S_3(3,3-\frac{1}{3})$} The leading order two body decay rate of the triplet LQ is governed by the matrix elements of the Lagrangian in (\ref{Ltripletleading}), which allows for a decay of the scalar $S_3(3,3,-\frac{1}{3})$ into a left handed chiral quark and a left handed chiral lepton. The final states are set to the mass basis and accordingly the Wilson coefficients as defined in Eq.(\ref{Wilsoncoef}). 
Then we have the two body decay rate from the LO operator in Eq.(\ref{LOopS3}): \begin{equation} \begin{split} &\Gamma(S_3\rightarrow Q^c\bar{ L})=N_f\frac{M_{S_3}}{32\pi}|{\tilde{C_{3}}_L}^{ij}|^2 \\ \end{split} \end{equation} and the two body decay rate from the $\mathcal{O}(\lambda^3)$ operator in Eq.(\ref{2bodyS3sub1}) and Eq.(\ref{2bodyS3sub2}): \begin{equation} \Gamma(S_3\rightarrow \bar{Q}^{i}{\nu}^{j}_R )=N_f\frac{v^2}{2}\frac{M_{S_3}}{16\pi}\frac{ |{\tilde{C_3}^{(0)ij}_{Q\nu}}|^2 }{M^2} \end{equation} \begin{equation} \Gamma(S_3\rightarrow \bar{d}^{i}_RL^{j} )=N_f\frac{v^2}{2}\frac{M_{S_3}}{16\pi}\frac{ |{\tilde{C_3}^{(0)ij}_{dL}}|^2 }{M^2} \end{equation} Then the rates for a three body decay of the mixed chirality operators from the Lagrangian in Eq.(\ref{3jetS3}) are: \begin{align} \frac{d^2\Gamma(S_1\rightarrow \bar{d}^{i}_RL^{j}h)}{dm_{hd}^2dm_{Ld}^2}&=\frac{1}{512\pi^3}N_f\frac{| {\tilde{C_1}}_{Ld}^{ij}| ^2}{M^2} \frac{(m_{Ld}^2)}{M_{S_3}^3}\\ \frac{d^2\Gamma(S_1\rightarrow \bar{Q}^{i}h{\nu}^{j}_R)}{dm_{Qh}^2dm_{Q\nu}^2}&=\frac{1}{512\pi^3}N_f\frac{| {\tilde{C_1}}_{Q\nu}^{ij}| ^2}{M^2} \frac{(m_{Q\nu}^2)}{M_{S_3}^3} \end{align} The decay rate for the same chirality operators give a less trivial result which reads: \begin{equation} \frac{d^2\Gamma(S_3\rightarrow d_R^{i}\nu_R^{j}W)}{dm^2_{Wd}dm^2_{W\nu}}=\frac{\alpha_1}{32\pi^2}N_f\frac{| {\tilde{C_1}}_{d\nu}^{ij}| ^2 |Y|^2}{M^2}\frac{(m^2_{d\nu})}{M^3_{S_1}}\frac{(m_{dW}^2)^2+(m_{\nu W})^2}{(M^2_{S_1}-m^2_{d\nu})^2} \label{3bodys12} \end{equation} \end{comment} \section{\boldmath{SCET formalism for the vector leptoquark $U_1^{\mu}(3,1,\frac{2}{3})$}} \label{sectionU1} The vector $U_1^{\mu}$ is another interesting example from the family of leptoquarks that has been introduced as a solution to the departures from SM in the flavour sector \cite{Barbieri:2015yvd,Buttazzo:2017ixm}. It is a color triplet, $SU(2)$ singlet and has hypercharge $2/3$. 
In the following section we analyse its decays in leading and subleading order in power counting. All interactions of the field $U_1^{\mu}$ here are described in the soft limit by the HVEFT shown in Section \ref{HSEFT}. \subsection[Leading power two jet operators for the $U_1^{\mu}$] {\boldmath Leading power two jet operators for the $U_1^{\mu}$} At leading order in SCET the Lagrangian for the leptoquark $U_1^{\mu}$ is: \begin{equation} \begin{aligned} \mathcal{L}_{U_1}^{(\lambda^2)}\,&=\,C^{ij}_{U_1Q_LL_L}(\Lambda,M_{U_1},\mu)\,\mathcal{O}^{ij}_{U_1Q_LL_L}(\mu)\,+\,C^{ij}_{U_1d_R\ell_R}(\Lambda,M_{U_1},\mu)\,\mathcal{O}^{ij}_{U_1d_R\ell_R}(\mu)\,\\ &+\,C^{ij}_{U_1u_R\nu_R}(\Lambda,M_{U_1},\mu)\,\mathcal{O}^{ij}_{U_1u_R\nu_R}(\mu)\,+\, \text{h.c.} \end{aligned} \label{LagrangianU1} \end{equation} with the following dimension four operators: \begin{equation} \begin{aligned} \mathcal{O}_{U_1Q_LL_L}^{ij}&\,=\,\bar{Q}^{i}_{L,n_1}\,\slashed{U}_{1v\perp}\,L^{j}_{L,n_2}+\,(n_1\leftrightarrow n_2)\,, \\ \mathcal{O}^{ij}_{U_1d_R\ell_R}&\,=\,\bar{d}^{i}_{R,n_1}\,\slashed{U}_{1v\perp}\,\ell^{j}_{R,n_2}\,+\, (n_1 \leftrightarrow n_2)\,,\\ \mathcal{O}^{ij}_{U_1u_R\nu_R}&\,=\,\bar{u}^{i}_{R,n_1}\,\slashed{U}_{1v\perp}\,\nu^{j}_{R,n_2} +\,(n_1\leftrightarrow n_2)\,. \end{aligned} \label{leadingU1operators} \end{equation} where $\slashed{U}_{1v\perp}=\gamma_{\perp}^{\mu}\cdot {U}_{1v\perp\mu}$ with $\gamma_{\perp}^{\mu}$ defined in (\ref{gamma}). 
Then at leading order the vector leptoquark will decay into two fermions of the same chirality with the following unpolarized decay rates: \begin{equation} \begin{aligned} \Gamma(U_1\rightarrow \bar{Q}^{i}_L L^{j}_L)&=\,\frac{M_{U_1}}{24\pi}\mid \mathrm{C}_{U_1Q_LL_L}^{ij}\mid ^2\,, \\ \Gamma(U_1\rightarrow \bar{d}^{i}_R \ell^{j}_R)&=\,\frac{M_{U_1}}{24\pi}\mid \mathrm{C}^{ij}_{U_1d_R\ell_R}\mid ^2\,, \\ \Gamma(U_1\rightarrow\bar{u}_R^{i} \nu^{j}_R)&=\,\frac{M_{U_1}}{24\pi}\mid \mathrm{C}^{ij}_{U_1u_R\nu_R}\mid ^2 \,.\\ \end{aligned} \end{equation} where the Wilson coefficients above are in the mass basis. \subsection[Subleading power two jet operators for the $U_1^{\mu}$]{\boldmath Subleading power two jet operators for the $U_1^{\mu}$} At $\mathcal{O}(\lambda^3)$ we find there are two operator constructions allowed by gauge invariance and space-time symmetries. They differ by the SM scalar doublet $\Phi$ and the zero momentum field $\Phi^{(0)}$ defined in (\ref{phizero}). It is also useful to define the reparametrization invariant quantity $\Pi^{\mu}$ such that: \begin{equation} \Pi^{\mu}=\frac{(v\cdot \bar{n})n^{\mu}-(v\cdot n)\bar{n}^{\mu}}{2}\,, \end{equation} where $\Pi^{\mu}\rightarrow -\Pi^{\mu}$ under hermitian conjugate and it is odd for $n\leftrightarrow \bar{n}$ \cite{Heiles:2020plj}. 
Then the Lagrangian describing the decays of the vector leptoquark at subleading power is: \begin{equation} \begin{split} \mathcal{L}_{U_1}^{(\lambda^3)}\bigg\rvert_{\text{2 jet}}&=\frac{1}{\Lambda}\, C_{U_1Q_L\ell_R}^{(0)\,ij}\,(\Lambda,M_{U_1},\mu)\,{\mathcal{O}^{(0)\,ij}_{U_1Q_L\ell_R}}\,(\mu)\,\\ &\,+\frac{1}{\Lambda} \sum_{k=1,2}\int_{0}^{1}du \,{C_{U_1Q_L\ell_R}^{(k)\,ij}}\,(\Lambda,M_{U_1},\mu,u) \,{\mathcal{O}_{U_1Q_L\ell_R}^{(k)\,ij}}\,(\mu,u)\,+\, \text{h.c.}\,, \end{split} \label{LagrangianU1sub} \end{equation} with: \begin{equation} \begin{aligned} {\mathcal{O}^{(0)ij}_{U_1Q_L\ell_R}}(\mu)&=\bar{Q}^{i}_{L,n_1}\Phi^{(0)}\,\Pi\cdot U_{1v}\,\ell^{j}_{R,n_2} \,-(n_1\leftrightarrow n_2)\,,\\ {\mathcal{O}_{U_1Q_L\ell_R}^{(1)ij}}(u)&=\bar{Q}^{i}_{L,n_1}\Phi^{(u)}_{n_1}\,\Pi\cdot U_{1v}\,\ell^{j}_{R,n_2} \,-(n_1\leftrightarrow n_2)\,,\\ {\mathcal{O}_{U_1Q_L\ell_R}^{(2)ij}}(u)&=\bar{Q}^{i}_{L,n_1}\Phi^{(u)}_{n_2}\,\Pi\cdot U_{1v}\,\ell^{j}_{R,n_2} \,-(n_1\leftrightarrow n_2)\,.\\ \end{aligned} \end{equation} We divide by the large scale $\Lambda$ so that the Wilson coefficients remain dimensionless. The sum in the second line in (\ref{LagrangianU1sub}) accounts for both cases when the field $\Phi^{(u)}$ with momentum fraction $u\mathcal{P}_i$ is emitted in the $n_1$ or $n_2$ direction. The vector $v$ is the reference vector $v^{\mu}=(1,0,0,0)$ and $v\cdot U_{1v}=0$. It is then straightforward to calculate the remaining power suppressed two body decay rate: \begin{equation} \Gamma(U_1\rightarrow \bar{Q}^{i}_L\ell^{j}_R)=\frac{v^2}{12\pi}\frac{M_{U_1}}{\Lambda^2}\mid {\mathrm{C}^{(0)ij}_{U_1Q_L\ell_R}} \mid ^2 \end{equation} \subsection[Leading power three body decays for $U_1^{\mu}$]{\boldmath Leading power three body decays for $U_1^{\mu}$} It is not difficult to find the operator basis for $U_{1}^{\mu}$ for three jet final states. 
The symmetries in this case allow for only one operator and the Lagrangian reads: \begin{equation} \begin{split} \mathcal{L}_{U_1}^{(\lambda^3)}\bigg\rvert_{\text{3 jet}}&=\,\frac{1}{\Lambda}\, C^{ij}_{U_1Q_L\ell_R}(\Lambda,M_{U_1},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}^{ij}_{U_1Q_L\ell_R}(\mu)+ \text{h.c.}\,, \end{split} \label{3jetU1} \end{equation} where $m^2_{k,\ell}$ is the invariant mass of the particle pair $(k,\ell)$ and the operator is: \begin{equation} \mathcal{O}_{U_1Q_L\ell_R}^{ij}=\bar{Q}^{i}_{L,n_1}\,\Phi_{n_3}\,\Pi\cdot U_{1v}^{\mu}\,\ell^{j}_{R,n_2} -(n_1\leftrightarrow n_2)\,. \label{U13bodyL} \end{equation} In comparison to the scalars $S_{1}$ and $S_{3}$ there are no right handed neutrino interactions at subleading order for $U_1^{\mu}$. The differential decay rate for the three-body decays of $U_1^{\mu}$ can be derived from (\ref{3jetU1}): \begin{equation} \frac{d^2\Gamma}{dm^2_{Q\ell}\,dm^2_{h\ell}}=\frac{1}{1536\pi^3}\frac{\mid \mathrm{C}^{ij}_{U_1Q_L\ell_R} \mid ^2}{M_{U_1}\,\Lambda^2}\frac{(m^2_{Q\ell})^2}{(M^2_{U_1}-m^2_{h\ell})(M^2_{U_1}-m^2_{Qh})} \end{equation} In deriving the above expression we have averaged over the polarizations of the massive vector and use the following relation for massless particles collinear in $n_1$ and $n_2$ directions: \begin{equation} n_1\cdot n_2=\frac{k_1\cdot k_2}{E_1E_2}\,, \end{equation} where the energies of the two fermions in the rest frame of the leptoquark are respectively $E_1=\frac{M^2_{U_1}-m^2_{h\ell}}{2M_{U_1}}$ and $E_2=\frac{M^2_{U_1}-m^2_{Qh}}{2M_{U_1}}$. \section{Running of the Wilson coefficients } \label{secwilsoncoeff} In the process of scale separation in an EFT, Wilson coefficients are the functions that capture the hard scale dependence. They correspond to loop diagrams with vertices from the full theory that have been integrated out. Inherently they depend on logarithms of ratios of the hard scale $\Lambda$ and the factorization scale $\mu$. 
Thus a reliable result on the decay rates of heavy particles considered within an EFT framework requires resummation of these large logarithms in the Wilson coefficients. \par In SCET it is possible to achieve this using renormalization group techniques. Wilson coefficients obey well defined renormalization group (RG) equations which are derived from renormalization of their corresponding operators. At one loop the type of diagrams that contribute to the renormalization of the $\mathcal{O}(\lambda^2)$ operators are shown in Fig.(\ref{vertexdiag}). In the most general case the RG equation is a matrix equation such that: \begin{equation} \begin{aligned} \frac{d\,\boldsymbol{C}(\mu)}{d\ln\mu}&=\boldsymbol{\Gamma}(\mu)\otimes\,\boldsymbol{C}(\mu)\,,\\ \end{aligned} \label{RGEs} \end{equation} where $\boldsymbol{\Gamma}$ is the anomalous dimension matrix in generation space and $\boldsymbol{C}$ is the matrix of Wilson coefficients in generation space. The symbol $\otimes$ takes into account that the ordering of $\boldsymbol{\Gamma}$ and $\boldsymbol{C}$ matters. The solution of (\ref{RGEs}) can then be formally written as: \begin{equation} \boldsymbol{C}(\mu)\,=\,\boldsymbol{C}(\Lambda)\exp\Bigg[\int^{\mu}_{\Lambda}{d\ln\mu}\,\boldsymbol{\Gamma}(\mu) \Bigg] \label{CRGE} \end{equation} and it systematically resums the large logarithms of type $\ln(\Lambda^2/\mu^2)$. 
\par The anomalous dimension $\boldsymbol{\Gamma}$ of a SCET operator with three external lines, one heavy particle and two collinear particles in $n_1$ and $n_2$ directions depends on the cusps anomalous dimensions $\gamma^{(r)}_{\text{cusp}}(\alpha_r)$, on the single-particle anomalous dimensions $\boldsymbol{\gamma}^{i}$ and on the leptoquark anomalous dimension $\gamma^{LQ}$ such that \cite{Becher:2009kw,Alte:2018nbn,Becher:2009cu}: \begin{equation} \begin{split} \boldsymbol{\Gamma}\left( \lbrace p,p_1,p_2 \rbrace ,M,\mu\right) &=\frac{1}{2}\sum_{r}\left(C^{(r)}-C_{1}^{(r)}-C_{2}^{(r)}\right)\,\gamma^{(r)}_{\text{cusp}}(\alpha_r)\, \left(\ln\frac{\mu^2}{M^2}+i\pi\right)\,\\ & - \sum_{r}C^{(r)}\,\gamma^{(r)}_{\text{cusp}}(\alpha_r)\,\ln\frac{ \mu}{M}+ \sum_{i=1,2}\boldsymbol{\gamma}^{i}+\gamma^{LQ} \label{anomalousdim} \end{split} \end{equation} where $M$ is the mass of the leptoquark and $C^{(r)}\,,C^{(r)}_{1}$, $C_{2}^{(r)}$ are the Casimir operators of the leptoquark, the $n_1$ and $n_2$ collinear particles respectively for the gauge group $(r)$ where these particles transform. For a non-Abelian group $SU(N)$ the Casimir operator is $C_i=(N^2-1)/2N$ for the fundamental representation and $C_i=N$ for the adjoint representation. For the Abelian group $U(1)_{Y}$ we have $C_i=Y^2_i$, where $Y_i$ is the hypercharge of the particle. The $\gamma^{(r)}_{\text{cusp}}(\alpha_r)$ are functions of the coupling constant arising from light like Wilson loops \cite{Korchemskaya:1992je,Korchemskaya:1994qp}. Up to NNLO they depend only on $\alpha_r$ for each symmetry group $G^{(r)}$ \cite{Moch:2004pa, Korchemsky:1987wg,Jantzen:2005az}. For SM particles, $\boldsymbol{\gamma^{i}}$ are matrices in generation space that contain SM Yukawa matrices. They multiply the corresponding Wilson coefficient either from the left or from the right as described below. 
Here we present the resummation of the Wilson coefficients in the mass basis for the leading power two jet operators for the leptoquarks $S_1$, $S_3$ and $U_{1}^{\mu}$. We show in the appendix (\ref{anomalousdim3jet}) the anomalous dimensions for the three jet operators at $\mathcal{O}(\lambda^3)$ and a very similar procedure can be straightforwardly extended for the Wilson coefficients of these operators. We work at leading order in RG improved perturbation theory (PT), which is equivalent to resumming the large logarithms at next-to-leading logarithmic (NLL) order. This requires the two loop expressions for $\gamma^{(r)}_{\text{cusp}}(\alpha_r)$, one loop expression for $\boldsymbol{\gamma}^{i}$ and one loop $\gamma^{LQ}$. This estimate would give a prediction of the running effects to the tree level matching coefficients for various decay rates. \begin{figure} \centering \includegraphics[scale=0.5]{vertexdiag} \caption{Soft and collinear gluon emissions for the one-loop renormalization of operators at $\mathcal{O}(\lambda^2)$. The double line indicates a heavy leptoquark. The first diagram corresponds to soft gauge boson emissions, the second diagram describes final state interactions, the last diagram accounts for the type of diagrams where gauge bosons are emitted from the collinear Wilson lines. In the first two diagrams gauge bosons have soft momentum scaling, in the third diagram they have collinear scaling.} \label{vertexdiag} \end{figure} In the appendix (\ref{cusps}) we collect the explicit expressions for the cusp anomalous dimensions and the beta functions at two loop. \subsection[Resummation effects on the singlet $S_1$]{\boldmath Resummation effects on the singlet $S_1$} \label{resumationS1} From the formula (\ref{anomalousdim}) we derive the evolution of the Wilson coefficients of the $\mathcal{O}(\lambda^2)$ operators for $S_1$ shown in (\ref{S1operators}). 
They are governed by the following anomalous dimensions in generation space: \begin{equation} \begin{split} {\boldsymbol{\Gamma}_{S_1^{\ast}u_R^{c}\ell_R}} &=\left(-\frac{4}{3} \gamma^{(3)}_{\text{cusp}}- \frac{1}{9}\gamma^{(1)}_{\text{cusp}}\right) \ln\frac{\mu}{M_{S_1}} - \frac{2}{3} \gamma^{(1)}_{\text{cusp}}\left(\ln\frac{\mu^2}{M^2_{S_1}}+i\pi\right) +\gamma^ {S_1}\,\\ &+\left( \boldsymbol{\gamma}^{\ell_R},\,.\,\right) + \left(.\,,\boldsymbol{\gamma}^{u_R}\right) \,,\\ {\boldsymbol{\Gamma}}_{S_1^{\ast}Q^c_LL_L} &=\left(-\frac{4}{3} \gamma^{(3)}_{\text{cusp}}-\frac{1}{9}\gamma^{(1)}_{\text{cusp}}\right) \ln\frac{\mu}{M_{S_1}}-\left( \frac{3}{4}\gamma^{(2)}_{\text{cusp}}+\frac{1}{12}\gamma^{(1)}_{\text{cusp}} \right)\left( \ln\frac{\mu^2}{M^2_{S_1}}+i\pi \right)\,\\ &+\gamma^ {S_1}+\left(\boldsymbol{\gamma}^{L_L},\,.\right) + \left(.\,,\boldsymbol{\gamma}^{Q_L}\right)\,,\\ {\boldsymbol{\Gamma}}_{S_1d_R\nu_R} &=\left(-\frac{4}{3} \gamma^{(3)}_{\text{cusp}}-\frac{1}{9}\gamma^{(1)}_{\text{cusp}}\right) \ln\frac{\mu}{M_{S_1}}+\gamma^ {S_1}+\left(.\,,\boldsymbol{\gamma}^{d_R}\right)\,, \end{split} \label{GammaS1LP} \end{equation} where we use the notations $\left( .\,,\boldsymbol{\gamma}\right)$ and $\left( \boldsymbol{\gamma}\,,.\right)$ for the single particle anomalous dimensions to indicate a multiplication with the Wilson coefficient from the left and from the right respectively, such that: \begin{equation} \begin{aligned} \left( .\,,\boldsymbol{\gamma}\right)\, \boldsymbol{C}\,&\equiv\, \boldsymbol{C}\, \boldsymbol{\gamma}\,\\ \left(\boldsymbol{\gamma}\,,.\right)\,\boldsymbol{C}\,&\equiv\,\boldsymbol{\gamma}\, \boldsymbol{C} \end{aligned} \end{equation} If $\boldsymbol{\gamma}$ is the anomalous dimension of an antiparticle the multiplication with the Wilson coefficient is from the left and for a particle it becomes a multiplication from the right. 
The various single field anomalous dimensions in (\ref{GammaS1LP}) are \cite{Alte:2018nbn}: \begin{equation} \begin{split} \boldsymbol{\gamma}^{\ell_R}&=-\frac{\alpha_1}{4\pi}+\frac{1}{16 \pi^2}{\textbf{Y}}_{\ell}^{\dagger}\textbf{Y}_{\ell} \, ,\\ \boldsymbol{\gamma}^{L_L}&=-\frac{9\alpha_2}{16\pi}-\frac{\alpha_1}{16\pi}+\frac{1}{32\pi^2}{\textbf{Y}}_{\ell}\textbf{Y}_{\ell}^{\dagger} \,,\\ \boldsymbol{\gamma}^{u_R}&=-\frac{\alpha_3}{\pi}-\frac{\alpha_1}{9\pi}+\frac{1}{16\pi^2}\textbf{Y}_{u}^{\dagger}\textbf{Y}_{u}\,,\\ \boldsymbol{\gamma}^{d_R}&=-\frac{\alpha_3}{\pi}-\frac{\alpha_1}{36\pi}+\frac{1}{16\pi^2}\textbf{Y}_{d}^{\dagger}\textbf{Y}_{d}\,,\\ \boldsymbol{\gamma}^{Q_L}&=-\frac{\alpha_3}{\pi}-\frac{9\alpha_2}{16\pi}-\frac{\alpha_1}{144\pi}+\frac{1}{32\pi^2}\left( \textbf{Y}_u\textbf{Y}_u^{\dagger}+\textbf{Y}_{d}\textbf{Y}_{d}^{\dagger}\right)\,,\\ \gamma^{S_1}&=-\frac{2\,\alpha_3}{3\pi}-\frac{\alpha_1}{18\pi}\,, \end{split} \label{fieldanomdim} \end{equation} where $\textbf{Y}_{\ell}$ is the Yukawa matrix for the lepton $\ell$, $\textbf{Y}_{u}$ and $\textbf{Y}_{d}$ are the Yukawa matrices for up and down-type quarks. In practice we transform the Wilson coefficients into the mass basis since this is the relevant basis for physical quantities such as decay rates. In the mass basis the Yukawa matrices in (\ref{fieldanomdim}) become diagonal except for the case of $\boldsymbol{\gamma}^{Q_L}$. 
In this case one needs to distinguish between the up-type quark and the down-type quark in the doublet \cite{Heiles:2020plj}: \begin{equation} \begin{aligned} \boldsymbol{\gamma}^{u_L}&=-\frac{\alpha_3}{\pi}-\frac{9\alpha_2}{16\pi}-\frac{\alpha_1}{144\pi}+\frac{1}{32\pi^2}\Big[\text{diag}\left( y^{2}_{u},y^{2}_{c},y^{2}_{t} \right)+\boldsymbol{V}\text{diag}\left( y^2_{d},y^2_{s},y_{b}^{2}\right)\boldsymbol{V}^{\dagger}\Big]\,,\\ \boldsymbol{\gamma}^{d_L}&=-\frac{\alpha_3}{\pi}-\frac{9\alpha_2}{16\pi}-\frac{\alpha_1}{144\pi}+\frac{1}{32\pi^2}\Big[\boldsymbol{V}^{\dagger}\text{diag}\left( y^{2}_{u},y^{2}_{c},y^{2}_{t} \right)\boldsymbol{V}+\text{diag}\left( y^2_{d},y^2_{s},y_{b}^{2}\right)\Big]\,,\\ \end{aligned} \label{gammaleft} \end{equation} where $y_{q}$ is the Yukawa coupling of the quark $q$ and $\boldsymbol{V}=\boldsymbol{U}_{u}^{\dagger}\boldsymbol{U}_{d}$ is the CKM matrix. For the numerical estimates we present here we take into account only the top quark Yukawa coupling. All the other quark Yukawas, including those in $\boldsymbol{\gamma}^{u_R}$, have tiny effects in the resummation. We also neglect the Yukawa coupling of the leptons in $\boldsymbol{\gamma}^{\ell_R}$ and in $\boldsymbol{\gamma}^{L_L}$. This means in practice for the running of the Wilson coefficients in the mass basis the only relevant term including Yukawa couplings in the expressions in (\ref{fieldanomdim}) is $\text{diag}\left( y^{2}_{u},y^{2}_{c},y^{2}_{t} \right)$ as in the first line in (\ref{gammaleft}), which becomes $\text{diag}\left( 0,0,y^{2}_{t} \right)$ in this approximation. The evolution of the Yukawa coupling of the top quark is given by \cite{Grzadkowski:1987tf}: \begin{equation} \frac{d}{d\ln\mu}\,y_{t}(\mu)\,=\,\frac{9\,y^{3}_{t}}{32 \pi^2}\,-\,y_{t}\left( \frac{17\,\alpha_1}{48 \pi}+\frac{9\,\alpha_2}{16\pi}+\frac{2\,\alpha_3}{\pi}\right)\,. 
\end{equation} In the case of leptoquarks the full gauge group running must be included for consistent numerical estimates at leading order RG improved PT. \begin{figure}[t] \begin{center} \includegraphics[scale=0.86]{graph1} \end{center} \caption{Resummation effects on Wilson coefficients of the $\mathcal{O}(\lambda^2)$ operators for $S_1$ as a function of $M_{S_1}$ with top quark final state jets in all cases. The running is performed from the leptoquark scale to the top mass. The solid lines show the whole contribution and the dashed lines show the resummation only for the double logarithms. } \label{graph1} \end{figure} \par We now present numerical results for the resummation of the Wilson coefficients from the leptoquark scale to a lower scale for the $\mathcal{O}(\lambda^2)$ operators for the $S_1$ shown in (\ref{S1operators}). For these operators the largest effects come from $\bar{t}\ell$ final states. Therefore we fix the low scale to the top quark mass and we consider $M_{S_1}=3$ TeV. We numerically integrate the evolution function $\mathcal{U}(M_{S_1},\mu)$ such that the Wilson coefficients at different scales are related by: \begin{equation} \mathrm{C}^{t\ell}(m_t)=\mathrm{C}^{t\ell}(M_{S_1})\,\exp\, \Big[\mathcal{U}(M_{S_1},m_t)\Big]\,, \end{equation} where $\ell$ here stands for a lepton either left handed or right handed. We find the following results: \begin{equation} \begin{aligned} \mathrm{C}^{t\ell}_{S_1^{\ast}u_R^c\ell_R}(m_t)&\,\approx\,0.93 \,e^{0.02 i}\,\mathrm{C}^{t\ell}_{S_1^{\ast}u_R^c\ell_R}(M_{S_1})\\\, \mathrm{C}^{t\ell}_{S_1^{\ast}Q_L^cL_L}(m_t)&\,\approx\, 0.92\, e^{0.07 i}\,\mathrm{C}^{t\ell}_{S_1^{\ast}Q_L^cL_L}(M_{S_1}) \end{aligned} \end{equation} For the operator $\mathcal{O}^{ij}_{S_1d_R\nu_R}$ the running is practically independent of the specific final state lepton flavour. 
In this case we find: \begin{equation} \mathrm{C}^{ij}_{S_1d_R\nu_R}(m_t)\,\approx\, 0.96\, \mathrm{C}^{ij}_{S_1d_R\nu_R}(M_{S_1}) \end{equation} In Fig.(\ref{graph1}) we show how the resummation effects vary for different values of $M_{S_1}$. We compare the full running effects and the contribution only from terms multiplied by $\gamma_{\text{cusp}}^{(r)}$ for the full gauge group. In the latter case the single logarithmic terms coming from single-particle anomalous dimensions are neglected. In practice this is often the case, where one resums only the double logarithms which exponentiate. Though in Fig.(\ref{graph1}) it can be seen that this difference is at least of $\mathcal{O}(10\%)$. In fact this is a merit of the effective theory that RG methods allow for a consistent resummation of the large logarithms, both single and double logarithms. \subsection[Resummation effects on the triplet $S_3$] {\boldmath Resummation effects on the triplet $S_3$} \begin{figure} \begin{center} \includegraphics[scale=0.86]{graph2} \end{center} \caption{Variation of the resummation effects on the $C_{S_3Q^{c}_LL_L}$ with the mass of $S_3$, for left handed top quark and left handed lepton final states with initial scale around the leptoquark mass. The solid line shows the whole contribution and the dashed line represents only the resummation of the double logarithms.} \label{graph2} \end{figure} As presented in Section \ref{s3sec} at leading order the triplet leptoquark $S_3$ decays only into a left handed quark and left handed lepton. 
In this case the Wilson coefficient $\mathrm{C}^{ij}_{S_3Q^{c}_LL_L}$ obeys an RG equation with anomalous dimension: \begin{equation} \begin{split} {\boldsymbol{\Gamma}}_{S_3^{\ast}Q^{c}_LL_L}&=\left(- \frac{4}{3} \gamma^{(3)}_{\text{cusp}}-2\gamma_{\text{cusp}}^{(2)}-\frac{1}{9} \gamma_{\text{cusp}}^{(1)}\right) \ln\frac{\mu}{M_{S_3}}\,\\ &+\left( \frac{1}{4}\gamma_{\text{cusp}}^{(2)}-\frac{1}{12}\gamma_{\text{cusp}}^{(1)} \right)\left(\ln\frac{\mu^2}{M^2_{S_3}}+i\pi\right)+\gamma^ {S_3}+\left(\boldsymbol{\gamma}^{L_L},.\right)+ \left(.\,,\boldsymbol{\gamma}^{Q_L}\right) \end{split} \end{equation} where $\boldsymbol{\gamma}^{L_L}$, $\boldsymbol{\gamma}^{Q_L}$ are shown in (\ref{fieldanomdim}) and the field anomalous dimension $\gamma^{S_3}$ reads: \begin{equation} \gamma^{S_3}=-\frac{2\,\alpha_3}{3\pi}-\frac{\alpha_2}{\pi}-\frac{\alpha_1}{18\pi}\,. \end{equation} The QCD running for double logarithmic terms has not changed and it is indeed the same for the three leptoquarks since there are no QCD interactions between final states at $\mathcal{O}(\lambda^2)$. For a $3$ TeV leptoquark the effects are tiny in this case. They become more sizeable for $M_{S_3}\geq 4$ TeV. For instance for $M_{S_3}=4.5$ TeV we find: \begin{equation} \mathrm{C}^{t\ell}_{S_3Q^{c}_LL_L}(m_t)\,\approx\, 0.97\, e^{-0.02 i}\, \mathrm{C}^{t\ell}_{S_3Q^{c}_LL_L}(M_{S_3}) \end{equation} In Fig.(\ref{graph2}) we show how these effects change significantly when single logarithmic terms are not included. For all the mass range the difference accounts for a change of $\mathcal{O}(20\%)$ in the Wilson coefficients. \subsection[Resummation effects on the vector $U_1^{\mu}$] {\boldmath Resummation effects on the vector $U_1^{\mu}$} \begin{figure} \begin{center} \includegraphics[scale=0.86]{graph3} \end{center} \caption{Resummation effects on the Wilson coefficients of $\mathcal{O}(\lambda^2)$ operators for $U^{\mu}_{1}$ as a function of $M_{U_1}$. 
The results are for top quark and lepton final state for $\mathrm{C}_{U_1Q_LL_L}$ and $\mathrm{C}_{U_1d_R\ell_R}$ and top quark and right handed neutrino for $C_{U_1u_R\nu_R}$. In both cases the initial scale is set to the $M_{U_1}$. The solid lines show the full effects and the dashed lines take into account only the double logs.} \label{graph3} \end{figure} In a similar fashion we derive the anomalous dimensions of the leading order two jet operators for $U_1^{\mu}$ in (\ref{leadingU1operators}). We find that: \begin{equation} \begin{split} \boldsymbol{\Gamma}_{U_1Q_LL_L}&=\left(-\frac{4}{3}\gamma^{(3)}_{\text{cusp}}-\frac{4}{9}\gamma^{(1)}_{\text{cusp}}\right)\ln\frac{\mu}{M_{U_1}}\,\\ &+\left(-\frac{3}{4}\gamma_{\text{cusp}}^{(2)}+\frac{1}{12}\gamma^{(1)}_{\text{cusp}}\right)\left(\ln\frac{\mu^2}{M^{2}_{U_1}}+i\pi \right)+\gamma^{U_1}+\left(.\,,\boldsymbol{\gamma}^{Q}\right)+\left(\boldsymbol{\gamma}^{L}\,,.\right)\,,\\ \boldsymbol{\Gamma}_{U_1d_R\ell_R}&=\left( -\frac{4}{3}\gamma^{(3)}_{\text{cusp}}-\frac{4}{9}\gamma^{(1)}_{\text{cusp}}\right)\ln\frac{\mu}{M_{U_1}}-\frac{1}{3}\gamma^{(1)}_{\text{cusp}}\left( \ln\frac{\mu^2}{M^2_{U_1}}+i\pi\right)+\gamma^{U_1}\,\\ &+\left(.\,,\boldsymbol{\gamma}^{d_R}\right)+\left(\boldsymbol{\gamma}^{\ell_R}\,,.\right)\,,\\ \boldsymbol{\Gamma}_{U_1u_R\nu_R}&=\left(-\frac{4}{3}\gamma^{(3)}_{\text{cusp}}-\frac{4}{9}\gamma_{\text{cusp}}^{(1)}\right)\ln\frac{\mu}{M_{U_1}}+\gamma^{U_1}+\left(.\,,\boldsymbol{\gamma}^{u_R}\right)\,, \end{split} \end{equation} where the anomalous dimension of the leptoquark $U_1^{\mu}$ reads: \begin{equation} \begin{aligned} \gamma^{U_1}=&-\frac{2\,\alpha_3}{3\pi}-\frac{2\,\alpha_1}{9\pi}\,\\ \end{aligned} \end{equation} The results for top quark final states are shown in Fig.(\ref{graph3}) both for the complete resummation and for the separate double log contribution. Also in this case there is a significant difference of about $20\%$ in neglecting the single log resummation. 
For $M_{U_{1}}=3$ TeV we find the following numerical results: \begin{equation} \begin{aligned} \mathrm{C}^{t\ell}_{U_1Q_LL_L}(m_t)&\,\approx \,0.92\,e^{0.06i} \,\mathrm{C}^{t\ell}_{U_1Q_LL_L}(M_{U_1})\\\, \mathrm{C}^{t\ell}_{U_1d_R\ell_R}(m_t)&\,\approx 0.95\,e^{0.01i} \,\mathrm{C}^{t\ell}_{U_1d_R\ell_R}(M_{U_1})\,\\ \mathrm{C}^{t\nu}_{U_1u_R\nu_R}(m_t)&\,\approx 0.94 \,\mathrm{C}^{t\nu}_{U_1u_R\nu_R}(M_{U_1})\\\, \end{aligned} \end{equation} \subsection{Example of an analytic solution of the RG equation} At one loop contribution it is possible to derive an analytic solution of the RG equations of the Wilson coefficients for the full $SU(3)\times SU(2) \times U(1)$ interactions. Beyond one loop this is challenging because cusp anomalous dimensions and beta functions start to mix with each other. Here we show an example of the exact solution for the evolution of the Wilson coefficient for a NLL resummation. We consider the Wilson coefficient of the operator $\mathcal{O}^{ij}_{S_1^{\ast}u_R^c\ell_R}$ in (\ref{S1operators}) neglecting the $SU(2)$, $U(1)$ and Yukawa running. We define the anomalous dimension $\Gamma^{\text{QCD}}_{S_1^{\ast}u_R^c\ell_R} $ such that: \begin{equation} \Gamma^{\text{QCD}}_{S_1^{\ast}u_R^{c}\ell_R}=-\frac{4}{3} \gamma^{(3)}_{\text{cusp}}\, \ln\frac{\mu}{M_{S_1}}+\gamma^ {S_1}_{\text{QCD}}+ \gamma^{u_R}_{\text{QCD}}\,, \end{equation} where $\gamma^{u_R}_{\text{QCD}}=-{\alpha_3}/{\pi}$ and $\gamma^ {S_1}_{\text{QCD}}=-2\alpha_3/{3\pi}$. 
Then it can be shown that the following expression is a solution to the RG equation with anomalous dimension $\Gamma^{\text{\text{QCD}}}_{S_1^{\ast}u_R^{c}\ell_R}$ \cite{Becher:2007ty,Becher:2006nr}: \begin{equation} \begin{aligned} C_{S_1^{\ast}u_R^c\ell_R}^{ij}(\Lambda,M_{S_1},\mu_2)=&\left( \frac{\mu_1}{M_{S_{1}}}\right)^{-a_{\gamma_3}(\mu_1,\mu_2)}\,\exp\Bigg[\mathcal{S}(\mu_1,\mu_2)-a_{\gamma}(\mu_1,\mu_2) \Bigg]\,\\ & \times C_{S_1^{\ast}u_R^c\ell_R}^{ij}(\Lambda,M_{S_{1}},\mu_1) \end{aligned} \end{equation} where the Sudakov exponent $\mathcal{S}$ is given by: \begin{equation} \mathcal{S}(\mu_1,\mu_2)=-\frac{4}{3}\int_{\alpha_3(\mu_1)}^{\alpha_3(\mu_2)}\,d\alpha\,\frac{\gamma^3_{\text{cusp}}(\alpha)}{\beta(\alpha)}\,\int^{\alpha}_{\alpha_3(\mu_1)}\,\frac{d\alpha^{\prime}}{\beta(\alpha^{\prime})} \end{equation} and we have defined $\gamma=\gamma_{\text{QCD}}^ {S_1} + \gamma^{u_R}_{\text{QCD}}$ and $\gamma_3=-\frac{4}{3}\gamma^{(3)}_{\text{cusp}}$. The quantity $a_{\gamma_i}$ is defined as: \begin{equation} a_{\gamma_i}(\mu_1,\mu_2)=-\int_{\alpha_3(\mu_1)}^{\alpha_3(\mu_2)}\,d\alpha\,\frac{\gamma_i(\alpha)}{\beta(\alpha)} \end{equation} In the above solution $\mu_1$ should be of the order of $M_{S_1}$ so that the initial condition is free of large logarithms. 
Using the one loop expressions of the one-particle anomalous dimensions, two loop cusps and two loop QCD $\beta$-function the scale evolution of the Wilson coefficient $C_{S_1^{\ast}u_R^c\ell_R}^{ij}$ is then \cite{Becher:2007ty}: \begin{equation} \begin{aligned} C_{S_1^{\ast}u_R^c\ell_R}^{ij}(\Lambda,M_{S_1},\mu_2) &=C_{S_1^{\ast}u_R^c\ell_R}^{ij}(\Lambda,M_{S_1},\mu_1)\left( \frac{\mu_1}{M_{S_{1}}}\right)^{-a_{\gamma_3}}\,\\ & \times \exp \Bigg\{ \frac{4\pi}{\alpha_3(\mu_1 )} \left(1-\frac{1}{r}-\ln r\right) +\left(\frac{251}{21} - \pi^2\right) \left(1-r+\ln r \right)\,\\ &\hspace{1.3cm}+ \frac{13}{7}\ln^2 r -\frac{10}{21\pi}\ln r \Bigg\} \end{aligned} \end{equation} where $r=\alpha_3(\mu_2)/\alpha_3(\mu_1)$ and at LO in RG-improved PT $a_{\gamma_3}=(56/9\pi)\ln r$. \section{Matching for tree level Wilson coefficients} \label{sectionmatching} In this section we look at certain UV models for each of the leptoquarks we have considered and perform a tree level matching of the matrix elements into the corresponding SCET Lagrangians. We match the operators that describe the two body decays at leading order and subleading order in the parameter $\lambda$. We start with the renormalizable Lagrangian that describes the $S_1$ interactions. We follow a similar notation as in \cite{Sakaki:2013bfa} and include an additional term for a right handed neutrino: \begin{equation} \mathcal{L}_{S_1}= g_{1L}^{ij}\,\bar{Q}^{c,i}_{L}i\sigma_2L_{L}^{j}S_1^{\star} +g^{ij}_{1R}\,\bar{u}^{c,i}_{R}\ell_{R}^{j}S_1^{\star} +g^{ij}_{1\nu}\,\bar{d}^{c,i}_{R}\nu_{R}^{j}S_1^{\star} +\text{h.c.} \label{S1UV} \end{equation} where $g^{ij}$ is the coupling strength between a quark of generation $i$ and a lepton of generation $j$. 
Tree level matching of the matrix elements of the above Lagrangian into our effective Lagrangian in (\ref{Lagrangian1}) yields the following Wilson coefficients: \begin{equation} \begin{aligned} C_{S_1^{\ast}u_R^{c}\ell_R}^{ij}&=g^{ij}_{1R}\,\\ C^{ij}_{S_1^{\ast}Q^{c}_LL_L}&=g_{1L}^{ij}\,\\ C_{S_1^{\ast}d_R^c\nu_R}^{ij}&=g_{1\nu}^{ij} \end{aligned} \end{equation} Matching of the matrix elements of (\ref{S1UV}) onto the subleading order SCET Lagrangian (\ref{subS1}) gives vanishing Wilson coefficients for all the subleading operators for two body decays of the $S_1$: \begin{equation} \begin{aligned} &C^{(0)^{ij}}_{S_1^{\ast}L_L\Phi d_R}=C^{(0)^{ij}}_{S_1Q_L\Phi\nu_R}= C^{(1)^{ij}}_{S_1^{\ast}L_L\Phi d_R}=C^{(2)^{ij}}_{S_1^{\ast}L_L\Phi d_R}=0\\ &C^{(1)^{ij}}_{S_1Q_L\Phi\nu_R}=C^{(2)^{ij}}_{S_1Q_L\Phi\nu_R}=C^{(1)^{ij}}_{S_1d_R B\nu_R}=C^{(2)^{ij}}_{S_1d_R B\nu_R}=0 \end{aligned} \end{equation} This is a remnant of the fact that at leading power the $S_1$ couples only to the charge conjugate of the quark field, while the SM Higgs boson and the gauge bosons do not have conjugate particle vertices. In such a case it is not possible to get a hard propagator, which would have been integrated out in the effective theory. \par The full theory Lagrangian that describes the $S_3$ renormalizable interactions with SM fields is given by: \begin{equation} \mathcal{L}_{S_3}=g_{3L}^{ij}\,{\bar{Q^i}}^{c,a}_{L} \, \epsilon^{ab} \,S_{3}^{\ast bd}\,{L^{j,d}_{L}} +\text{h.c.} \end{equation} Then at tree level the Wilson coefficient from the SCET Lagrangian in (\ref{Ltripletleading}) reads: \begin{align} C_{S_3Q^{c}_LL_L}^{ij}=g_{3L}^{ij} \end{align} Also in this case the Wilson coefficients at subleading order are equal to zero. \par Lastly we look at the UV Lagrangian that describes the interactions of the vector leptoquark $U_1^{\mu}$. Since this is a Lorentz vector there are more subtle issues that arise regarding the UV completed model. 
The cases that are usually considered in literature are either a gauge model where $U_1^{\mu}$ arises from the breaking of a gauge symmetry into the SM gauge group \cite{Pati:1974yy,DiLuzio:2017vat} or strongly interacting models \cite{Baker:2019sli}. The most general Lagrangian describing the interaction of the leptoquark $U_1^{\mu}$ reads: \begin{equation} \begin{split} \mathcal{L}_{U_1} &= \frac{g_U}{\sqrt{2}}\left( \beta^{ij}_L\,\bar{Q}_{L}^{i}\gamma_{\mu}L^{j}_{L} U_1^{\mu} + \beta^{ij}_R\,\bar{d}_R^{i}\gamma_{\mu}\ell_R^{j}U_1^{\mu} \right) +g_{U}^{\nu}\,\beta_{\nu}^{ij}\,\bar{u}_{R}^{i}\gamma_{\mu}\nu_R^{j}U_{1}^{\mu}\,\\ &-ig_s(1-\kappa_U)U^{\dagger}_{1\mu}T^{a}U_{1\nu}G^{a\mu\nu}-ig_Y\frac{2}{3}(1-\tilde{\kappa}_U)U^{\dagger}_{1\mu}U_{1\nu}B^{\mu\nu}\,+\text{h.c.} \end{split} \label{LU1} \end{equation} The last two terms in (\ref{LU1}) describe the interaction of the $U_1^{\mu}$ with other gauge fields but they do not contribute to the matrix elements of the SCET operators for a tree level matching. We require that in the collinear limit for the same initial and final states the matrix elements of the Lagrangian in equation (\ref{LagrangianU1}) and in (\ref{LU1}) give the same result. 
Then we find the following Wilson coefficients of the $\mathcal{O}(\lambda^2)$ operators at tree level: \begin{equation} \begin{aligned} C^{ij}_{U_1Q_LL_L}&=\frac{g_U}{\sqrt{2}}\beta_L^{ij}\,\\ C^{ij}_{U_1d_R\ell_R}&=\frac{g_U}{\sqrt{2}}\beta_R^{ij}\,\\ C^{ij}_{U_1u_R\nu_R}&=\frac{g^{\nu}_{U}\beta^{ij}_{\nu}}{\sqrt{2}}\,\\ \end{aligned} \end{equation} For the Wilson coefficients at $\mathcal{O}(\lambda^3)$ in this case we find the non-trivial results: \begin{equation} \begin{aligned} {C_{U_1Q_L\ell_R}^{(0)ij}}&=0\,,\\ {C_{U_1Q_L\ell_R}^{(1)ij}}&=-i \frac{g_U}{\sqrt{2}}\,\beta_{R}^{ij}\,y_{d_i}\frac{2\Lambda}{M_{U_1}(1+u)^2}\,,\\ {C_{U_1Q_L\ell_R}^{(2)ij}}&=-i \frac{g_U}{\sqrt{2}}\,\beta_{L}^{ij}\,y_{\ell_j}\frac{2\Lambda u}{M_{U_1}(1+u)^2}\,,\\ \end{aligned} \end{equation} where $y_{d_i}$ is the Yukawa coupling of the down-type quark in generation $i$ and $y_{\ell_j}$ is the Yukawa coupling of the lepton $\ell$ of generation $j$. Here the variable $u$ represents the momentum fraction that is carried by the scalar field $\Phi$, which is emitted in the same jet as the final state $Q_L$. \section{Conclusions} \label{conclusions} In this work we have applied a SCET framework to present a detailed discussion on the decay rates of three beyond-SM particles: two scalar leptoquarks $S_1$, $S_3$ and a vector leptoquark $U_1^{\mu}$. A consistent analysis of this problem requires treating the leptoquarks as heavy degrees of freedom that interact with their lighter decay products described by SCET operators. We have shown that at leading order in the effective theory the leptoquarks decay into two SM particles and in the case of $U_1^{\mu}$ and $S_1$ a right handed neutrino is also allowed as a final state. In addition we have presented the subleading power two jet operators and the leading power Lagrangians for three jet final states at $\mathcal{O}(\lambda^3)$. 
\par We have computed all the leading and sub-leading order two body decay rates together with the differential decay rates for three body decays for the $S_1$, $S_3$ and $U_{1}^{\mu}$. We have used RG equations of the SCET operators at leading order to resum the large logarithms in their Wilson coefficients at next to leading logarithmic order. We have given numerical estimates of these effects on the decay rates for some of the decays with most phenomenological interest. We have found that for the two jet operators, for all the three leptoquarks, there is a significant effect coming from the single logarithmic terms in the running of the Wilson coefficients. The decay rates would change by as much as about $20\%$ if the single logarithmic terms are not properly resummed. We have observed that the leading power two jet decays of the scalar leptoquark $S_1$ receive the largest correction from resummation. Lastly we have done a matching procedure of our effective Lagrangians for the leptoquarks $S_1$, $S_3$ and $U_1^{\mu}$ into three corresponding extensions of the SM with these heavy particles and shown the relations between the Wilson coefficients in our effective theory and various coupling constants in these models. \par With this work we have extended the application of the SCET-BSM framework developed in \cite{Alte:2018nbn} to non-singlet exotic particles. We have studied leptoquarks which are considered main candidates for solving several observed deviations from the SM in the flavour sector. On application grounds this work provides an estimation of the effects of resummation on the main decay rates of the singlet leptoquark $S_1$, the triplet $S_3$ and the vector leptoquark $U_1^{\mu}$. \subsubsection*{Acknowledgments} M.N.~thanks Gino Isidori, the particle theory group at Zurich University and the Pauli Center for hospitality during a sabbatical stay. 
This research was supported by the Cluster of Excellence {\em Precision Physics, Fundamental Interactions and Structure of Matter\/} (PRISMA${}^+$ -- EXC~2118/1) within the German Excellence Strategy (project ID 39083149). \newpage \section{Introduction} Leptoquarks are hypothetical particles that couple both to leptons and quarks and appear in several extensions of the Standard Model (SM) \cite{Carpentier:2010ue,Buchmuller:1986zs,Dorsner:2017ufx,Leurer:1993ap}. They are color triplets and were initially predicted in the Pati-Salam model \cite{Pati:1974yy} and other unified theories \cite{Georgi:1974sy,Georgi:1974yf,Fritzsch:1974nn}. Leptoquark vertices can violate the lepton universality in the SM and introduce generation changing interactions. In recent years the observation of the $B$-meson anomalies in $R_{(D^{\ast})}$ and $R_{(K)}$ measurements \cite{Aaij:2014ora, Aaij:2017vbb} has raised an interest in both vector (spin 1) and scalar (spin 0) leptoquarks. Indeed leptoquarks have become prominent candidates responsible for these observed deviations from the SM. Other authors have used leptoquarks as a possible solution to the long standing problem of the $(g-2)_{\mu}$ anomaly \cite{Davier:2010nc,Cheung:2001ip,Chakraverty:2001yg,Bauer:2015knc}. In several of the theoretical models that try to fit the anomalies the predicted leptoquark particles are a vector $U_1^{\mu}$ and two scalars $S_1$ and $S_3$, where $S_1$ is a singlet under $SU(2)_L$ and $S_3$ transforms as a triplet under $SU(2)_L$ \cite{Cox:2016epl,Calibbi:2017qbu,Blanke:2018sro,Buttazzo:2017ixm,Greljo:2018tuh,Barbieri:2015yvd,Bordone:2018nbg, Hiller:2017bzc,Sakaki:2013bfa,Angelescu:2018tyl,Bauer:2015knc,Crivellin:2017zlb,Davidson:1993qk}. \par Current searches for leptoquark pair production at LHC at $\sqrt{s}=13$ TeV have set a lower mass limit at around $1.7$ TeV \cite{Sirunyan:2018kzh,Aad:2020iuy}. 
Considering also the future upgrade of the LHC, if leptoquarks exist, in principle it would be possible to have one of these particles produced on-shell. Then the next natural step would be to study their properties in an effective field theory (EFT) approach. Soft collinear effective field theory (SCET) offers a consistent framework to describe the decay rates of these particles. \begin{comment} In the conventional EFT approach local operators of dimension $D\geqslant 5$ are added to the SM Lagrangian to build an EFT of the SM, the so called SMEFT\cite{Buchmuller:1985jz,Weinberg:1979sa,Grzadkowski:2010es}. These operators describe in a model independent way the low energy physics when off-shell higher degrees of freedom have been integrated out. The higher dimension operators are suppressed by ratios of the scales present in the problem, namely $v/\Lambda$, where $v\sim 245 GeV$ is the electroweak scale and $\Lambda$ is the higher scale of $\mathcal{O}(TeV)$. Instead in the scenario we are considering the on-shell discovered leptoquark with mass $M\sim\Lambda$ would be only one of a few other heavy particles all with a mass range around the large scale $\Lambda$. Then the argument that higher dimension operators would be suppressed by $\mathcal{O}(1/{M^{D-4}})$ is not valid any more and a local EFT would fail to systematically describe the decay rates of this particle. The decay products will carry large momentum fraction of the order of the mass of the heavy leptoquark and therefore operators containing higher order derivatives would still contribute at the order $\mathcal{O}({M}/{\Lambda})\sim 1$ even though naively they are of higher dimensions. To have a consistent description of the problem it requires not only disentangling the scales but also being able to include all these non-suppressed contributions from higher order operators. One framework that successfully achieves both of these tasks is the soft collinear effective field theory (SCET). 
\end{comment} SCET is a non-local EFT that quantitatively describes the decays of heavy particles into light and energetic ones, initially developed to study the $B$-meson decays \cite{Bauer:2000yr,Bauer:2001yt,Bauer:2000ew}. The approach of using SCET to analyse the decays of beyond the SM particles was initially introduced in \cite{Alte:2018nbn} as the SCET-BSM framework for the decays of a heavy singlet and was later applied to a model with vector-like quark mediators in \cite{Alte:2019iug}. In this work we use the SCET formalism to build the effective Lagrangians that describe the decays of the leptoquarks $U_1^{\mu}$, $S_1$ and $S_3$. We construct the operator basis for two and three body final states at leading and subleading order in the power counting parameter $\lambda$. The parameter $\lambda$ is of the order $ v/\Lambda$ where $\Lambda$ is some large scale, $v$ is the electroweak scale and $\Lambda \gg v$. In addition we use renormalization group techniques to resum the QCD and electroweak large logarithms in the Wilson coefficients of leading order operators. \par Here we assume that a leptoquark can couple to different families of leptons and quarks at the same time, which is different from the original assumption on leptoquark couplings in the Buchmüller-Rückl-Wyler model \cite{Buchmuller:1986zs}. The final states consist of SM particles and, to make the discussion more interesting, we also allow for the existence of a light right handed neutrino in the particle spectrum. This is a singlet under the SM gauge group $\nu_R(1,1,0)$ and enters in several models with neutrino mass generation \cite{Khalil:2006yi,Abbas:2007ag}. \par We begin in Section \ref{BasicSCET} with a short introduction on the basic elements of SCET relevant for this work. In Section \ref{HSEFT} we introduce heavy field effective theory that we need to describe gauge boson interactions of charged heavy scalars and vectors. 
Then in Sections \ref{sections1}, \ref{sections3} and \ref{sectionU1} we construct the operator basis for the leptoquarks $S_1$, $S_3$ and $U_1^{\mu}$ at $\mathcal{O}(\lambda^2)$ and $\mathcal{O}(\lambda^3)$. In Section \ref{secwilsoncoeff} we show the running of the Wilson coefficients of the operators and sum their large logarithms using renormalization group equations. Lastly in Section \ref{sectionmatching} we present a tree level matching for certain extensions of the SM and we finally conclude our results in Section \ref{conclusions}. We collect some of the expressions concerning technical details in the appendices. \section{Basic elements of SCET} \label{BasicSCET} The central idea of SCET lies in identifying the leading order momentum regions with respect to the parameter $\lambda$ for any given process and assigning those momentum regions to quantum fields. The relevant momenta for the on-shell decays of a heavy particle are the collinear momenta carried by the energetic decay products, the hard momenta that have been integrated out and the soft momenta. The large energy flow in the final states defines the so called collinear directions $ \vec{n}_i$. For each such direction we define two vectors $n_i^{\mu}=\lbrace 1, \vec{n}_i \rbrace$ and $\bar{n}_i^{\mu}=\lbrace 1, - \vec{n}_i \rbrace$ such that $n_i\cdot \bar{n}_i=2$. The freedom to rescale these light-like reference vectors leads to the so called reparametrization invariance in SCET \cite{Manohar:2002fd}, which is a remnant of Lorentz invariance. All the operators of a SCET Lagrangian must be reparametrization invariant. 
\par The four momentum of a particle moving in the $n_i$ collinear direction is written in terms of the reference vectors \begin{equation} p^{\mu}_{i}=p_i\cdot\bar{n}_i\,\frac{n_i^{\mu}}{2}+p_i\cdot n_i\,\frac{\bar{n}_i^{\mu}}{2}+{p_i}_{\perp}^{\mu}\,, \end{equation} The components of the collinear momenta scale as $(p_i\cdot n_i,p_i\cdot \bar{n}_i,{p_i}_{\perp})\sim M(\lambda^2,1,\lambda)$, where $M$ is the mass of the decaying leptoquark. In fact $M$ represents the scale of a whole physics sector that has been integrated out and in principle could contain other heavy particles with masses around the scale $M$. The soft momentum components are defined such that they all vanish in the limit where $\lambda\rightarrow 0$. The exact $\lambda$- scaling depends on the specific process but for most cases they are either ultra-soft, where $k_{us}\sim M(\lambda^2,\lambda^2,\lambda^2)$ or soft, where $k_{s}\sim M(\lambda,\lambda,\lambda)$. \par The fields in SCET have a well defined scaling with respect to the power counting parameter $\lambda$. Acting with an arbitrary number of anti-collinear derivatives on a collinear field leaves its $\lambda$-scaling unchanged. Then a collinear field $\psi_{n_i}(x)$ can contain a series of such derivatives. To account for this effect operators built out of collinear fields are non-local along the light like direction. For instance: \begin{equation} \psi_{n_i}(x+t\bar{n}_i)=\sum_{k=0}^{\infty}\,\frac{t^{k}}{k!}\,(\bar{n}_{i}\cdot\partial)^{k}\,\psi_{n_i}(x)\,, \label{collinearfield} \end{equation} where $t$ is the displacement in the anti-collinear direction. In the Lagrangian operators built out of such collinear fields always appear multiplied by Wilson coefficients that also depend on the $t$ parameters, and these products are integrated over the variables $t$. In this way an arbitrary dependence on the large derivatives $\bar{n}_{i}\cdot\partial$ is allowed. 
This property of collinear fields makes SCET a non-local EFT and it is therefore necessary to introduce Wilson lines in each collinear direction $n_i$ defined as \cite{Bauer:2002nz,Eichten:1989zv}: \begin{equation} W^{(A)}_{n_i}(x)\,=P\, \exp\left[i\, g_{A}\,t^{a}_{A}\,\int_{-\infty}^0 ds\, \bar{n}_i \cdot A\,^{a}_{n_i}\,(x+s\bar{n}_i)\right]\,, \label{Wilsonline} \end{equation} where $A_{n_i}$ is a collinear gauge field and $t^{a}_{A}$ is the corresponding group generator in the representation of the field on which the Wilson line acts. The $g_{A}$ here is the gauge coupling constant corresponding to the gauge field $A$. For the gauge group $U(1)_{Y}$ the gauge field $A^{a}$ is replaced by the gauge field $B$ and the generator is the hypercharge $Y$ of the field. \par In SCET collinear spinor fields in $n_i$ direction are defined with the help of a projector operator $P_{n_i}=\frac{\slashed{n_i}\slashed{\bar{n_i}}}{4}$, such that this operator projects out only the large momentum component of the spinor and $P_{n_i}^2=P_{n_i}$. Then at leading order in $\lambda$ a collinear SM fermion scales as $\mathcal{O}(\lambda)$ and it is defined as: \begin{equation} \Psi_{n_i}(x)=\frac{\slashed{n}_i\slashed{\bar{n}}_i}{4}\,W_{n_i}^{\dagger}(x)\,\psi(x)\,. \label{fielddef} \end{equation} Here the Wilson line $W_{n_i}$ without the superscript $(A)$ is a product of Wilson lines $W^{(A)}_{n_i}$, one for each gauge group under which the field $\psi (x)$ transforms. The fermionic field $\Psi_{n_i}(x)$ obeys the constraint: \begin{equation} \slashed{\bar{n}}_i\Psi_{n_i}(x)=0 \end{equation} A SM collinear scalar field is also dressed in Wilson lines such that: \begin{align} \label{scalarfielddef} \Phi_{n_i}(x)&=W_{n_i}^{\dagger }(x)\,\phi(x)\,, \end{align} and it scales $\sim \lambda$. These collinear fields in SCET defined in terms of Wilson lines are referred to as collinear gauge invariant building blocks. 
The gauge invariant building block for a boson $\mathcal{A}_{n_i}^{\mu}$ is defined as the corresponding field strength tensor sandwiched between two Wilson lines \cite{Bauer:2001yt,Hill:2002vw}: \begin{equation} \mathcal{A}_{n_i}^{\mu}(x)=g_A\,\int^0_{-\infty} ds\, \bar{n}_{i,\nu} \left[W_{n_i}^{(A)^{\dagger}}\,F_{n_i}^{\nu\mu}\,W_{n_i}^{(A)}\right](x+s\bar{n}_i) \label{scetgaugeboson} \end{equation} For an Abelian gauge group such as $U(1)_Y$ the definition simplifies to: \begin{equation} \mathcal{B}_{n_i}^{\mu}(x)=g_{B}\,Y\,\int_{-\infty}^{0} ds\, \bar{n}_{i\nu}\,B^{\nu\mu}_{n_i}\,(x+s\bar{n}_i)\,. \label{Bgaugeinv} \end{equation} The different components of a vector field in SCET follow the same scaling as the corresponding momentum component. Then from the definition of the Wilson line in (\ref{Wilsonline}) a collinear field can emit any number of gauge bosons along its collinear direction suppressed only by powers of the coupling constant. In addition since $\bar{n}_i^{\mu}$ in equation (\ref{scetgaugeboson}) is a light-like vector, the gauge invariant building block for a gauge field obeys the following constraint: \begin{equation} \bar{n}_i\cdot \mathcal{A}_{n_i}=0\,. \label{gauge fixing} \end{equation} The remaining components of a collinear gauge field with momentum $p\sim M(\lambda^2,1,\lambda)$ will scale as the corresponding momentum component \cite{Becher:2014oda}: \begin{equation} \mathcal{A}_{n_i\,,\perp}^{\mu}\, \sim\, \lambda ,\hspace{3.6cm} n_i\,\cdot\,\mathcal{A}_{n_i}\,\sim\, \lambda^2\ \,, \label{gaugescaling} \end{equation} where $\mathcal{A}_{n_i\,,\perp}^{\mu}\,$ is defined as: \begin{equation} \mathcal{A}_{n_i\,,\perp}^{\mu}=\mathcal{A}_{n_i}^{\mu}-n_i\,\cdot\mathcal{A}_{n_i}\,\frac{\bar{n}_i^{\mu}}{2}\,. \end{equation} The component $n_i\cdot\mathcal{A}_{n_i}$ is power suppressed and in fact it can always be eliminated using a field redefinition \cite{Marcantonini:2008qn}. 
This implies that only the transverse component of a collinear gauge boson will be produced in the decay of a heavy particle. \par In principle it is possible to introduce SCET fields to describe also SM particles carrying soft momenta and these fields have well defined scaling with respect to $\lambda$, similarly to the collinear fields. In this work though we are interested in analysing two and three particle final states, at leading and subleading order in $\lambda$. Operators with soft fields are further suppressed as they appear starting from $\mathcal{O}(\lambda^3)$. The additional particle we allow as a final state, the right handed neutrino, is represented by a collinear field $\nu_R(x)$. Contrary to the SM fields this collinear field is not dressed in Wilson lines since it transforms trivially under the gauge group. For a heavy particle charged under the SM such as leptoquarks a consistent description requires them to be treated within a heavy particle effective theory framework, similarly to the heavy quark effective theory (HQET) \cite{Eichten:1989zv,Georgi:1990um,Neubert:1993mb}. In the next section we present a short overview of the heavy particle effective theory, which we will apply for all three leptoquarks. \section{Heavy Particle Effective Theory} \label{HSEFT} In our framework of describing the decays of leptoquarks into SM particles we are integrating out the heavy degrees of freedom around the scale of the leptoquark mass. This restricts the interactions of these heavy particles only through soft momentum transfer $k\sim M(\lambda^2,\lambda^2,\lambda^2)$ such that the leptoquarks would still remain on shell via such interactions. The four momentum of such a particle with mass $M$ can be written: \begin{equation} p^{\mu}=Mv^{\mu}+k^{\mu}\,, \end{equation} where $v^{\mu}=(1,0,0,0)$, $k^{\mu}$ is some residual momentum of the order of the EW scale in this case. 
In the on shell limit the heavy scalar can be described by a quantum field $S_v(x)$ such that it admits the field redefinition where $S(x)\rightarrow e^{-iM_{S}v\cdot x}{S}_v(x)$. Inserting this expression in the Lagrangian of a complex scalar field: \begin{equation} \mathcal{L}_{\text{scalar}}=(D^{\mu}S)^{\dagger} \,D_{\mu}S-M^2\,S^{\dagger}S, \end{equation} we get: \begin{equation} \label{LHSET} \mathcal{L}_{\text{scalar}}=2M_{S}\,{S}_v^{\dagger}\,\left( i v\cdot D{S}_v\right) + (D^{\mu}{S}_v)^{\dagger}D_{\mu}{S}_v, \end{equation} where $D_{\mu}=\partial _{\mu}-igG_{\mu}^at^a-ig_2W_{\mu}^a\tau^a-ig_1YB_{\mu}$. The covariant derivative in this case will pick up only the soft component of the momentum $p^{\mu}$ since $S_{v}(x)=e^{-ix\cdot k}S_v(k)$. The second term in (\ref{LHSET}) is suppressed by $(1/M_{S})$ relative to the first term. At leading power then the scalar Lagrangian becomes the effective Lagrangian $\mathcal{L}_{\text{HSET}}$ describing the propagation of the heavy field $S_{v}$ such that: \begin{equation} \label{LHSETfinal} \mathcal{L}_{\text{HSET}}=2M_{S}\,\Bigg[{S}_v^{\dagger}\,\left( i v\cdot D{S}_v\right) +\mathcal{O}\left(\frac{1}{M_{S}}\right) \Bigg] \end{equation} The physical quantities described by $\mathcal{L}_{\text{HSET}}$ are mass independent at leading order. This observation is similar to HQET and the accidental symmetries that arise there \cite{Neubert:1993mb}. This is due to the fact that the exchange of an ultra soft gauge boson cannot probe the quantum numbers of the particle, instead one would need a hard momentum exchange. In the analysis for the two scalar leptoquarks $S_1$ and $S_3$ we will neglect the second term in (\ref{LHSET}). \par In a similar fashion, a consistent description of the decay rates of a heavy vector particle such as $U_1^{\mu}$ requires a heavy vector effective theory (HVET) that separates the leading power contribution from the vector field $U_1^{\mu}(x)$. 
We separate the transverse and longitudinal components of the vector $U_1^{\mu}$ using the projector operators defined in terms of the reference vector $v^{\mu}$: \begin{equation} T^{\mu\nu}_{\parallel}={v^{\mu}v^{\nu}}\,\,,\hspace{2cm} T^{\mu\nu}_{\perp}=g^{\mu\nu}-{v^{\mu}v^{\nu}} \end{equation} where $v^{\mu}$ is a reference vector in the direction of the four momentum of $U_1^{\mu}$. With this definition of $v^{\mu}$ we have $ v^2=1$. Then: \begin{equation} U_{1\parallel}^{\mu}={T_{ \parallel}}_{ \mu\nu} U_{1}^{\nu}\,\,,\hspace{2cm} U_{1\perp}^{\mu}={T_{\perp}}_{\mu\nu}U_{1}^{\nu}\,, \end{equation} where $U_{1\parallel}^{\mu}$ is the component parallel to the direction of four momentum of the leptoquark and $U_{1\perp}^{\mu}$ is the component perpendicular to its four momentum. Looking at the two point correlation functions \cite{Becher:2014oda} for these two fields it is not difficult to show that the $U_{1\parallel}^{\mu}$ is power suppressed compared to $U_{1\perp}^{\mu}$. This means at leading power the $U_{1}^{\mu}$ is produced perpendicularly polarized and $v\cdot U_{1}=0$. Its longitudinal component is integrated out in the effective Lagrangian. We derive this Lagrangian starting from the most general gauge invariant Lagrangian for a massive vector field $U_1^{\mu}(x)$\footnote{The minus sign in front of the second term is necessary to get the equation $D_{\mu}U^{\mu}_1=0$ in the massless limit.}: \begin{equation} \mathcal{L}=(D^{\mu}{U_1}^{\nu})^{\dagger}\,D_{\mu}{U_1}_{\nu}-(D^{\nu}{U_1}^{\mu})^{\dagger}\,D_{\mu}{U_1}_{\nu}-{M^2}\,{{U_1}^{\mu}}^{\dagger}{U_1}_{\mu} \label{KinetictermU1} \end{equation} with $D_{\mu}=\partial_{\mu}-igG_{\mu}^{a}t^{a}-\frac{2}{3}i g_1B_{\mu} $. We perform a field transformation on $U_1^{\mu}(x)$ such that: $U_1^{\mu}(x)\rightarrow e^{-i\,M_{U_1}v\cdot x}U_{1v}^{\mu}(x)$, where $U_{1v}^{\mu}(x)$ contains only the soft momentum fluctuations. 
Then the Lagrangian that describes the interactions of the heavy field $U_{1v}^{\mu}(x)$ reads: \begin{equation} \mathcal{L}=2 M_{U_{1}}\,{U_{1v}^{\mu}}^{\dagger}\left(i v\cdot D {U_{1v}}_{\mu}\right)+(D_{\mu}{U_{1v}}_{\nu})^{\dagger}(D^{\mu}{U_{1v}}^{\nu}) -(D^{\nu}{U_{1v}}^{\mu})^{\dagger}(D_{\mu}{U_{1v}}_{\mu})+\mathcal{L}\left(v\cdot U_{1}\right)\,, \label{HVEFT} \end{equation} where the second and the third term are suppressed relative to the first one by $1/M_{U_1}$. The $\mathcal{L}\left(v\cdot U_{1}\right)$ contains power suppressed terms that are integrated out. We then define the HVET Lagrangian: \begin{equation} \begin{aligned} \mathcal{L}_{\text{HVET}}&=2 M_{U_{1}}\,\Bigg[ {U_{1\perp v}^{\mu}}^{\dagger}\left(i v\cdot D {U_{1 v\perp}}_{\mu}\right)+\mathcal{O}\left(\frac{1}{M_{U_1}}\right)\Bigg]\,\\ & \equiv 2 M_{U_{1}} \Bigg[{T_{\perp}}_{\mu\nu}\Big[\,{U_{1 v}^{\mu}}^{\dagger}\left(i v\cdot D U_{1 v}^{\nu}\right)\Big]+\mathcal{O}\left(\frac{1}{M_{U_1}}\right)\Bigg]\,, \end{aligned} \end{equation} where ${U_{1\perp v}}_{\mu}$ is the perpendicular component of the field ${U_{1v}}_{\mu}$. The leading term in the above Lagrangian is the same as the one in (\ref{LHSET}). In other words at leading power the renormalization of the heavy vector field $U_{1v}^{\mu}(x)$ and of its interactions with SM particles is similar to renormalizing the field and the interactions of a heavy scalar. \section{\boldmath SCET formalism for the scalar leptoquark $S_1(3,1,-\frac{1}{3})$} \label{sections1} The scalar leptoquark $S_1$ is a color triplet, an $SU(2)_L$ singlet and has hypercharge $Y=-1/3$. It couples to SM particles similarly to a right handed down type quark. This particular leptoquark has been studied as a viable solution both to the flavour anomalies and the $(g-2)_{\mu}$ anomaly. Note that its quantum numbers allow the $S_1$ to couple in operators that would induce proton decays though we neglect those operators here. 
In the literature they are usually suppressed assuming the realization of certain symmetries such as Peccei-Quinn symmetry or other discrete symmetries \cite{Bauer:2015knc,Bajc:2005zf,Cox:2016epl}. \subsection[Leading power two jet operators for $S_1$]{\boldmath Leading power two jet operators for $S_1$ }\label{Sec1} We start with the SCET Lagrangian that describes the decays of the $S_1$ at leading order in scale ratios $\lambda$. The decay products are all collinear gauge invariant building blocks presented in the introduction and the leptoquark is described within the heavy scalar effective theory by the heavy field ${S_1}_v(x)$. At lowest order in $\lambda$ the symmetries allow for $S_1$ to couple to two collinear fermions moving in opposite directions so the leading order operators are $\mathcal{O}(\lambda^2)$. We use the subscript ${n_i}$ to denote a collinear field in SCET moving in the $n_i$ direction. Then the SCET Lagrangian at $\mathcal{O}(\lambda^2)$ reads: \begin{equation} \begin{aligned} \label{Lagrangian1} \mathcal{L}_{S_1}^{(\lambda^2)}\,=&\,C^{ij}_{S_1^{\ast}u^{c}_{R}\ell_R}\, {\bar{u}}\,^{c,i}_{R,n_1}\, \ell^j_{R,n_2} \,S_{1v}^{\ast}\, +\,C^{ij}_{S_1^{\ast}Q^{c}_{L}L_L}\,\bar{Q}\,^{c,i}_{L,n_1}\,i\sigma_2\,L^j_{L,n_2}\,S_{1v}^{\ast} \,\\ &+ C^{ij}_{S_1^{\ast}d^{c}_{R}\nu_R}\,\bar{d}\,^{c,i}_{R,n_1}\, \nu_{R,n_2}^j\,S^{\ast}_{1v}\,+\,(n_1\leftrightarrow n_2)\, +\,\text{ h.c.}\,, \end{aligned} \end{equation} where $C^{ij}_{S_1f_1f_2}$ are the Wilson coefficients of the corresponding operators. We label the operators and their Wilson coefficient by their field content. The fields $Q_{L,n_i}$ and $L_{L,n_i}$ are the collinear quark and lepton doublets while $u_{R,n_i}$ and $d_{R,n_i}$, $\ell_{R,n_i}$ and $\nu_{R,n_i}$ stand for up and down type collinear quarks, right handed collinear lepton and right handed collinear neutrino respectively. The indices $i,j$ where $\lbrace i,j\rbrace\in \lbrace 1,2,3\rbrace$ label the fermion families. 
As mentioned before we are considering here the most general case where the leptoquark can decay into a quark and a lepton of different generations. This is the case which gives rise to flavour changing neutral currents (FCNC) in the model \cite{Mitsou:2004hm}. The fields that carry a superscript $c$ are the charge conjugate field defined as $\Psi^c=C\bar{\Psi}^T$ with $C$ being the charge conjugate operator. As a result all the operators in (\ref{Lagrangian1}) violate fermion number conservation by $\Delta F=2$. The above Lagrangian contains all the non-vanishing operators at $\mathcal{O}(\lambda^2)$ that are SM gauge invariant, Lorentz invariant and reparametrization invariant in SCET. For simplicity we keep the coordinate and scale dependence of the fields and the Wilson coefficients implicit but according to equation (\ref{collinearfield}) the above operator products should be understood as products of non-local fields. Considering for instance the first term in (\ref{Lagrangian1}) we would have: \begin{equation} C^{ij}_{S_1^{\ast}u^{c}_R\ell_R} \,\bar{u}^{c,i}_{R,n_1} \,\ell^j_{R,n_2} S_{1v}^{\ast}\equiv \int ds \,dt\, \bar{C}_{S_1^{\ast}u^{c}_{R}\ell_R}^{ij}\,(s,t,\Lambda,\mu)\,\bar{u}^{c,i}_{R,n_1}\,(x+s\bar{n}_1)\, \ell^j_{R,n_2}(x+t\bar{n}_2)\, S_{1v}^{\ast}(x)\,, \end{equation} where $\Lambda$ represents the large scale that has been integrated out and $\mu$ is the factorization scale of the operators. Inserting the exponential form of the series in (\ref{collinearfield}) and apply it on the Fourier transform of the fields we end up with the following: \begin{equation} \int\, ds\, dt \,\bar{C}_{S_1^{\ast}u^{c}_{R}\ell_R}^{ij}(s,t,\Lambda,\mu)\,e^{i t\bar{n}_1\cdot\mathcal{P}_1}\, e^{i s\bar{n}_2\cdot\mathcal{P}_2} \,\bar{u}^{c,i}_{R,n_1}(x)\, \ell^j_{R,n_2}(x) \,S_{1v}^{\ast}(x), \end{equation} where $\mathcal{P}_i$ is a momentum label. 
This is a generalization to the four momentum $p_i$ carried by the field with the index $i$ where now $\mathcal{P}_i$ denotes the total momentum carried by all the fields with the index $i$ and $\bar{n}_i\cdot \mathcal{P}_i$ will pick up the total large momentum component in the direction $n_i$. The Wilson coefficients appearing in the Lagrangian in (\ref{Lagrangian1}) are defined as the Fourier transform of the Wilson coefficients $\bar{C}(s,t,\Lambda,\mu)$ such that: \begin{equation} C\,\equiv\, C(\bar{n}_1\cdot\mathcal{P}_1,\bar{n}_2\cdot\mathcal{P}_2,\Lambda,\mu)\,=\,\int\, ds\, dt\, \bar{C}(s,t,\Lambda,\mu)\,e^{is\bar{n}_1\cdot\mathcal{P}_1}\,e^{it\bar{n}_2\cdot\mathcal{P}_2}\,. \label{Wilsoncoeff} \end{equation} Using arguments of Lorentz and reparametrization invariance it follows that at leading order the dependence of $C\,(\bar{n}_1\cdot\mathcal{P}_1,\bar{n}_2\cdot\mathcal{P}_2,\Lambda,\mu)$ on the momenta can only be proportional to the operator $\mathcal{P}^2$, where $\mathcal{P}$ is the operator carrying the total momentum of all the final states, whose eigenvalue is the mass of the leptoquark $M$ for the two jet operators \cite{Alte:2018nbn}. From now on it is implied that all the Wilson coefficients of the two jet operators are defined as in (\ref{Wilsoncoeff}). Because of reparametrization invariance the $\bar{n}_i\cdot \mathcal{P}$ scalar product can only depend on the leptoquark mass $M$ such that: \begin{equation} C\equiv C(\bar{n}_1\cdot\mathcal{P}_1,\bar{n}_2\cdot\mathcal{P}_2,\Lambda,\mu)\equiv C(\Lambda,M,\mu)\,. \end{equation} The Lagrangian in (\ref{Lagrangian1}) contains only dimension four operators and therefore the Wilson coefficients are dimensionless. 
We write the Lagrangian in a compact form: \begin{equation} \begin{aligned} \mathcal{L}_{S_1}^{(\lambda^2)}&=C^{ij}_{S^{\ast}_1u^{c}_R\ell_R}(\Lambda,M_{S_1},\mu)\, \mathcal{O}^{ij}_{S_1^{\ast}u^{c}_{R}\ell_R}(\mu)\, + \,C^{ij}_{S_1^{\ast}Q^{c}_{L}L_L}(\Lambda,M_{S_1},\mu)\,\mathcal{O}^{ij}_{S_1^{\ast}Q^{c}_{L}L_L}(\mu)\, \\ &+\,C_{S_1^{\ast}d^{c}_{R}\nu_R}^{ij}(\Lambda,M_{S_1},\mu)\, \mathcal{O}_{S_1^{\ast}d^{c}_{R}\nu_R}^{ij}(\mu)\, +\,\text{h.c.}\,, \end{aligned} \end{equation} with the operator basis: \begin{equation} \begin{aligned} \mathcal{O}^{ij}_{S_1^{\ast}u^{c}_{R}\ell_R}\,&=\,\bar{u}^{c,i}_{R,n_1}\, \ell^j_{R,n_2} S_{1v}^{\ast}+(n_1\leftrightarrow n_2)\,, \\ \mathcal{O}^{ij}_{S_1^{\ast}Q^{c}_{L}L_L}\,&=\,\bar{Q}^{c,i}_{L,n_1}\,i\,\sigma_2\, L^j_{L,n_2}\,S_{1v}^{\ast}+(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_1^{\ast}d^{c}_{R}\nu_R}^{ij}\,&= \,\bar{d}^{c,i}_{R,n_1}\, \nu^j_{R,n_2}\,S^{\ast}_{1v}+(n_1\leftrightarrow n_2)\,. \end{aligned} \label{S1operators} \end{equation} The first two operators in the above equation define a two jet final state while the decay into a $\nu_R$ is a mono jet signature plus missing energy. From this operator basis it is straightforward to calculate the tree level decay rates of the leptoquark $S_1$. In this case the SM fields and the Wilson coefficients are transformed from the weak basis to the mass basis. We collect the components $\mathrm{C}^{ij}$ of the Wilson coefficients in the mass basis in the matrix $\boldsymbol{\mathrm{C}}$, which transforms with the transformation matrix of the various fields in that operator. 
\par The two body decays at $\mathcal{O}(\lambda^2)$ are fixed by kinematics and in the limit of massless final states the total decay rates for the singlet $S_1$ are: \begin{equation} \begin{split} \Gamma(S_1\rightarrow {u_R^{c,j}}\bar{\ell}_{R}^{i})&=\,\frac{M_{S_1}}{16\pi}\,|\, \mathrm{C}^{ij}_{S_1^{\ast}u^{c}_{R}\ell_R} \,|^2 \,,\\ \Gamma(S_1\rightarrow Q_L^{c,i}\bar{L}_{L}^{j})&=\,\frac{M_{S_1}}{16\pi}\,|\,\mathrm{C}^{ij}_{S_1^{\ast}Q^{c}_LL_L}\,|^2\,, \\ \Gamma(S_1\rightarrow d^{c,i}_R \bar{\nu}^{j}_R )&=\,\frac{M_{S_1}}{16\pi}\,|\,\mathrm{C}^{ij}_{S_1^{\ast}d^c_R\nu_R}\,|^2 \,,\\ \end{split} \end{equation} where $M_{S_1}$ is the mass of $S_1$. For different final states the decay rates differ only by their Wilson coefficients. \subsection[Subleading power two jet operators for $S_1$]{\boldmath Subleading power two jet operators for $S_1$} \label{secs1sub} It is of interest to further explore the beyond SM SCET Lagrangian at $\mathcal{O}(\lambda^3)$. At subleading order in power counting $S_1$ decays into two and three jet final states. In the first case two collinear particles in $n_i$ direction share the total jet momentum $\mathcal{P}$ such that one of the particles will carry momentum $u\mathcal{P}$, with $0<u<1$ and the other momentum $(1-u)\mathcal{P}$. Since $u$ can have any value between $0$ and $1$ one has to integrate over this parameter space in the Lagrangian. 
Applying the same arguments of gauge invariance, Lorentz invariance and reparametrization invariance the two body decay Lagrangian for the leptoquark $S_1$ at subleading order reads: \begin{equation} \begin{aligned} \mathcal{L}_{S_1}^{(\lambda^3)}\bigg\rvert_{\text{2 jet}}&\,=\,\frac{1}{\Lambda}\,\Biggl[\, C_{S_1^{\ast}L_L\Phi d_R}^{(0)^{ij}}(\Lambda,M_{S_1},\mu)\,{\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(0)^{ij}}}(\mu)\,\\ &\hspace{1.9cm}+\,{C_{S_1Q_L\Phi \nu_R}^{(0)^{ij}}}(\Lambda,M_{S_1},\mu)\,\mathcal{O}_{S_1Q_L\Phi \nu_R}^{(0)^{ij}}(\mu)\,\Biggr]\,\\ &\,+\,\frac{1}{\Lambda}\,\Biggl [ \sum_{k=1,2}\int_{0}^{1}du\Biggl(\,{C_{S_1^{\ast}L_L\Phi d_R}^{(k)^{ij}}}(\Lambda,M_{S_1},\mu,u)\,{\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(k)^{ij} }}(\mu,u)\,\\ &\hspace{1.9cm}+\,{C_{S_1Q_L\Phi \nu_R}^{(k)^{ij}}}(\Lambda,M_{S_1},\mu,u)\,{\mathcal{O}_{S_1Q_L\Phi\nu_R}^{(k)^{ij}}}(\mu,u) \,\\ &\hspace{1.9cm}+\,{C_{S_1d_R B\nu_R}^{(k)^{ij}}}(\Lambda,M_{S_1},\mu,u)\,{\mathcal{O}_{S_1d_R B\nu_R}^{(k)^{ij}}} (\mu,u)\, \Biggr)\,+\,\text{h.c.}\, \Biggr]\,. \end{aligned} \label{subS1} \end{equation} We label the operators by their field content and $B$ is the $U(1)_Y$ gauge boson. To distinguish the two jet operators at $\mathcal{O}(\lambda^3)$ we use the superscript $(k)$ for $k=1\,,2$ which denotes the collinear direction in which the third field with momentum $u\mathcal{P}_i$ is emitted. The operators labelled by $(0)$ contain a zero momentum field $\Phi^{(0)}$ such that: \begin{equation} \begin{aligned} &{\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(0)^{ij}}}\,=\,\bar{L}^{i}_{L,n_1}\,\tilde{\Phi}^{(0)}\,d^{j}_{R,n_2} \,S_{1v}^{\ast}\, +\,(n_1\leftrightarrow n_2)\,,\\ &{\mathcal{O}_{S_1Q_L\Phi \,\nu_R}^{(0)^{ij}}}\,=\,\bar{Q}^{i}_{L,n_1}\,{\Phi}^{(0)}\nu_{R,n_2}^{j}\,S_{1v}\, +\, (n_1\leftrightarrow n_2)\,, \end{aligned} \label{Eq.1} \end{equation} where $\tilde{\Phi}^{(0)}=i\sigma_2{\Phi^{(0)}}^{\ast}$. 
The zero momentum field $ \Phi^{(0)} $ has the gauge quantum numbers of the Higgs doublet but it does not transform under gauge transformations in SCET. After electroweak symmetry breaking it can be rotated to: \begin{equation} \Phi^{(0)}\,=\,\frac{1}{\sqrt{2}}\,(0,v)^T\,. \label{phizero} \end{equation} These operators will give a non-vanishing contribution to the two body decay rates of $S_1$ at $\mathcal{O}(\lambda^3)$. The second equation in (\ref{Eq.1}) describes a mono-jet signature in the detector plus missing energy from the $\nu_R$. All the fields in the remaining operators in the Lagrangian (\ref{subS1}) carry momentum different from zero. \par The Wilson coefficients for the two jet Lagrangian at $\mathcal{O}(\lambda^3)$ depend on the parameter $u$ if a particle with non zero momentum is emitted within the same jet. The superscript $(u)$ on the field implies the presence of a $\delta$- function which is there to ensure that the large momentum component of the second particle in the $i^{th}$ jet is fixed by $u\left(\bar{n}_i\cdot\mathcal{P}_i\right)$. For an explicit derivation see \cite{Alte:2018nbn}. For example: \begin{equation} \begin{aligned} {\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(1){ij}}}(u)&\,=\,\bar{L}^{i}_{L,n_1}(x)\, \tilde{ \Phi}_{n_1}^{(u)}(x)\,d^{j}_{R,n_2}(x)\, S_{1v}^{\ast}(x)\,\\ &\,\equiv\, \bar{L}^{i}_{L,n_1}(x)\, \delta(u-\frac{\bar{n}_1\cdot\mathcal{P}_{\Phi}}{\bar{n}_1\cdot\mathcal{P}_1})\,\tilde{ \Phi}_{n_1}(x)\,d^{j}_{R,n_2}(x)\, S_{1v}^{\ast}(x)\,. 
\end{aligned} \end{equation} The other three operators contain only fermionic fields and the SM scalar $\Phi(x)$: \begin{equation} \begin{aligned} {\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{(2)^{ij}}}(u)&\,=\,\bar{L}^{i}_{L,n_1}\, \tilde{ \Phi}_{n_2}^{(u)}\,d^{j}_{R,n_2}\, S_{1v}^{\ast}\, +\, (n_1 \leftrightarrow n_2)\,,\\ {\mathcal{O}_{S_1Q_L\Phi\nu_R}^{(1)^{ij}}}(u)\,&=\,\bar{Q}^{i}_{L,n_1}\,{\Phi}_{n_1}^{(u)}\,\nu^{j}_{R,n_2}\,S_{1v} \,+\, (n_1\leftrightarrow n_2)\,,\\ {\mathcal{O}_{S_1Q_L\Phi\nu_R}^{(2)^{ij}}}(u)&\,=\,\bar{Q}^{i}_{L,n_1}\,{\Phi}_{n_2}^{(u)}\,\nu^{j}_{R,n_2}\,S_{1v} \,+ \,(n_1\leftrightarrow n_2)\,. \end{aligned} \end{equation} Note that two fermionic fields cannot be emitted in the same $n_i$ direction since that would give a vanishing contribution due to $n^2=\bar{n}^2=0$. The last line in (\ref{subS1}) contains the same chirality operators built out of a down-type quark, a right handed neutrino and the $U(1)_Y$ gauge boson. To maintain the subleading order power counting we can only include the perpendicular component of the gauge invariant building block $\mathcal{B}_{\mu}^{\perp}\sim \mathcal{O}(\lambda)$ of the gauge field. 
The last two operators in (\ref{subS1}) then are: \begin{equation} \begin{aligned} {\mathcal{O}_{S_1d_R B\nu_R}^{(1)^{ij}}}(u)\,&=\,{\bar{d}}^{i}_{R,n_1}\, \slashed{\mathcal{B}}^{\perp,(u)}_{n_1}\,\nu^{j}_{R,n_2}\,S_{1v}\, +\, (n_1\leftrightarrow n_2)\,,\\ {\mathcal{O}_{S_1d_R B\nu_R}^{(2)^{ij}}}(u)\,&=\,{\bar{d}}^{i}_{R,n_1}\, \slashed{\mathcal{B}}^{\perp,(u)}_{n_2}\,\nu^{j}_{R,n_2}\,S_{1v}\, +\, (n_1\leftrightarrow n_2)\,, \end{aligned} \end{equation} where the $\slashed{\mathcal{B}}^{\perp}_n=\gamma_{\perp}^{\mu}\,\mathcal{B}^{\perp,\mu}_n$ such that the perp component of the $\mathcal{B}_{n}$ is defined as: \begin{equation} \mathcal{B}_{n}^{\perp,\mu}\,=\,\mathcal{B}_{n}^{\mu}\,-n\cdot\mathcal{B}\,\frac{\bar{n}^\mu}{2}\,, \label{vecperp} \end{equation} and \begin{equation} \gamma^{\mu}_{\perp}\,=\,\gamma^{\mu}\,-\,\frac{\slashed{n}_1}{n_1\cdot {n}_2}\,{n}_2^{\mu}\,-\,\frac{\slashed{n}_2}{n_1 \cdot n_2}\,n_1^{\mu}\,. \label{gamma} \end{equation} There are no charge conjugate fields arising at $ \mathcal{O}(\lambda^3)$ and therefore all the operators conserve the fermion number. Moreover this implies no mixing between the leading and sub-leading order operators for the $S_1(3,1,-\frac{1}{3})$. Since all the operators are of canonical dimension five we multiply each term by $1/\Lambda$ so that the Wilson coefficients are dimensionless. It is instructive to do so because effectively the Wilson coefficients play the role of coupling constants. \par From the operator basis at $\mathcal{O}(\lambda^3)$ it is not difficult to derive the two body decays of $S_1$ at subleading order: \begin{equation} \begin{aligned} \Gamma(S_1\rightarrow \bar{L}_{L}^{i}d^{j}_R)\,&=\,\frac{v^2}{2}\,\frac{M_{S_1}}{16\pi}\, \frac{ |\, \mathrm{C}_{S_1^{\ast}L_L\Phi d_R}^{(0)^{ij}} \,|^2}{\Lambda^2}\,, \\ \Gamma(S_1\rightarrow \bar{Q}_{L}^{i}\bar{\nu}^{j}_R )\,&=\,\frac{v^2}{2}\frac{M_{S_1}}{16\pi}\,\frac{ |\,\mathrm{C}_{S_1Q_L\Phi\nu_R}^{(0)^{ij}}\,|^2 }{\Lambda^2}\,. 
\end{aligned} \end{equation} They are both suppressed by a factor of $ {v^2}/{\Lambda^2}$ compared to the two body decay rates from $\mathcal{O}(\lambda^2)$ operators. \subsection[Leading power three body decays for $S_1$]{\boldmath Leading power three body decays for $S_1$} It is possible to have the same field content as in (\ref{subS1}) with each collinear field emitted in one separate collinear direction $n_i$. These operators describe the decays of the leptoquark $S_1$ into three jet final states though the phase space would be much smaller and the decay rates further suppressed. Then the $S_1$ three jet Lagrangian at $\mathcal{O}(\lambda^3)$ is: \begin{equation} \begin{split} \mathcal{L}_{S_{1v}}^{(\lambda^3)}\bigg\rvert_{\text{3 jet}}\,=\,\frac{1}{\Lambda}\,\Biggl [&\,C_{S_1^{\ast}L_L\Phi d_R}^{ij}(\Lambda,M_{S_1},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{ij} (\mu)\, \\ &\,\,+\,C^{ij}_{S_1Q_L\Phi\nu_R}(\Lambda,M_{S_1},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}^{ij}_{S_1Q_L\Phi\nu_R}(\mu)\, \\ & \,\,+\,C_{S_1d_R B\nu_R}^{ij}(\Lambda,M_{S_1},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_1d_RB\nu_R}^{ij} (\mu)\,+\,\text{h.c.}\, \Biggr]\,. \end{split} \label{3jetS1} \end{equation} The Wilson coefficients in this case depend also on the invariant mass $m^2_{k\ell}$ for any $(k,\ell)$ pair of final state particles, where $k\neq \ell\in \lbrace 1,2,3\rbrace$ . 
The operators read: \begin{equation} \begin{aligned} \mathcal{O}_{S_1^{\ast}L_L\Phi d_R}^{ij}\,&=\,\bar{L}^{i}_{L,n_1} \, \tilde{ \Phi}_{n_3}d^{j}_{R,n_2}\, S_{1v}^{\ast}\, +\, (n_{1}\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_1Q_L\Phi\nu_R}^{ij}\,&=\,\bar{Q}^{i}_{L,n_1}\,{\Phi}_{n_3}\nu^{j}_{R,n_2}\,S_{1v} \,+\, (n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_1d_R B \nu_R}^{ij}\,&=\,\bar{d}^{i}_{R,n_1}\, \slashed{\mathcal{B}}^{\perp}_{n_3}\,\nu^{j}_{R,n_2}\,S_{1v}\, +\, (n_1\leftrightarrow n_2)\,, \label{ZandgammadecayS1} \end{aligned} \end{equation} where $n_1, n_2, n_3$ are the three collinear directions each defining a jet signature in a possible event in the experiment. From the three jet Lagrangian in (\ref{3jetS1}) we compute the squared matrix element for a three body decay of the leptoquark $S_1$ into two fermions and a Higgs boson: \begin{equation} \begin{aligned} \mid\mathcal{M}(S_1\rightarrow \bar{L}_{L}^{i} h {d}^{j}_R)\mid^2\,&=\,\frac{|\, \mathrm{C}_{S_1^{\ast}L_L\Phi d_R}^{ij}\,| ^2}{4\Lambda^2}\,(n_1\cdot n_2)\,(\bar{n}_1\cdot p_1)\,(\bar{n}_2\cdot p_2)\,\\ &\approx\,\frac{| \mathrm{C}_{S_1L_L\Phi d_R}^{ij}| ^2}{2\Lambda^2}\, m_{Ld}^2\,,\\ \mid\mathcal{M}^2(S_1\rightarrow \bar{Q}_{L}^{i}h{\nu}^{j}_R)\mid^2\,&=\,\frac{|\, \mathrm{C}_{S_1Q_L\Phi\nu_R}^{ij}\,| ^2}{4\Lambda^2}\,(n_1\cdot n_2)\,(\bar{n}_1\cdot p_1)\,(\bar{n}_2\cdot p_2)\,\\ &\approx\,\frac{|\, \mathrm{C}_{S_1Q_L\Phi\nu_R}^{ij}\,| ^2}{2\Lambda^2}\, m_{Q\nu}^2\,, \end{aligned} \end{equation} where $m_{Ld}^2=(p_L+p_d)^2$, $m_{Q\nu}^2=(p_Q+p_{\nu})^2$ and we have used the following approximation: \begin{equation} \begin{aligned} m^{2}_{12}&=\frac{1}{2}(n_{1}\cdot n_{2})(\bar{n}_{1}\cdot p_1)(\bar{n}_2\cdot p_2)+\mathcal{O}(\lambda^2)\,\\ &\approx \frac{1}{2}(n_{1}\cdot n_{2})(\bar{n}_{1}\cdot p_1)(\bar{n}_2\cdot p_2) \end{aligned} \end{equation} Then the differential decay rates from the above contributions read: \begin{equation} \begin{aligned} 
\frac{d^2\Gamma(S_1\rightarrow\bar{L}^{i}h{d}^{j}_R)}{dm_{hd}^2\,dm_{Ld}^2}\,&=\,\frac{1}{512\pi^3}\,\frac{|\, \mathrm{C}_{S_1^{\ast}Ld\Phi}^{ij}\,| ^2}{\Lambda^2} \,\frac{m_{Ld}^2}{M_{S_1}^3}\,,\\ \frac{d^2\Gamma(S_1\rightarrow \bar{Q}^{i}h{\nu}^{j}_R)}{dm_{Qh}^2\,dm_{Q\nu}^2}\,&=\,\frac{1}{512\pi^3}\,\frac{|\, \mathrm{C}_{S_1Q\nu\Phi}^{ij}\,| ^2}{\Lambda^2} \,\frac{m_{Q\nu}^2}{M_{S_1}^3}\,. \end{aligned} \end{equation} From equation (\ref{ZandgammadecayS1}) we find the following differential decay rate for photon and $Z$ boson decays: \begin{equation} \begin{aligned} \frac{d^2\Gamma(S_1\rightarrow d_R^{i}\gamma\nu_R^{j})}{dm^2_{\gamma d}\,dm^2_{\gamma\nu}}\,&=\,\frac{\alpha_1}{64\pi^2}\,\frac{1}{\cos\theta_w}\,\frac{| \mathrm{C}_{S_1d_R B\nu_R}^{ij}| ^2\, |Y|^2}{\Lambda^2}\,\frac{m^2_{d\nu}}{M^3_{S_1}}\,\frac{(m_{d\gamma}^2)^2\,+\,(m^2_{\nu \gamma})^2}{(M^2_{S_1}-m^2_{d\nu})^2}\,,\\ \frac{d^2\Gamma(S_1\rightarrow d_R^{i}\nu_R^{j}Z)}{dm^2_{d\nu}\,dm^2_{Z\nu}}\,&=\frac{\alpha_1}{64\pi^2}\,\frac{1}{\sin\theta_w}\,\frac{| \mathrm{C}_{S_1d_R B\nu_R}^{ij}| ^2\, |Y|^2}{\Lambda^2}\,\frac{m^2_{d\nu}}{M^3_{S_1}}\,\frac{(m_{dZ}^2)^2\,+\,(m^2_{\nu Z})^2}{(M^2_{S_1}-m^2_{d\nu})^2}\,. \end{aligned} \label{3bodys12} \end{equation} In computing the squared matrix element we have summed over the two perp polarization vectors of the gauge bosons such that: \begin{equation} \sum^{2}_{i=1}\,\epsilon^{\mu}_{\perp}(p_3)\,\epsilon_{\perp}^{\star\nu}(p_3)\,=\,-g^{\mu\nu}_{\perp}, \end{equation} where \begin{equation} g^{\mu\nu}_{\perp}\,=\,g^{\mu\nu}\,-\,\frac{n_3^{\mu}\,\bar{n}_3^{\nu}}{2}\,-\,\frac{\bar{n}_3^{\mu}\,n_3^{\nu}}{2}\,. \end{equation} The gauge coupling and the $U(1)$ generator $Y$ dependence in the decay rates in the above equations follow from the Feynman rule for the physical $B_{\perp}^{\mu}$ derived from the corresponding SCET field $\mathcal{B}_{\perp}^{\mu}$ that is: $g^{\prime}Y\epsilon^{\star}_{\perp}(p)$. 
\begin{comment} \subsection{Tree level decay rates of the $S_1(3,1,-\frac{1}{3})$ } For a given operator basis it is straightforward to calculate the decay rates of the singlet LQ at tree level, with the final states transformed into the mass basis. In this case also the Wilson coefficients need to be transformed from the weak basis of the operators to the mass basis such that for a general operator of the form $C^{ij}\bar{\psi}^{i}\chi^{j}$ we will have: \begin{equation} C^{ij}\bar{\psi}^{i}\chi^{j}\rightarrow C^{ij}V^{\dagger}\bar{\psi}^{i}U\chi^{j}, \end{equation} where $V^{\dagger}\psi$ and $U\chi$ define the rotation of the these two collinear fields to their mass basis after EW symmetry breaking. We define the Wilson coefficients in the new basis as $\tilde{C}^{ij}$, such that: \begin{equation} \tilde{C}^{ij}=V^{\dagger}C^{ij}U \label{Wilsoncoef} \end{equation} For a two body decay everything is fixed by kinematics and we compute the total decay rate in the limit of massless final states. The contributions from the Lagrangian in (\ref{Lagrangian1}) then yields the following decay rates: \begin{equation} \begin{split} &\Gamma(S_1\rightarrow \bar{\ell_R}^{i}{u_R^c})=N_f\frac{M_{S_1}}{16\pi}| \tilde{C}^{ij}_{1R} |^2 \\ &\Gamma(S_1\rightarrow Q^{c,i}\bar{ L}^{j})=N_f\frac{M_{S_1}}{16\pi}|\tilde{C}^{ij}_{1L}|^2 \\ &\Gamma(S_1\rightarrow d^{c,i}_R \bar{\nu}^{j}_R )=N_f\frac{M_{S_1}}{16\pi}|\tilde{C}^{ij}_{1\nu}|^2 \\ \end{split} \end{equation} where the $N_f$ is the number of coloured fermions that can possibly appear in the final state in the respective decay process. Note that the above rates differ only by their Wilson coefficients. 
The order $\mathcal{O}(\lambda^3)$ operators in Eq.(\ref{Eq.2}) and Eq.(\ref{Eq.1}) also give a contribution to the two body decay rate from the two jet and mono jet signatures: \begin{equation} \begin{split} &\Gamma(S_1\rightarrow \bar{L}^{i}d^{j}_R)=N_f\frac{v^2}{2}\frac{M_{S_1}}{16\pi} \frac{ | {\tilde{C_1}}_{Ld}^{{(0)}ij} |^2}{M^2} \\ &\Gamma(S_1\rightarrow Q^{i}\bar{\nu}^{j}_R )=N_f\frac{v^2}{2}\frac{M_{S_1}}{16\pi}\frac{ |{\tilde{C_1}}_{Q\nu}^{(0)ij}|^2 }{M^2} \end{split} \end{equation} They are both suppressed by a factor of $ \frac{v^2}{M^2}$ compared to the two body decay rates from $\mathcal{O}(\lambda^2)$ operators. Lastly from the three jet Lagrangian in (\ref{3jetS1}) we compute the squared matrix element for a three body decay of $S_1(3,1,-\frac{1}{3})$ into two fermions and a Higgs boson: \begin{align} \mathcal{M}^2(S_1\rightarrow L^{i}h\bar{d}^{j}_R)&=N_f\frac{| {\tilde{C_1}}_{Ld}^{ij}| ^2}{4M^2}(n_1\cdot n_2)(\bar{n}_1\cdot p_1)(\bar{n}_2\cdot p_2)\sim N_f\frac{| {\tilde{C_1}}_{Ld}^{ij}| ^2}{M^2} (m_{Ld}^2)\\ \mathcal{M}^2(S_1\rightarrow Q^{i}h\bar{\nu}^{j}_R)&=N_f\frac{| {\tilde{C_1}}_{Q\nu}^{ij}| ^2}{4M^2}(n_1\cdot n_2)(\bar{n}_1\cdot p_1)(\bar{n}_2\cdot p_2)\sim N_f\frac{| {\tilde{C_1}}_{Q\nu}^{ij}| ^2}{M^2} (m_{Q\nu}^2) \end{align} where $m_{Ld}^2=(p_L+p_d)^2$ and $m_{Q\nu}^2=(p_Q+p_{\nu})^2$ and we are neglecting the masses of the other particles compared to the mass of the LQ. 
Then the differential decay rates from the above contributions read: \begin{align} \frac{d^2\Gamma(S_1\rightarrow L^{i}h\bar{d}^{j}_R)}{dm_{hd}^2dm_{Ld}^2}&=\frac{1}{512\pi^3}N_f\frac{| {\tilde{C_1}}_{Ld}^{ij}| ^2}{M^2} \frac{(m_{Ld}^2)}{M_{S_1}^3}\\ \frac{d^2\Gamma(S_1\rightarrow Q^{i}h\bar{\nu}^{j}_R)}{dm_{Qh}^2dm_{Q\nu}^2}&=\frac{1}{512\pi^3}N_f\frac{| {\tilde{C_1}}_{Q\nu}^{ij}| ^2}{M^2} \frac{(m_{Q\nu}^2)}{M_{S_1}^3} \end{align} Lastly from Eq.(\ref{ZandgammadecayS1}) we find the following decay rate of the $S_1(3,1,-\frac{1}{3})$ into $\gamma$ and $Z^0$: \begin{equation} \frac{d^2\Gamma(S_1\rightarrow d_R^{i}\gamma\nu_R^{j})}{dm^2_{\gamma d}dm^2_{B\nu}}=\frac{\alpha_1}{32\pi^2}\frac{N_f}{2\cos\theta_w}\frac{| {\tilde{C_1}}_{d\nu}^{ij}| ^2 |Y|^2}{M^2}\frac{(m^2_{d\nu})}{M^3_{S_1}}\frac{(m_{dZ}^2)^2+(m_{\nu Z})^2}{(M^2_{S_1}-m^2_{d\nu})^2} \label{3bodys11} \end{equation} \begin{equation} \frac{d^2\Gamma(S_1\rightarrow d_R^{i}\nu_R^{j}Z^{0})}{dm^2_{Zd}dm^2_{Z\nu}}=\frac{\alpha_1}{32\pi^2}\frac{N_f}{2\sin\theta_w}\frac{| {\tilde{C_1}}_{d\nu}^{ij}| ^2 |Y|^2}{M^2}\frac{(m^2_{d\nu})}{M^3_{S_1}}\frac{(m_{dZ}^2)^2+(m_{\nu Z})^2}{(M^2_{S_1}-m^2_{d\nu})^2} \label{3bodys12} \end{equation} In computing the squared matrix element we have summed over the two perp polarization vectors of the gauge bosons such that: \begin{equation} \sum^{2}_{i=1}\epsilon^{\mu}_{\perp}(p_3)\epsilon_{\perp}^{\star\nu}(p_3)=-g^{\mu\nu}_{\perp}, \end{equation} where \begin{equation} g^{\mu\nu}_{\perp}=g^{\mu\nu}-\frac{n_3^{\mu}\bar{n}_3^{\nu}}{2}-\frac{\bar{n}_3^{\mu}n_3^{\nu}}{2} \end{equation} The gauge coupling and the $U(1)$ generator $Y$ dependence in the decay rates in Eq.(\ref{3bodys11}) and Eq.(\ref{3bodys12}) follows from the Feynman rule for the physical $B_{\perp}^{\mu}$ derived from the corresponding SCET field $\mathcal{B}_{\perp}^{\mu}$ that is: $g^{\prime}Y\epsilon^{\star}_{\perp}(p)$. 
\color{black} \end{comment} \section{\boldmath{SCET formalism for the scalar leptoquark $S_3(3,3,-\frac{1}{3})$}} \label{sections3} There are several possible extensions to the SM that attempt to explain the observed anomalies in $B$-physics systems. Most of these theoretical models that use scalar leptoquarks as a viable explanation contain both the singlet $S_1$ and another scalar leptoquark $S_3$ that transforms as a triplet under $SU(2)_L$ with hypercharge $-1/3$ \cite{Buttazzo:2017ixm,Crivellin:2017zlb}. Such models seem to give a promising solution both to the $R_{(D^{\ast})}$ anomalies and to the neutral current process $b\rightarrow s\mu^{+}\mu^{-}$. It is therefore of interest to apply our framework to the triplet $S_3$ and find its tree level decay rates. We present in the following section the $S_3$ effective Lagrangian in SCET at $\mathcal{O}(\lambda^2)$ and $\mathcal{O}(\lambda^3)$ for two and three body final states. \subsection[Leading power two jet operators for the $S_3$]{\boldmath Leading power two jet operators for the $S_3$} \label{s3sec} We start by constructing the leading order Lagrangian which we refer to as $\mathcal{L}_{S_3}^{(\lambda^2)}$. Since $S_3$ is an $SU(2)$ triplet it should be understood as $S_3\equiv t^{a}S_3^{a}$, for $a=1,2,3$ and $t^{a}$ the generators of $SU(2)$. As a result gauge invariance constrains the operator basis a lot more in this case. Indeed we find only one operator that describes the decays of the $S_3$ into two energetic SM particles going into the collinear directions $n_1$ and $n_2$.
It is a dimension four operator built out of a quark and a lepton doublet and the $S_3$ where the SM doublets couple to the $S_3$ triplet similarly to an $SU(2)$ gauge field: \begin{equation} \mathcal{L}_{S_3}^{(\lambda^2)}\,=\,C^{ij}_{S_3^{\ast}Q_L^cL_L}(\Lambda,M_{S_3},\mu)\,\mathcal{O}_{S_3^{\ast}Q_L^cL_L}^{ij}(\mu)\,+\,\text{h.c.}, \label{Ltripletleading} \end{equation} with \begin{equation} \mathcal{O}_{S_3^{\ast}Q_L^cL_L}^{ij}=\,{\bar{Q^i}}^{c,a}_{L,n_1} \, \epsilon^{ab} \,S_{3v}^{\ast bd}\,{L^{j,d}}_{L,n_2}\,+\,(n_1\leftrightarrow n_2)\,, \label{LOopS3} \end{equation} where $i,j$ are flavor indices and $a,b,d$ are $SU(2)$ indices. The heavy particle $S_3$ is treated within the HSEFT as described before where $S_{3v}(x)$ contains only the soft momentum fluctuations. The Wilson coefficients are defined in the same way as in equation (\ref{Wilsoncoeff}) and they are dimensionless. We notice that similarly to the $\mathcal{L}_{S_1}^{(\lambda^2)}$ the Lagrangian in (\ref{Ltripletleading}) violates fermion number conservation. The leading order two body decay rates of the leptoquark $S_3$ are governed by the matrix elements of the Lagrangian in (\ref{Ltripletleading}), which allows for a decay into a left handed quark and a left handed lepton. The total two body decay rate at $\mathcal{O}(\lambda^2)$ for $S_3$ evaluates to: \begin{equation} \begin{split} &\Gamma(S_3\rightarrow Q^{c,i}_{L}\bar{ L}_{L}^{j})\,=\,\frac{M_{S_3}}{32\pi}\,|\mathrm{C}_{S_3^{\ast}Q_L^{c}L_L}^{ij}|^2 \,.\\ \end{split} \end{equation} \subsection[Subleading power two jet operators for the $S_3$]{\boldmath Subleading power two jet operators for the $S_3$} At $\mathcal{O}(\lambda^3)$ the symmetries allow for a larger number of operators both for two and three collinear directions.
For two jet final states we find six operators of mixed chirality and two operators of the same chirality fields such that: \begin{equation} \begin{aligned} \mathcal{L}_{S_3}^{(\lambda^3)}\bigg\rvert_{\text{2 jet}}&\,=\,\frac{1}{\Lambda}\,\Biggl[\,C_{S_3Q_L\Phi\nu_R}^{(0)^{ij}} (\Lambda,M_{S_3},\mu)\,\mathcal{O}_{S_3Q_L\Phi\nu_R}^{(0)^{ij}}\,\\ &\,\hspace{1.7cm}+\,C_{S_3d_R\Phi L_L}^{(0)^{ij}}(\Lambda,M_{S_3},\mu)\,\mathcal{O}_{S_3d_R\Phi L_L}^{(0)^{ij}}(\mu)\,\Biggr]\,\\ &+\,\frac{1}{M}\,\Biggl[\,\sum_{k=1,2}\int_{0}^{1}\,du\,\Biggl( \,C_{S_3Q_L\Phi\nu_R}^{(k)^{ij}}(\Lambda,M_{S_3},\mu,u)\, \mathcal{O}_{S_3Q_L\Phi\nu_R}^{(k)^{ij}}(\mu,u) \,\\ &\hspace{1.9cm}+\,C_{S_3d_R\Phi L_L}^{(k)^{ij}}(\Lambda,M_{S_3},\mu,u)\, \mathcal{O}_{S_3d_R\Phi L_L}^{(k)^{ij}}(\mu,u)\,\\ &\hspace{1.9cm}+\,C_{S_3d_R W\nu_R}^{(k)^{ij}} (\Lambda,M_{S_3},\mu,u)\,\mathcal{O}_{S_3d_R W\nu_R}^{(k)^{ij}}(\mu,u)\, \Biggr)\,+\,\text{h.c}.\,\Biggl]\,. \end{aligned} \label{S32jetsub} \end{equation} We name the operators and the Wilson coefficients based on their field content, where $W$ here is the $SU(2)$ gauge boson. The operators in the first line in (\ref{S32jetsub}) contain the zero momentum field $\Phi^{(0)}(x)$ and $\tilde{\Phi}^{(0)}(x)$ with the following explicit structure: \begin{equation} \begin{aligned} &{\mathcal{O}_3^{(0)}}^{ij}_{Q_L\Phi\nu_R} \,=\,\bar{Q}^{i}_{L,n_1}\,S_{3v} \,\Phi^{(0)}\,\nu^{j}_{R,n_2}\, +\,(n_1\leftrightarrow n_2)\,,\\ &{\mathcal{O}_3^{(0)}}_{d_R\Phi L_L}^{ij}\,=\,\bar{d}^{i}_{R,n_1}\,\tilde{\Phi}^{(0)}\,S_{3v}\,L^{j}_{L,n_2} \,+\,(n_1\leftrightarrow n_2)\,, \label{2bodyS3sub2} \end{aligned} \end{equation} which are mono-jet detector signatures. 
The rest of the mixed chirality operators read as follows: \begin{equation} \begin{aligned} \mathcal{O}_{S_3Q_L\Phi\nu_R}^{(1)^{ij}} (u)\,&=\,\bar{Q}^{i}_{L,n_1}\,S_{3v}\, \Phi_{n_1}^{(u)}\,\nu^{j}_{R,n_2}\,+\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3Q_L\Phi\nu_R}^{(2)^{ij}} (u)\,&=\,\bar{Q}^{i}_{L,n_1}\,S_{3v}\, \Phi_{n_2}^{(u)}\,\nu^{j}_{R,n_2}\,+\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3d_R\Phi L_L}^{(1)^{ij}}(u)\,&=\,\bar{d}^{i}_{R,n_1}\,\tilde{\Phi}^{(u)}_{n_1}\,S_{3v}\,L^{j}_{L,n_2}\, +\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3d_R\Phi L_L}^{(2)^{ij}}(u)\,&=\,\bar{d}^{i}_{R,n_1}\,\tilde{\Phi}^{(u)}_{n_2}\,S_{3v}\,L^{j}_{L,n_2}\, +\,(n_1\leftrightarrow n_2)\,, \end{aligned} \end{equation} The triplet $S_3$ very similarly to $S_1$ will decay into right handed neutrinos, left handed quarks and SM Higgs and into left handed leptons together with right handed down-type quarks. Lastly the same chirality operators in equation (\ref{S32jetsub}) contain the perp component of a $SU(2)_L$ gauge boson, a down type quark and the right handed neutrino: \begin{equation} \begin{aligned} \mathcal{O}_{S_3d_R W \nu_R}^{(1)^{ij}}(u)\,&=\,\bar{d}^{i}_{R,n_1}\,S_{3v}\,\slashed{\mathcal{W}}^{\perp,(u)}_{n_1}\,\nu^{j}_{R,n_2}\, +\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3d_R W \nu_R}^{(2)^{ij}}(u)\,&=\,\bar{d}^{i}_{R,n_1}\,S_{3v}\,\slashed{\mathcal{W}}^{\perp,(u)}_{n_2}\,\nu^{j}_{R,n_2}\, +\,(n_1\leftrightarrow n_2)\,, \end{aligned} \end{equation} where the perp component of the gauge invariant building block $\mathcal{W}^{\mu}$ is defined: \begin{equation} \mathcal{W}_{n}^{\perp,\mu}\,=\,\mathcal{W}_{n}^{\mu}\,-n\cdot\mathcal{W}\,\frac{\bar{n}^\mu}{2}\,. 
\label{Wvecperp} \end{equation} Subleading operators with the zero momentum field $\Phi^{(0)}$ in (\ref{2bodyS3sub2}) will give a contribution to the two body decay rates such that: \begin{equation} \begin{aligned} \Gamma(S_3\rightarrow \bar{Q}_{L}^{i}{\nu}^{j}_R )\,&=\,\frac{v^2}{2}\,\frac{M_{S_3}}{16\pi}\,\frac{ |\mathrm{C}_{S_3Q_L\Phi\nu_R}^{(0)^{ij}}|^2 }{\Lambda^2}\,,\\ \Gamma(S_3\rightarrow \bar{d}^{i}_RL_{L}^{j} )\,&=\,\frac{v^2}{2}\frac{M_{S_3}}{16\pi}\,\frac{ |\mathrm{C}_{S_3d_R\Phi L_L}^{(0)^{ij}}|^2 }{\Lambda^2}\,. \end{aligned} \end{equation} \subsection[Leading power three body decays for $S_3$]{\boldmath Leading power three body decays for $S_3$} We can also explore the leading power three jet final states for this leptoquark where the Lagrangian in this case is: \begin{equation} \begin{split} \mathcal{L}_{S_3}^{(\lambda^3)}\bigg\rvert_{\text{3 jet}}\,=\,\frac{1}{\Lambda}\,\Biggl [&\,C_{S_3d_R\Phi L_L}^{ij}(\Lambda,M_{S_3},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_3d_R\Phi L_L}^{ij} (\mu) \,\\ &\,\,+\,C_{S_3Q_L\Phi\nu_R}^{ij}(\Lambda,M_{S_3},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_3Q_L\Phi\nu_R}^{ij}(\mu)\, \\ & \,\,+\,C_{S_3d_RW \nu_R}^{ij}(\Lambda,M_{S_3},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}_{S_3d_R W\nu_R}^{ij} (\mu)\,+\,\text{h.c.} \,\Biggr]\,. \end{split} \label{3jetS3} \end{equation} where the operators read: \begin{equation} \begin{aligned} \mathcal{O}_{S_3d_R\Phi L_L}^{ij}&\,=\,\bar{d}^{i}_{R,n_1}\,\tilde{\Phi}_{n_3}\,S_{3v}\,L^{j}_{L,n_2}\,\, +\,\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3Q_L\Phi \nu_R}^{ij}&\,=\,\bar{Q}^{i}_{L,n_1}\, \Phi_{n_3}\,S_{3v}\,\nu^{j}_{R,n_2}\, +\,(n_1\leftrightarrow n_2)\,,\\ \mathcal{O}_{S_3d_R W \nu_R}^{ij}&\,=\,\bar{d}^{i}_{R,n_1}\,\slashed{\mathcal{W}}^{\perp}_{n_3}\,S_{3v}\,\nu^{j}_{R,n_2}\,+\,(n_1\leftrightarrow n_2)\,.
\label{3jetS3ops} \end{aligned} \end{equation} The field content of the above operators is the same as for the two jet decays, though experimentally they would have very different angular distributions. Three body decay rates for left-right chirality operators here are: \begin{equation} \begin{aligned} \frac{d^2\,\Gamma(\,S_3\rightarrow \bar{d}^{i}_R\,L^{j}_{L}\,h\,)}{dm_{hd}^2\,dm_{Ld}^2}&\,=\,\frac{1}{512\pi^3}\,\frac{| \mathrm{C}_{S_3d_R\Phi L_L}^{ij}| ^2}{\Lambda^2} \,\frac{m_{Ld}^2}{M_{S_3}^3}\,,\\ \frac{d^2\,\Gamma(\,S_3\rightarrow \bar{Q}^{i}_{L}\,h\,{\nu}^{j}_R\,)}{dm_{Qh}^2\,dm_{Q\nu}^2}&\,=\,\frac{1}{512\pi^3}\,\frac{| \mathrm{C}_{S_3Q_L\Phi \nu_R}^{ij}| ^2}{\Lambda^2} \,\frac{m_{Q\nu}^2}{M_{S_3}^3}\,, \end{aligned} \end{equation} where $m^2_{ij}$ is the invariant mass of the particle pair $(i,j)$ and for simplicity in notation we keep the field chirality labels implicit in the $m^2_{ij}$. The same chirality operator with the right handed neutrino in the final state yields a less trivial result: \begin{equation} \frac{d^2\Gamma(S_3\rightarrow \bar{d}_R^{i}\nu_R^{j}W)}{dm^2_{Wd}\,dm^2_{W\nu}}\,=\,\frac{\alpha_2}{32\pi^2}\,\frac{| \mathrm{C}_{S_3d_R W\nu_R}^{ij}| ^2\, |t^a|^2}{\Lambda^2}\,\frac{m^2_{d\nu}}{M^3_{S_3}}\,\frac{(m_{dW}^2)^2+(m^2_{\nu W})^2}{(M^2_{S_3}-m^2_{d\nu})^2}\,, \label{3bodyS3W} \end{equation} where $\mid t^a\mid^2$ is a color factor coming from the definition of the collinear field $\mathcal{W}$ in SCET defined in (\ref{scetgaugeboson}). \begin{comment} \subsection{Decay rates for the $S_3(3,3-\frac{1}{3})$} The leading order two body decay rate of the triplet LQ is governed by the matrix elements of the Lagrangian in (\ref{Ltripletleading}), which allows for a decay of the scalar $S_3(3,3,-\frac{1}{3})$ into a left handed chiral quark and a left handed chiral lepton. The final states are set to the mass basis and accordingly the Wilson coefficients as defined in Eq.(\ref{Wilsoncoef}).
Then we have the two body decay rate from the LO operator in Eq.(\ref{LOopS3}): \begin{equation} \begin{split} &\Gamma(S_3\rightarrow Q^c\bar{ L})=N_f\frac{M_{S_3}}{32\pi}|{\tilde{C_{3}}_L}^{ij}|^2 \\ \end{split} \end{equation} and the two body decay rate from the $\mathcal{O}(\lambda^3)$ operator in Eq.(\ref{2bodyS3sub1}) and Eq.(\ref{2bodyS3sub2}): \begin{equation} \Gamma(S_3\rightarrow \bar{Q}^{i}{\nu}^{j}_R )=N_f\frac{v^2}{2}\frac{M_{S_3}}{16\pi}\frac{ |{\tilde{C_3}^{(0)ij}_{Q\nu}}|^2 }{M^2} \end{equation} \begin{equation} \Gamma(S_3\rightarrow \bar{d}^{i}_RL^{j} )=N_f\frac{v^2}{2}\frac{M_{S_3}}{16\pi}\frac{ |{\tilde{C_3}^{(0)ij}_{dL}}|^2 }{M^2} \end{equation} Then the rates for a three body decay of the mixed chirality operators from the Lagrangian in Eq.(\ref{3jetS3}) are: \begin{align} \frac{d^2\Gamma(S_1\rightarrow \bar{d}^{i}_RL^{j}h)}{dm_{hd}^2dm_{Ld}^2}&=\frac{1}{512\pi^3}N_f\frac{| {\tilde{C_1}}_{Ld}^{ij}| ^2}{M^2} \frac{(m_{Ld}^2)}{M_{S_3}^3}\\ \frac{d^2\Gamma(S_1\rightarrow \bar{Q}^{i}h{\nu}^{j}_R)}{dm_{Qh}^2dm_{Q\nu}^2}&=\frac{1}{512\pi^3}N_f\frac{| {\tilde{C_1}}_{Q\nu}^{ij}| ^2}{M^2} \frac{(m_{Q\nu}^2)}{M_{S_3}^3} \end{align} The decay rate for the same chirality operators give a less trivial result which reads: \begin{equation} \frac{d^2\Gamma(S_3\rightarrow d_R^{i}\nu_R^{j}W)}{dm^2_{Wd}dm^2_{W\nu}}=\frac{\alpha_1}{32\pi^2}N_f\frac{| {\tilde{C_1}}_{d\nu}^{ij}| ^2 |Y|^2}{M^2}\frac{(m^2_{d\nu})}{M^3_{S_1}}\frac{(m_{dW}^2)^2+(m_{\nu W})^2}{(M^2_{S_1}-m^2_{d\nu})^2} \label{3bodys12} \end{equation} \end{comment} \section{\boldmath{SCET formalism for the vector leptoquark $U_1^{\mu}(3,1,\frac{2}{3})$}} \label{sectionU1} The vector $U_1^{\mu}$ is another interesting example from the family of leptoquarks that has been introduced as a solution to the departures from SM in the flavour sector \cite{Barbieri:2015yvd,Buttazzo:2017ixm}. It is a color triplet, $SU(2)$ singlet and has hypercharge $2/3$. 
In the following section we analyse its decays in leading and subleading order in power counting. All interactions of the field $U_1^{\mu}$ here are described in the soft limit by the HVEFT shown in Section \ref{HSEFT}. \subsection[Leading power two jet operators for the $U_1^{\mu}$] {\boldmath Leading power two jet operators for the $U_1^{\mu}$} At leading order in SCET the Lagrangian for the leptoquark $U_1^{\mu}$ is: \begin{equation} \begin{aligned} \mathcal{L}_{U_1}^{(\lambda^2)}\,&=\,C^{ij}_{U_1Q_LL_L}(\Lambda,M_{U_1},\mu)\,\mathcal{O}^{ij}_{U_1Q_LL_L}(\mu)\,+\,C^{ij}_{U_1d_R\ell_R}(\Lambda,M_{U_1},\mu)\,\mathcal{O}^{ij}_{U_1d_R\ell_R}(\mu)\,\\ &+\,C^{ij}_{U_1u_R\nu_R}(\Lambda,M_{U_1},\mu)\,\mathcal{O}^{ij}_{U_1u_R\nu_R}(\mu)\,+\, \text{h.c.} \end{aligned} \label{LagrangianU1} \end{equation} with the following dimension four operators: \begin{equation} \begin{aligned} \mathcal{O}_{U_1Q_LL_L}^{ij}&\,=\,\bar{Q}^{i}_{L,n_1}\,\slashed{U}_{1v\perp}\,L^{j}_{L,n_2}+\,(n_1\leftrightarrow n_2)\,, \\ \mathcal{O}^{ij}_{U_1d_R\ell_R}&\,=\,\bar{d}^{i}_{R,n_1}\,\slashed{U}_{1v\perp}\,\ell^{j}_{R,n_2}\,+\, (n_1 \leftrightarrow n_2)\,,\\ \mathcal{O}^{ij}_{U_1u_R\nu_R}&\,=\,\bar{u}^{i}_{R,n_1}\,\slashed{U}_{1v\perp}\,\nu^{j}_{R,n_2} +\,(n_1\leftrightarrow n_2)\,. \end{aligned} \label{leadingU1operators} \end{equation} where $\slashed{U}_{1v\perp}=\gamma_{\perp}^{\mu}\cdot {U}_{1v\perp\mu}$ with $\gamma_{\perp}^{\mu}$ defined in (\ref{gamma}). 
Then at leading order the vector leptoquark will decay into two fermions of the same chirality with the following unpolarized decay rates: \begin{equation} \begin{aligned} \Gamma(U_1\rightarrow \bar{Q}^{i}_L L^{j}_L)&=\,\frac{M_{U_1}}{24\pi}\mid \mathrm{C}_{U_1Q_LL_L}^{ij}\mid ^2\,, \\ \Gamma(U_1\rightarrow \bar{d}^{i}_R \ell^{j}_R)&=\,\frac{M_{U_1}}{24\pi}\mid \mathrm{C}^{ij}_{U_1d_R\ell_R}\mid ^2\,, \\ \Gamma(U_1\rightarrow\bar{u}_R^{i} \nu^{j}_R)&=\,\frac{M_{U_1}}{24\pi}\mid \mathrm{C}^{ij}_{U_1u_R\nu_R}\mid ^2 \,,\\ \end{aligned} \end{equation} where the Wilson coefficients above are in the mass basis. \subsection[Subleading power two jet operators for the $U_1^{\mu}$]{\boldmath Subleading power two jet operators for the $U_1^{\mu}$} At $\mathcal{O}(\lambda^3)$ we find there are two operator constructions allowed by gauge invariance and space-time symmetries. They differ by the SM scalar doublet $\Phi$ and the zero momentum field $\Phi^{(0)}$ defined in (\ref{phizero}). It is also useful to define the reparametrization invariant quantity $\Pi^{\mu}$ such that: \begin{equation} \Pi^{\mu}=\frac{(v\cdot \bar{n})n^{\mu}-(v\cdot n)\bar{n}^{\mu}}{2}\,, \end{equation} where $\Pi^{\mu}\rightarrow -\Pi^{\mu}$ under Hermitian conjugate and it is odd for $n\leftrightarrow \bar{n}$ \cite{Heiles:2020plj}.
Then the Lagrangian describing the decays of the vector leptoquark at subleading power is: \begin{equation} \begin{split} \mathcal{L}_{U_1}^{(\lambda^3)}\bigg\rvert_{\text{2 jet}}&=\frac{1}{\Lambda}\, C_{U_1Q_L\ell_R}^{(0)\,ij}\,(\Lambda,M_{U_1},\mu)\,{\mathcal{O}^{(0)\,ij}_{U_1Q_L\ell_R}}\,(\mu)\,\\ &\,+\frac{1}{\Lambda} \sum_{k=1,2}\int_{0}^{1}du \,{C_{U_1Q_L\ell_R}^{(k)\,ij}}\,(\Lambda,M_{U_1},\mu,u) \,{\mathcal{O}_{U_1Q_L\ell_R}^{(k)\,ij}}\,(\mu,u)\,+\, \text{h.c.}\,, \end{split} \label{LagrangianU1sub} \end{equation} with: \begin{equation} \begin{aligned} {\mathcal{O}^{(0)ij}_{U_1Q_L\ell_R}}(\mu)&=\bar{Q}^{i}_{L,n_1}\Phi^{(0)}\,\Pi\cdot U_{1v}\,\ell^{j}_{R,n_2} \,-(n_1\leftrightarrow n_2)\,,\\ {\mathcal{O}_{U_1Q_L\ell_R}^{(1)ij}}(u)&=\bar{Q}^{i}_{L,n_1}\Phi^{(u)}_{n_1}\,\Pi\cdot U_{1v}\,\ell^{j}_{R,n_2} \,-(n_1\leftrightarrow n_2)\,,\\ {\mathcal{O}_{U_1Q_L\ell_R}^{(2)ij}}(u)&=\bar{Q}^{i}_{L,n_1}\Phi^{(u)}_{n_2}\,\Pi\cdot U_{1v}\,\ell^{j}_{R,n_2} \,-(n_1\leftrightarrow n_2)\,.\\ \end{aligned} \end{equation} We divide by the large scale $\Lambda$ so that the Wilson coefficients remain dimensionless. The sum in the second line in (\ref{LagrangianU1sub}) accounts for both cases when the field $\Phi^{(u)}$ with momentum fraction $u\mathcal{P}_i$ is emitted in the $n_1$ or $n_2$ direction. The vector $v$ is the reference vector $v^{\mu}=(1,0,0,0)$ and $v\cdot U_{1v}=0$. It is then straightforward to calculate the remaining power suppressed two body decay rate: \begin{equation} \Gamma(U_1\rightarrow \bar{Q}^{i}_L\ell^{j}_R)=\frac{v^2}{12\pi}\frac{M_{U_1}}{\Lambda^2}\mid {\mathrm{C}^{(0)ij}_{U_1Q_L\ell_R}} \mid ^2 \end{equation} \subsection[Leading power three body decays for $U_1^{\mu}$]{\boldmath Leading power three body decays for $U_1^{\mu}$} It is not difficult to find the operator basis for $U_{1}^{\mu}$ for three jet final states. 
The symmetries in this case allow for only one operator and the Lagrangian reads: \begin{equation} \begin{split} \mathcal{L}_{U_1}^{(\lambda^3)}\bigg\rvert_{\text{3 jet}}&=\,\frac{1}{\Lambda}\, C^{ij}_{U_1Q_L\ell_R}(\Lambda,M_{U_1},\lbrace m^2_{k\ell}\rbrace,\mu)\,\mathcal{O}^{ij}_{U_1Q_L\ell_R}(\mu)+ \text{h.c.}\,, \end{split} \label{3jetU1} \end{equation} where $m^2_{k\ell}$ is the invariant mass of the particle pair $(k,\ell)$ and the operator is: \begin{equation} \mathcal{O}_{U_1Q_L\ell_R}^{ij}=\bar{Q}^{i}_{L,n_1}\,\Phi_{n_3}\,\Pi\cdot U_{1v}\,\ell^{j}_{R,n_2} -(n_1\leftrightarrow n_2)\,. \label{U13bodyL} \end{equation} In comparison to the scalars $S_{1}$ and $S_{3}$ there are no right handed neutrino interactions at subleading order for $U_1^{\mu}$. The differential decay rate for the three-body decays of $U_1^{\mu}$ can be derived from (\ref{3jetU1}): \begin{equation} \frac{d^2\Gamma}{dm^2_{Q\ell}\,dm^2_{h\ell}}=\frac{1}{1536\pi^3}\frac{\mid \mathrm{C}^{ij}_{U_1Q_L\ell_R} \mid ^2}{M_{U_1}\,\Lambda^2}\frac{(m^2_{Q\ell})^2}{(M^2_{U_1}-m^2_{h\ell})(M^2_{U_1}-m^2_{Qh})}\,. \end{equation} In deriving the above expression we have averaged over the polarizations of the massive vector and use the following relation for massless particles collinear in $n_1$ and $n_2$ directions: \begin{equation} n_1\cdot n_2=\frac{k_1\cdot k_2}{E_1E_2}\,, \end{equation} where the energies of the two fermions in the rest frame of the leptoquark are respectively $E_1=\frac{M^2_{U_1}-m^2_{h\ell}}{2M_{U_1}}$ and $E_2=\frac{M^2_{U_1}-m^2_{Qh}}{2M_{U_1}}$. \section{Running of the Wilson coefficients} \label{secwilsoncoeff} In the process of scale separation in an EFT, Wilson coefficients are the functions that capture the hard scale dependence. They correspond to loop diagrams with vertices from the full theory that have been integrated out. Inherently they depend on logarithms of ratios of the hard scale $\Lambda$ and the factorization scale $\mu$.
Thus a reliable result on the decay rates of heavy particles considered within an EFT framework requires resummation of these large logarithms in the Wilson coefficients. \par In SCET it is possible to achieve this using renormalization group techniques. Wilson coefficients obey well defined renormalization group (RG) equations which are derived from renormalization of their corresponding operators. At one loop the type of diagrams that contribute to the renormalization of the $\mathcal{O}(\lambda^2)$ operators are shown in Fig.(\ref{vertexdiag}). In the most general case the RG equation is a matrix equation such that: \begin{equation} \begin{aligned} \frac{d\,\boldsymbol{C}(\mu)}{d\ln\mu}&=\boldsymbol{\Gamma}(\mu)\otimes\,\boldsymbol{C}(\mu)\,,\\ \end{aligned} \label{RGEs} \end{equation} where $\boldsymbol{\Gamma}$ is the anomalous dimension matrix in generation space and $\boldsymbol{C}$ is the matrix of Wilson coefficients in generation space. The symbol $\otimes$ takes into account that the ordering of $\boldsymbol{\Gamma}$ and $\boldsymbol{C}$ matters. The solution of (\ref{RGEs}) can then be formally written as: \begin{equation} \boldsymbol{C}(\mu)\,=\,\boldsymbol{C}(\Lambda)\exp\Bigg[\int^{\mu}_{\Lambda}{d\ln\mu}\,\boldsymbol{\Gamma}(\mu) \Bigg] \label{CRGE} \end{equation} and it systematically resums the large logarithms of type $\ln(\Lambda^2/\mu^2)$. 
\par The anomalous dimension $\boldsymbol{\Gamma}$ of a SCET operator with three external lines, one heavy particle and two collinear particles in $n_1$ and $n_2$ directions depends on the cusps anomalous dimensions $\gamma^{(r)}_{\text{cusp}}(\alpha_r)$, on the single-particle anomalous dimensions $\boldsymbol{\gamma}^{i}$ and on the leptoquark anomalous dimension $\gamma^{LQ}$ such that \cite{Becher:2009kw,Alte:2018nbn,Becher:2009cu}: \begin{equation} \begin{split} \boldsymbol{\Gamma}\left( \lbrace p,p_1,p_2 \rbrace ,M,\mu\right) &=\frac{1}{2}\sum_{r}\left(C^{(r)}-C_{1}^{(r)}-C_{2}^{(r)}\right)\,\gamma^{(r)}_{\text{cusp}}(\alpha_r)\, \left(\ln\frac{\mu^2}{M^2}+i\pi\right)\,\\ & - \sum_{r}C^{(r)}\,\gamma^{(r)}_{\text{cusp}}(\alpha_r)\,\ln\frac{ \mu}{M}+ \sum_{i=1,2}\boldsymbol{\gamma}^{i}+\gamma^{LQ} \label{anomalousdim} \end{split} \end{equation} where $M$ is the mass of the leptoquark and $C^{(r)}\,,C^{(r)}_{1}$, $C_{2}^{(r)}$ are the Casimir operators of the leptoquark, the $n_1$ and $n_2$ collinear particles respectively for the gauge group $(r)$ where these particles transform. For a non-Abelian group $SU(N)$ the Casimir operator is $C_i=(N^2-1)/2N$ for the fundamental representation and $C_i=N$ for the adjoint representation. For the Abelian group $U(1)_{Y}$ we have $C_i=Y^2_i$, where $Y_i$ is the hypercharge of the particle. The $\gamma^{(r)}_{\text{cusp}}(\alpha_r)$ are functions of the coupling constant arising from light like Wilson loops \cite{Korchemskaya:1992je,Korchemskaya:1994qp}. Up to NNLO they depend only on $\alpha_r$ for each symmetry group $G^{(r)}$ \cite{Moch:2004pa, Korchemsky:1987wg,Jantzen:2005az}. For SM particles, $\boldsymbol{\gamma^{i}}$ are matrices in generation space that contain SM Yukawa matrices. They multiply the corresponding Wilson coefficient either from the left or from the right as described below. 
Here we present the resummation of the Wilson coefficients in the mass basis for the leading power two jet operators for the leptoquarks $S_1$, $S_3$ and $U_{1}^{\mu}$. We show in the appendix (\ref{anomalousdim3jet}) the anomalous dimensions for the three jet operators at $\mathcal{O}(\lambda^3)$ and a very similar procedure can be straightforwardly extended for the Wilson coefficients of these operators. We work at leading order in RG improved perturbation theory (PT), which is equivalent to resumming the large logarithms at next-to-leading logarithmic (NLL) order. This requires the two loop expressions for $\gamma^{(r)}_{\text{cusp}}(\alpha_r)$, one loop expression for $\boldsymbol{\gamma}^{i}$ and one loop $\gamma^{LQ}$. This estimate would give a prediction of the running effects to the tree level matching coefficients for various decay rates. \begin{figure} \centering \includegraphics[scale=0.5]{vertexdiag} \caption{Soft and collinear gluon emissions for the one-loop renormalization of operators at $\mathcal{O}(\lambda^2)$. The double line indicates a heavy leptoquark. The first diagram corresponds to soft gauge boson emissions, the second diagram describes final state interactions, the last diagram accounts for the type of diagrams where gauge bosons are emitted from the collinear Wilson lines. In the first two diagrams gauge bosons have soft momentum scaling, in the third diagram they have collinear scaling.} \label{vertexdiag} \end{figure} In the appendix (\ref{cusps}) we collect the explicit expressions for the cusp anomalous dimensions and the beta functions at two loop. \subsection[Resummation effects on the singlet $S_1$]{\boldmath Resummation effects on the singlet $S_1$} \label{resumationS1} From the formula (\ref{anomalousdim}) we derive the evolution of the Wilson coefficients of the $\mathcal{O}(\lambda^2)$ operators for $S_1$ shown in (\ref{S1operators}).
They are governed by the following anomalous dimensions in generation space: \begin{equation} \begin{split} {\boldsymbol{\Gamma}_{S_1^{\ast}u_R^{c}\ell_R}} &=\left(-\frac{4}{3} \gamma^{(3)}_{\text{cusp}}- \frac{1}{9}\gamma^{(1)}_{\text{cusp}}\right) \ln\frac{\mu}{M_{S_1}} - \frac{2}{3} \gamma^{(1)}_{\text{cusp}}\left(\ln\frac{\mu^2}{M^2_{S_1}}+i\pi\right) +\gamma^ {S_1}\,\\ &+\left( \boldsymbol{\gamma}^{\ell_R},\,.\,\right) + \left(.\,,\boldsymbol{\gamma}^{u_R}\right) \,,\\ {\boldsymbol{\Gamma}}_{S_1^{\ast}Q^c_LL_L} &=\left(-\frac{4}{3} \gamma^{(3)}_{\text{cusp}}-\frac{1}{9}\gamma^{(1)}_{\text{cusp}}\right) \ln\frac{\mu}{M_{S_1}}-\left( \frac{3}{4}\gamma^{(2)}_{\text{cusp}}+\frac{1}{12}\gamma^{(1)}_{\text{cusp}} \right)\left( \ln\frac{\mu^2}{M^2_{S_1}}+i\pi \right)\,\\ &+\gamma^ {S_1}+\left(\boldsymbol{\gamma}^{L_L},\,.\right) + \left(.\,,\boldsymbol{\gamma}^{Q_L}\right)\,,\\ {\boldsymbol{\Gamma}}_{S_1d_R\nu_R} &=\left(-\frac{4}{3} \gamma^{(3)}_{\text{cusp}}-\frac{1}{9}\gamma^{(1)}_{\text{cusp}}\right) \ln\frac{\mu}{M_{S_1}}+\gamma^ {S_1}+\left(.\,,\boldsymbol{\gamma}^{d_R}\right)\,, \end{split} \label{GammaS1LP} \end{equation} where we use the notations $\left( .\,,\boldsymbol{\gamma}\right)$ and $\left( \boldsymbol{\gamma}\,,.\right)$ for the single particle anomalous dimensions to indicate a multiplication with the Wilson coefficient from the left and from the right respectively, such that: \begin{equation} \begin{aligned} \left( .\,,\boldsymbol{\gamma}\right)\, \boldsymbol{C}\,&\equiv\, \boldsymbol{C}\, \boldsymbol{\gamma}\,\\ \left(\boldsymbol{\gamma}\,,.\right)\,\boldsymbol{C}\,&\equiv\,\boldsymbol{\gamma}\, \boldsymbol{C} \end{aligned} \end{equation} If $\boldsymbol{\gamma}$ is the anomalous dimension of an antiparticle the multiplication with the Wilson coefficient is from the left and for a particle it becomes a multiplication from the right. 
The various single field anomalous dimensions in (\ref{GammaS1LP}) are \cite{Alte:2018nbn}: \begin{equation} \begin{split} \boldsymbol{\gamma}^{\ell_R}&=-\frac{\alpha_1}{4\pi}+\frac{1}{16 \pi^2}{\textbf{Y}}_{\ell}^{\dagger}\textbf{Y}_{\ell} \, ,\\ \boldsymbol{\gamma}^{L_L}&=-\frac{9\alpha_2}{16\pi}-\frac{\alpha_1}{16\pi}+\frac{1}{32\pi^2}{\textbf{Y}}_{\ell}\textbf{Y}_{\ell}^{\dagger} \,,\\ \boldsymbol{\gamma}^{u_R}&=-\frac{\alpha_3}{\pi}-\frac{\alpha_1}{9\pi}+\frac{1}{16\pi^2}\textbf{Y}_{u}^{\dagger}\textbf{Y}_{u}\,,\\ \boldsymbol{\gamma}^{d_R}&=-\frac{\alpha_3}{\pi}-\frac{\alpha_1}{36\pi}+\frac{1}{16\pi^2}\textbf{Y}_{d}^{\dagger}\textbf{Y}_{d}\,,\\ \boldsymbol{\gamma}^{Q_L}&=-\frac{\alpha_3}{\pi}-\frac{9\alpha_2}{16\pi}-\frac{\alpha_1}{144\pi}+\frac{1}{32\pi^2}\left( \textbf{Y}_u\textbf{Y}_u^{\dagger}+\textbf{Y}_{d}\textbf{Y}_{d}^{\dagger}\right)\,,\\ \gamma^{S_1}&=-\frac{2\,\alpha_3}{3\pi}-\frac{\alpha_1}{18\pi}\,, \end{split} \label{fieldanomdim} \end{equation} where $\textbf{Y}_{\ell}$ is the Yukawa matrix for the lepton $\ell$, $\textbf{Y}_{u}$ and $\textbf{Y}_{d}$ are the Yukawa matrices for up and down-type quarks. In practice we transform the Wilson coefficients to the mass basis since this is the relevant basis for physical quantities such as decay rates. In the mass basis the Yukawa matrices in (\ref{fieldanomdim}) become diagonal except for the case of $\boldsymbol{\gamma}^{Q_L}$.
In this case one needs to distinguish between the up-type quark and the down-type quark in the doublet \cite{Heiles:2020plj}: \begin{equation} \begin{aligned} \boldsymbol{\gamma}^{u_L}&=-\frac{\alpha_3}{\pi}-\frac{9\alpha_2}{16\pi}-\frac{\alpha_1}{144\pi}+\frac{1}{32\pi^2}\Big[\text{diag}\left( y^{2}_{u},y^{2}_{c},y^{2}_{t} \right)+\boldsymbol{V}\,\text{diag}\left( y^2_{d},y^2_{s},y^{2}_{b}\right)\boldsymbol{V}^{\dagger}\Big]\,,\\ \boldsymbol{\gamma}^{d_L}&=-\frac{\alpha_3}{\pi}-\frac{9\alpha_2}{16\pi}-\frac{\alpha_1}{144\pi}+\frac{1}{32\pi^2}\Big[\boldsymbol{V}^{\dagger}\,\text{diag}\left( y^{2}_{u},y^{2}_{c},y^{2}_{t} \right)\boldsymbol{V}+\text{diag}\left( y^2_{d},y^2_{s},y^{2}_{b}\right)\Big]\,,\\ \end{aligned} \label{gammaleft} \end{equation} where $y_{q}$ is the Yukawa coupling of the quark $q$ and $\boldsymbol{V}=\boldsymbol{U}_{u}^{\dagger}\boldsymbol{U}_{d}$ is the CKM matrix. For the numerical estimates we present here we take into account only the top quark Yukawa coupling. All the other quark Yukawas, including the one in $\boldsymbol{\gamma}^{u_R}$, have tiny effects in the resummation. We also neglect the Yukawa coupling of the leptons in $\boldsymbol{\gamma}^{\ell_R}$ and in $\boldsymbol{\gamma}^{L_L}$. This means in practice for the running of the Wilson coefficients in the mass basis the only relevant term including Yukawa couplings in the expressions in (\ref{fieldanomdim}) is $\text{diag}\left( y^{2}_{u},y^{2}_{c},y^{2}_{t} \right)$ as in the first line in (\ref{gammaleft}) which becomes $\text{diag}\left( 0,0,y^{2}_{t} \right)$, in this approximation. The evolution of the Yukawa coupling of the top quark is given by \cite{Grzadkowski:1987tf}: \begin{equation} \frac{d}{d\ln\mu}\,y_{t}(\mu)\,=\,\frac{9\,y^{3}_{t}}{32 \pi^2}\,-\,y_{t}\left( \frac{17\,\alpha_1}{48 \pi}+\frac{9\,\alpha_2}{16\pi}+\frac{2\,\alpha_3}{\pi}\right)\,.
\end{equation} In the case of leptoquarks the full gauge group running must be included for consistent numerical estimates at leading order RG improved PT. \begin{figure}[t] \begin{center} \includegraphics[scale=0.86]{graph1} \end{center} \caption{Resummation effects on Wilson coefficients of the $\mathcal{O}(\lambda^2)$ operators for $S_1$ as a function of $M_{S_1}$ with top quark final state jets in all cases. The running is performed from the leptoquark scale to the top mass. The solid lines show the whole contribution and the dashed lines show the resummation only for the double logarithms. } \label{graph1} \end{figure} \par We now present numerical results for the resummation of the Wilson coefficients from the leptoquark scale to a lower scale for the $\mathcal{O}(\lambda^2)$ operators for the $S_1$ shown in (\ref{S1operators}). For these operators the largest effects comes from $\bar{t}\ell$ final states. Therefore we fix the low scale to the top quark mass and we consider $M_{S_1}=3 $ TeV. We numerically integrate the evolution function $\mathcal{U}(M_{S_1},\mu)$ such that the Wilson coefficients at different scales are related by: \begin{equation} \mathrm{C}^{t\ell}(m_t)=C^{t\ell}(M_{S_1})\,\exp\, \Big[\mathcal{U}(M_{S_1},m_t)\Big]\,. \end{equation} where $\ell$ here stands for a lepton either left handed or right handed. We find the following results: \begin{equation} \begin{aligned} \mathrm{C}^{t\ell}_{S_1^{\ast}u_R^c\ell_R}(m_t)&\,\approx\,0.93 \,e^{0.02 i}\,\mathrm{C}^{t\ell}_{S_1^{\ast}u_R^c\ell_R}(M_{S_1})\\\, \mathrm{C}^{t\ell}_{S_1^{\ast}Q_L^cL_L}(m_t)&\,\approx\, 0.92\, e^{0.07 i}\,\mathrm{C}^{t\ell}_{S_1^{\ast}Q_L^cL_L}(M_{S_1}) \end{aligned} \end{equation} For the operator $\mathcal{O}^{ij}_{S_1d_R\nu_R}$ the running is practically independent on the specific final state lepton flavour. 
In this case we find: \begin{equation} \mathrm{C}^{ij}_{S_1d_R\nu_R}(m_t)\,\approx\, 0.96\, \mathrm{C}^{ij}_{S_1d_R\nu_R}(M_{S_1}) \end{equation} In Fig.(\ref{graph1}) we show how the resummation effects vary for different values of $M_{S_1}$. We compare between the full running effects and the contribution only from terms multiplied by $\gamma_{\text{cusp}}^{(r)}$ for the full gauge group. In the latter case the single logarithmic terms coming from single-particle anomalous dimensions are neglected. In practice this is often the case, where one resums only the double logarithms which exponentiate. Though in Fig.(\ref{graph1}) it can be seen that this difference is at least of $\mathcal{O}(10\%)$. In fact this is a merit of the effective theory that RG methods allow for a consistent resummation of the large logarithms, both single and double logarithms. \subsection[Resummation effects on the triplet $S_3$] {\boldmath Resummation effects on the triplet $S_3$} \begin{figure} \centering \includegraphics[scale=0.86]{graph2} \caption{Variation of the resummation effects on the $C_{S_3Q^{c}_LL_L}$ with the mass of $S_3$, for left handed top quark and left handed lepton final states with initial scale around the leptoquark mass. The solid line shows the whole contribution and the dashed line represents only the resummation of the double logarithms.} \label{graph2} \end{figure} As presented in Section \ref{s3sec} at leading order the triplet leptoquark $S_3$ decays only into a left handed quark and left handed lepton.
In this case the Wilson coefficient $\mathrm{C}^{ij}_{S_3Q^{c}_LL_L}$ obeys an RG equation with anomalous dimension: \begin{equation} \begin{split} {\boldsymbol{\Gamma}}_{S_3^{\ast}Q^{c}_LL_L}&=\left(- \frac{4}{3} \gamma^{(3)}_{\text{cusp}}-2\gamma_{\text{cusp}}^{(2)}-\frac{1}{9} \gamma_{\text{cusp}}^{(1)}\right) \ln\frac{\mu}{M_{S_3}}\,\\ &+\left( \frac{1}{4}\gamma_{\text{cusp}}^{(2)}-\frac{1}{12}\gamma_{\text{cusp}}^{(1)} \right)\left(\ln\frac{\mu^2}{M^2_{S_3}}+i\pi\right)+\gamma^ {S_3}+\left(\boldsymbol{\gamma}^{L},.\right)+ \left(.\,,\boldsymbol{\gamma}^{Q}\right) \end{split} \end{equation} where $\boldsymbol{\gamma}^{L_L}$, $\boldsymbol{\gamma}^{Q_L}$ are shown in (\ref{fieldanomdim}) and the field anomalous dimension $\gamma^{S_3}$ reads: \begin{equation} \gamma^{S_3}=-\frac{2\,\alpha_3}{3\pi}-\frac{\alpha_2}{\pi}-\frac{\alpha_1}{18\pi}\,. \end{equation} The QCD running for double logarithmic terms has not changed and it is indeed the same for the three leptoquarks since there are no QCD interactions between final states at $\mathcal{O}(\lambda^2)$. For a $3$ TeV leptoquark the effects are tiny in this case. They become more sizeable for $M_{S_3}\geq 4$ TeV. For instance for $M_{S_3}=4.5$ TeV we find: \begin{equation} \mathrm{C}^{t\ell}_{S_3Q^{c}_LL_L}(m_t)\,\approx\, 0.97\, e^{-0.02 i}\, \mathrm{C}^{t\ell}_{S_3Q^{c}_LL_L}(M_{S_3}) \end{equation} In Fig.(\ref{graph2}) we show how these effects change significantly when single logarithmic terms are not included. For all the mass range the difference accounts for a change of $\mathcal{O}(20\%)$ in the Wilson coefficients. \subsection[Resummation effects on the vector $U_1^{\mu}$] {\boldmath Resummation effects on the vector $U_1^{\mu}$} \begin{figure} \begin{center} \includegraphics[scale=0.86]{graph3} \end{center} \caption{Resummation effects on the Wilson coefficients of $\mathcal{O}(\lambda^2)$ operators for $U^{\mu}_{1}$ as a function of $M_{U_1}$. 
The results are for top quark and lepton final state for $\mathrm{C}_{U_1Q_LL_L}$ and $\mathrm{C}_{U_1d_R\ell_R}$ and top quark and right handed neutrino for $C_{U_1u_R\nu_R}$. In both cases the initial scale is set to the $M_{U_1}$. The solid lines show the full effects and the dashed lines take into account only the double logs.} \label{graph3} \end{figure} In a similar fashion we derive the anomalous dimensions of the leading order two jet operators for $U_1^{\mu}$ in (\ref{leadingU1operators}). We find that: \begin{equation} \begin{split} \boldsymbol{\Gamma}_{U_1Q_LL_L}&=\left(-\frac{4}{3}\gamma^{(3)}_{\text{cusp}}-\frac{4}{9}\gamma^{(1)}_{\text{cusp}}\right)\ln\frac{\mu}{M_{U_1}}\,\\ &+\left(-\frac{3}{4}\gamma_{\text{cusp}}^{(2)}+\frac{1}{12}\gamma^{(1)}_{\text{cusp}}\right)\left(\ln\frac{\mu^2}{M^{2}_{U_1}}+i\pi \right)+\gamma^{U_1}+\left(.\,,\boldsymbol{\gamma}^{Q}\right)+\left(\boldsymbol{\gamma}^{L}\,,.\right)\,,\\ \boldsymbol{\Gamma}_{U_1d_R\ell_R}&=\left( -\frac{4}{3}\gamma^{(3)}_{\text{cusp}}-\frac{4}{9}\gamma^{(1)}_{\text{cusp}}\right)\ln\frac{\mu}{M_{U_1}}-\frac{1}{3}\gamma^{(1)}_{\text{cusp}}\left( \ln\frac{\mu^2}{M^2_{U_1}}+i\pi\right)+\gamma^{U_1}\,\\ &+\left(.\,,\boldsymbol{\gamma}^{d_R}\right)+\left(\boldsymbol{\gamma}^{\ell_R}\,,.\right)\,,\\ \boldsymbol{\Gamma}_{U_1u_R\nu_R}&=\left(-\frac{4}{3}\gamma^{(3)}_{\text{cusp}}-\frac{4}{9}\gamma_{\text{cusp}}^{(1)}\right)\ln\frac{\mu}{M_{U_1}}+\gamma^{U_1}+\left(.\,,\boldsymbol{\gamma}^{u_R}\right)\,, \end{split} \end{equation} where the anomalous dimension of the leptoquark $U_1^{\mu}$ reads: \begin{equation} \begin{aligned} \gamma^{U_1}=&-\frac{2\,\alpha_3}{3\pi}-\frac{2\,\alpha_1}{9\pi}\,\\ \end{aligned} \end{equation} The results for top quark final states are shown in Fig.(\ref{graph3}) both for the complete resummation and for the separate double log contribution. Also in this case there is a significant difference of about $20\%$ in neglecting the single log resummation. 
For $M_{U_{1}}=3$ TeV we find the following numerical results: \begin{equation} \begin{aligned} \mathrm{C}^{t\ell}_{U_1Q_LL_L}(m_t)&\,\approx \,0.92\,e^{0.06i} \,\mathrm{C}^{t\ell}_{U_1Q_LL_L}(M_{U_1})\\\, \mathrm{C}^{t\ell}_{U_1d_R\ell_R}(m_t)&\,\approx 0.95\,e^{0.01i} \,\mathrm{C}^{t\ell}_{U_1d_R\ell_R}(M_{U_1})\,\\ \mathrm{C}^{t\nu}_{U_1u_R\nu_R}(m_t)&\,\approx 0.94 \,\mathrm{C}^{t\nu}_{U_1u_R\nu_R}(M_{U_1})\\\, \end{aligned} \end{equation} \subsection{Example of an analytic solution of the RG equation} At one loop contribution it is possible to derive an analytic solution of the RG equations of the Wilson coefficients for the full $SU(3)\times SU(2) \times U(1)$ interactions. Beyond one loop this is challenging because cusp anomalous dimensions and beta functions start to mix with each other. Here we show an example of the exact solution for the evolution of the Wilson coefficient for a NLL resummation. We consider the Wilson coefficient of the operator $\mathcal{O}^{ij}_{S_1^{\ast}u_R^c\ell_R}$ in (\ref{S1operators}) neglecting the $SU(2)$, $U(1)$ and Yukawa running. We define the anomalous dimension $\Gamma^{\text{QCD}}_{S_1^{\ast}u_R^c\ell_R} $ such that: \begin{equation} \Gamma^{\text{QCD}}_{S_1^{\ast}u_R^{c}\ell_R}=-\frac{4}{3} \gamma^{(3)}_{\text{cusp}}\, \ln\frac{\mu}{M_{S_1}}+\gamma^ {S_1}_{\text{QCD}}+ \gamma^{u_R}_{\text{QCD}}\,, \end{equation} where $\gamma^{u_R}_{\text{QCD}}=-{\alpha_3}/{\pi}$ and $\gamma^ {S_1}_{\text{QCD}}=-2\alpha_3/{3\pi}$. 
Then it can be shown that the following expression is a solution to the RG equation with anomalous dimension $\Gamma^{\text{\text{QCD}}}_{S_1^{\ast}u_R^{c}\ell_R}$ \cite{Becher:2007ty,Becher:2006nr}: \begin{equation} \begin{aligned} C_{S_1^{\ast}u_R^c\ell_R}^{ij}(\Lambda,M_{S_1},\mu_2)=&\left( \frac{\mu_1}{M_{S_{1}}}\right)^{-a_{\gamma_3}(\mu_1,\mu_2)}\,\exp\Bigg[\mathcal{S}(\mu_1,\mu_2)-a_{\gamma}(\mu_1,\mu_2) \Bigg]\,\\ & \times C_{S_1^{\ast}u_R^c\ell_R}^{ij}(\Lambda,M_{S_{1}},\mu_1) \end{aligned} \end{equation} where the Sudakov exponent $\mathcal{S}$ is given by: \begin{equation} \mathcal{S}(\mu_1,\mu_2)=-\frac{4}{3}\int_{\alpha_3(\mu_1)}^{\alpha_3(\mu_2)}\,d\alpha\,\frac{\gamma^3_{\text{cusp}}(\alpha)}{\beta(\alpha)}\,\int^{\alpha}_{\alpha_3(\mu_1)}\,\frac{d\alpha^{\prime}}{\beta(\alpha^{\prime})} \end{equation} and we have defined $\gamma=\gamma_{\text{QCD}}^ {S_1} + \gamma^{u_R}_{\text{QCD}}$ and $\gamma_3=-\frac{4}{3}\gamma^{(3)}_{\text{cusp}}$. The quantity $a_{\gamma_i}$ is defined as: \begin{equation} a_{\gamma_i}(\mu_1,\mu_2)=-\int_{\alpha_3(\mu_1)}^{\alpha_3(\mu_2)}\,d\alpha\,\frac{\gamma_i(\alpha)}{\beta(\alpha)} \end{equation} In the above solution $\mu_1$ should be of the order of $M_{S_1}$ so that the initial condition is free of large logarithms. 
Using the one loop expressions of the one-particle anomalous dimensions, two loop cusps and two loop QCD $\beta$-function the scale evolution of the Wilson coefficient $C_{S_1^{\ast}u_R^c\ell_R}^{ij}$ is then \cite{Becher:2007ty}: \begin{equation} \begin{aligned} C_{S_1^{\ast}u_R^c\ell_R}^{ij}(\Lambda,M_{S_1},\mu_2) &=C_{S_1^{\ast}u_R^c\ell_R}^{ij}(\Lambda,M_{S_1},\mu_1)\left( \frac{\mu_1}{M_{S_{1}}}\right)^{-a_{\gamma_3}}\,\\ & \times \exp \Bigg\{ \frac{4\pi}{\alpha_3(\mu_1 )} \left(1-\frac{1}{r}-\ln r\right) +\left(\frac{251}{21} - \pi^2\right) \left(1-r+\ln r \right)\,\\ &\hspace{1.3cm}+ \frac{13}{7}\ln^2 r -\frac{10}{21\pi}\ln r \Bigg\} \end{aligned} \end{equation} where $r=\alpha_3(\mu_2)/\alpha_3(\mu_1)$ and at LO in RG-improved PT $a_{\gamma_3}=(56/9\pi)\ln r$. \section{Matching for tree level Wilson coefficients} \label{sectionmatching} In this section we look at certain UV models for each of the leptoquarks we have considered and perform a tree level matching of the matrix elements into the corresponding SCET Lagrangians. We match the operators that describe the two body decays at leading order and subleading order in the parameter $\lambda$. We start with the renormalizable Lagrangian that describes the $S_1$ interactions. We follow a similar notation as in \cite{Sakaki:2013bfa} and include an additional term for a right handed neutrino: \begin{equation} \mathcal{L}_{S_1}= g_{1L}^{ij}\,\bar{Q}^{c,i}_{L}i\sigma_2L_{L}^{j}S_1^{\star} +g^{ij}_{1R}\,\bar{u}^{c,i}_{R}\ell_{R}^{j}S_1^{\star} +g^{ij}_{1\nu}\,\bar{d}^{c,i}_{R}\nu_{R}^{j}S_1^{\star} +\text{h.c.} \label{S1UV} \end{equation} where $g^{ij}$ is the coupling strength between a quark of generation $i$ and a lepton of the generation $j$. 
Tree level matching of the matrix elements of the above Lagrangian into our effective Lagrangian in (\ref{Lagrangian1}) yields the following Wilson coefficients: \begin{equation} \begin{aligned} C_{S_1^{\ast}u_R^{c}\ell_R}^{ij}&=g^{ij}_{1R}\,\\ C^{ij}_{S_1^{\ast}Q^{c}_LL_L}&=g_{1L}^{ij}\,\\ C_{S_1^{\ast}d_R^c\nu_R}^{ij}&=g_{1\nu}^{ij} \end{aligned} \end{equation} Matching of the subleading order SCET Lagrangian (\ref{subS1}) into the Lagrangian in (\ref{S1UV}) gives vanishing Wilson coefficients for all the subleading operators for two body decays of the $S_1$: \begin{equation} \begin{aligned} &C^{(0)^{ij}}_{S_1^{\ast}L_L\Phi d_R}=C^{(0)^{ij}}_{S_1Q_L\Phi\nu_R}= C^{(1)^{ij}}_{S_1^{\ast}L_L\Phi d_R}=C^{(2)^{ij}}_{S_1^{\ast}L_L\Phi d_R}=0\\ &C^{(1)^{ij}}_{S_1Q_L\Phi\nu_R}=C^{(2)^{ij}}_{S_1Q_L\Phi\nu_R}=C^{(1)^{ij}}_{S_1d_R B\nu_R}=C^{(2)^{ij}}_{S_1d_R B\nu_R}=0 \end{aligned} \end{equation} This is a remnant of the fact that at leading power the $S_1$ couples only to the charge conjugate of the quark field, while the SM Higgs boson and the gauge bosons do not have conjugate particle vertices. In such a case it is not possible to get a hard propagator, which would have been integrated out in the effective theory. \par The full theory Lagrangian that describes the $S_3$ renormalizable interactions with SM fields is given by: \begin{equation} \mathcal{L}_{S_3}=g_{3L}^{ij}\,{\bar{Q^i}}^{c,a}_{L} \, \epsilon^{ab} \,S_{3}^{\ast bd}\,{L^{j,d}_{L}} +\text{h.c.} \end{equation} Then at tree level the Wilson coefficient from the SCET Lagrangian in (\ref{Ltripletleading}) reads: \begin{align} C_{S_3Q^{c}_LL_L}^{ij}=g_{3L}^{ij} \end{align} Also in this case the Wilson coefficients at subleading order are equal to zero. \par Lastly we look at the UV Lagrangian that describes the interactions of the vector leptoquark $U_1^{\mu}$. Since this is a Lorentz vector there are more subtle issues that arise regarding the UV completed model. 
The cases that are usually considered in literature are either a gauge model where $U_1^{\mu}$ arises from the breaking of a gauge symmetry into the SM gauge group \cite{Pati:1974yy,DiLuzio:2017vat} or strongly interacting models \cite{Baker:2019sli}. The most general Lagrangian describing the interaction of the leptoquark $U_1^{\mu}$ reads: \begin{equation} \begin{split} \mathcal{L}_{U_1} &= \frac{g_U}{\sqrt{2}}\left( \beta^{ij}_L\,\bar{Q}_{L}^{i}\gamma_{\mu}L^{j}_{L} U_1^{\mu} + \beta^{ij}_R\,\bar{d}_R^{i}\gamma_{\mu}\ell_R^{j}U_1^{\mu} \right) +g_{U}^{\nu}\,\beta_{\nu}^{ij}\,\bar{u}_{R}^{i}\gamma_{\mu}\nu_R^{j}U_{1}^{\mu}\,\\ &-ig_s(1-\kappa_U)U^{\dagger}_{1\mu}T^{a}U_{1\nu}G^{a\mu\nu}-ig_Y\frac{2}{3}(1-\tilde{\kappa}_U)U^{\dagger}_{1\mu}U_{1\nu}B^{\mu\nu}\,+\text{h.c.} \end{split} \label{LU1} \end{equation} The last two terms in (\ref{LU1}) describe the interaction of the $U_1^{\mu}$ with other gauge fields but they do not contribute to the matrix elements of the SCET operators for a tree level matching. We require that in the collinear limit for the same initial and final states the matrix elements of the Lagrangian in equation (\ref{LagrangianU1}) and in (\ref{LU1}) give the same result. 
Then we find the following Wilson coefficients of the $\mathcal{O}(\lambda^2)$ operators at tree level: \begin{equation} \begin{aligned} C^{ij}_{U_1Q_LL_L}&=\frac{g_U}{\sqrt{2}}\beta_L^{ij}\,\\ C^{ij}_{U_1d_R\ell_R}&=\frac{g_U}{\sqrt{2}}\beta_R^{ij}\,\\ C^{ij}_{U_1u_R\nu_R}&=\frac{g^{\nu}_{U}\beta_{\nu}^{ij}}{\sqrt{2}}\,\\ \end{aligned} \end{equation} For the Wilson coefficients at $\mathcal{O}(\lambda^3)$ in this case we find the non-trivial results: \begin{equation} \begin{aligned} {C_{U_1Q_L\ell_R}^{(0)ij}}&=0\,,\\ {C_{U_1Q_L\ell_R}^{(1)ij}}&=-i \frac{g_U}{\sqrt{2}}\,\beta_{R}^{ij}\,y_{d_i}\frac{2\Lambda}{M_{U_1}(1+u)^2}\,,\\ {C_{U_1Q_L\ell_R}^{(2)ij}}&=-i \frac{g_U}{\sqrt{2}}\,\beta_{L}^{ij}\,y_{\ell_j}\frac{2\Lambda u}{M_{U_1}(1+u)^2}\,,\\ \end{aligned} \end{equation} where $y_{d_i}$ is the Yukawa coupling of the down-type quark in generation $i$ and $y_{\ell_j}$ is the Yukawa coupling of the lepton $\ell$ of generation $j$. Here the variable $u$ represents the momentum fraction that is carried by the scalar field $\Phi$, which is emitted in the same jet with the final state $Q_L$. \section{Conclusions} \label{conclusions} In this work we have applied a SCET framework to present a detailed discussion on the decay rates of three beyond SM particles: two scalar leptoquarks $S_1$, $S_3$ and a vector leptoquark $U_1^{\mu}$. A consistent analysis of this problem requires treating the leptoquarks as heavy degrees of freedom that interact with their lighter decay products described by SCET operators. We have shown that at leading order in the effective theory the leptoquarks decay into two SM particles and in the case of $U_1^{\mu}$ and $S_1$ a right handed neutrino is also allowed as a final state. In addition we have presented the subleading power two jet operators and the leading power Lagrangians for three jet final states at $\mathcal{O}(\lambda^3)$. 
\par We have computed all the leading and sub-leading order two body decay rates together with the differential decay rates for three body decays for the $S_1$, $S_3$ and $U_{1}^{\mu}$. We have used RG equations of the SCET operators at leading order to resum the large logarithms in their Wilson coefficients at next to leading logarithmic order. We have given numerical estimates of these effects on the decay rates for some of the decays with most phenomenological interest. We have found that for the two jet operators, for all the three leptoquarks, there is a significant effect coming from the single logarithmic terms in the running of the Wilson coefficients. The decay rates would change by as much as about $20\%$ if the single logarithmic terms are not properly resummed. We have observed that the leading power two jet decays of the scalar leptoquark $S_1$ receive the largest correction from resummation. Lastly we have done a matching procedure of our effective Lagrangians for the leptoquark $S_1$, $S_3$ and $U_1^{\mu}$ into three corresponding extensions of the SM with these heavy particles and shown the relations between the Wilson coefficients in our effective theory and various coupling constants in these models. \par With this work we have extended the application of the SCET framework for physics beyond the SM developed in \cite{Alte:2018nbn} to non-singlet exotic particles. We have studied leptoquarks, which are considered main candidates for solving several observed deviations from the SM in the flavour sector. On application grounds this work provides an estimation of the effects of resummation on the main decay rates of the singlet leptoquark $S_1$, the triplet $S_3$ and the vector leptoquark $U_1^{\mu}$. \subsubsection*{Acknowledgments} M.N.~thanks Gino Isidori, the particle theory group at Zurich University and the Pauli Center for hospitality during a sabbatical stay. 
This research was supported by the Cluster of Excellence {\em Precision Physics, Fundamental Interactions and Structure of Matter\/} (PRISMA${}^+$ -- EXC~2118/1) within the German Excellence Strategy (project ID 39083149). \newpage
1,108,101,565,913
arxiv
\section{Introduction} A sequential generation of patterns by a system of oscillators is motivated by central pattern generators of biological neural networks \cite{Hooper, brocard}. Central pattern generators usually are considered in the context of neuroscience as an explanation of how nervous systems produce movements. They are autonomous neural networks that can endogenously produce rhythmically patterned output like breathing, walking, or heartbeat. Apart from improving an understanding of the biological aspects, nowadays the engineering aspect of networks of oscillators plays a prominent role \cite{Manrubia}. For example, complex dynamical structures in populations of phase oscillators were engineered by means of nonlinear time-delayed feedback that is implemented in the interactions of the oscillators \cite{kori}. In this work we address the problem of generation of periodic sequences of patterns by a dynamical system of phase oscillators. We consider the case where a set of $P$ patterns $\xi$ should be presented periodically in a certain time order. To solve this problem we design a system which consists of a pacemaker, a retrieval network, and $P$ stored patterns between these elements. The proposed system can be seen as an associative memory driven by a pacemaker. Here, we use a particular case of an associative memory (retrieval network) with time-dependent couplings controlled by the pacemaker through the patterns $\xi$. Associative memory models based on the use of phase oscillators are alternatives to the Hopfield model \cite{Hopfield} with spins replaced by phase oscillators. These models are generalizations of the Kuramoto model \cite{kuramoto} (for a review see Acebron \cite{Acebron}). Examples for such generalizations were considered by Aoyagi \cite{Aoyagi} and further improved by Nishikawa {\it et al.} \cite{Nishikawa} with the goal to achieve a storing capacity similar to that of the Hopfield model along with error-free retrieval. 
As a result, our system is able to produce sequences of patterns in the retrieval network. These patterns are encoded as phase differences between the oscillators. The associated dynamical states correspond to phase-locked synchronization. Such states amount to cluster partitions of the oscillators' set, in which oscillators sharing the same phase are gathered in one cluster. The system can be seen as a stylized version of the central pattern generator of the leech heartbeat \cite{Hooper}, that is composed of two sets, the rhythm generator (corresponding to our pacemaker), and the pattern generator (the retrieval network). The paper is organized as follows. In section \ref{sec1} we present the model for our device and its different building blocks. In section \ref{sec2} we illustrate its performance and point on the analogy to a central pattern generator. The conclusions are drawn in section \ref{sec3}. \section{The model of our device}\label{sec1} The system is composed of a phase oscillator, which plays the role of pacemaker and is characterized by a phase variable $\psi$, a retrieval network with $N$ globally coupled phase oscillators with phases $\phi_i$, $i=1,...,N$, and $P$ patterns $\xi$ stored in the couplings between the pacemaker and the retrieval network. The function of the pacemaker is to control the timing and activation of the stored patterns $\xi$ as a function of its phase. In this way, the pacemaker works as a clock that points to the pattern that should be activated in the sequence. The retrieval network encodes the stored patterns $\xi$ as phase differences between its $N$ phase oscillators, one by one as a function of time. We call it retrieval network since its function consists in retrieving the information stored in the couplings between the pacemaker and the network, and translating the patterns to the corresponding cluster partitions of the phase oscillators. 
The operation of the retrieval network is achieved by designing a suitable energy function $L$. The minima of this function are approached by a gradient dynamics in which the energy function plays the role of the potential. The minima correspond to specific cluster partitions, for which the phase differences take only values of zero or $\pi$. These configurations are easily mapped to binary sequences. In the set of $N$ oscillators only $N-1$ phases are independent, therefore we can have $2^{N-1}$ cluster partitions. A pattern $\xi$ is then described as an $N-1$-dimensional vector with components $\xi_i$ being the phase differences between the oscillators $i$ and $N$. Now, in order to retrieve a selected pattern out of the $2^{N-1}$ ones that are in principle available, we implement couplings between the pacemaker and the phase oscillators which modulate the energy landscape in a way that the selected pattern becomes the only minimum at a given time. So the proposed system can be seen as an associative memory. The associative memory is particular in the sense that the couplings are time-dependent, driven and controlled by the pacemaker through the choice of patterns $\xi$. For the selection of another pattern, another set of couplings is addressed by the pacemaker. This way it becomes possible to retrieve a whole temporal sequence. If the retrieval network had only static sets of couplings between the oscillators, several minima could coexist and would be reached by the gradient dynamics depending on their basins of attraction. This case corresponds to an associative memory network as proposed by Aoyagi \cite{Aoyagi} and Nishikawa {\it et al.} \cite{Nishikawa}. Our aim is, however, different. We consider time-dependent couplings in order to have a time-dependent energy landscape that is controlled by the pacemaker. In addition, we impose the constraint that only one minimum of $L$ may exist at a time, when a pattern is selected by the pacemaker. 
The advantage then is that the gradient dynamics will lead to the required minimum starting from any initial condition due to the absence of competing basins of attraction. In the following we shall describe the building blocks of our device in detail, before we illustrate how it works for retrieving a cyclic sequence of patterns. \subsection{The retrieval network}\label{sec1a} The dynamics of this network is characterized by an energy function $L$ (``L'' is a reminder of its role as a Lyapunov function) whose gradient determines the phase evolution of the oscillators. In general, the system evolves to the different minima of $L$ depending on the initial condition and on the basin of attraction of each minimum. For this retrieval network we use a particular version of the model proposed by Nishikawa {\it et al.} \cite{Nishikawa}. We shall show how we tune the couplings in order to have only one minimum in $L$ when a pattern is selected for retrieval. The function $L$ then depends on $N$ oscillator phases $\Phi_i$ in the following way: % \begin{equation} L= -\frac{K}{4N} \sum_{i,j=1, i\not=j}^N \Big( \cos(\Phi_j-\Phi_i) -f_{ij} \Big)^2 \label{eq1} \end{equation} % with $\Phi_i \in [0,2\pi[, i=1,...,N$ the phase variables and $K$ the coupling strength that finally determines the speed of convergence of the dynamics towards the stationary state, see Eq.~\ref{eq2} below. As noted before, out of the maximally $N(N-1)/2$ different phase differences, only $N-1$ are independent, so we choose $\Delta\Phi_{iN}=\Phi_i-\Phi_N$, $i=1,...,N-1$ as independent variables, using the phase of the $N$th oscillator as reference. With $f_{ij}$ we denote the couplings, $f_{ij}\in \mathbf{R}, i,j\in \{1,...,N\}$. For vanishing couplings $f_{ij}$ we get back the Kuramoto model with twice the usual frequency in the interaction between the oscillators. 
Then, $L$ has obviously $2^{N-1}$ minima given by the vectors $\xi^{(k)} , k=1,..., 2^{N-1}$ of equal height with components $\xi^{(k)}_i$ $\in\{0,\pi\}$. In the configuration space of phase differences $\{\Delta\Phi_{iN}\}$, these minima are located at the corners of a hypercube of linear size $\pi$ with one corner in the origin of coordinates. It is these $2^{N-1}$ local minima that are our candidates for retrieval. The label $k$ of the state $\xi^{(k)}$ is determined by the pattern of $0$s and $\pi$s interpreted as binary sequence in decimal representation. In general, the $f_{ij}$ are couplings appropriately chosen to modulate the energy function $L$ in order to retrieve a selected pattern $\xi^{(s)}$. A sufficient condition for a local minimum reads that the Hessian matrix of $L$ is positive definite, i.e., all eigenvalues being larger than zero, due to an appropriate choice of $f_{ij}$. Now let us select one pattern $\xi^{(s)}$ with $s\in\{1,...,2^{N-1}\}$. Let the couplings modulate the interaction between oscillator pairs $(ij)$ according to % \begin{equation} f_{ij}(\alpha, s)=\alpha\left(\frac{2}{\pi}\vert\xi_i^{(s)}-\xi_j^{(s)}\vert-1\right)\; \label{eq4} \end{equation} % for $i,j\in\{1,...,N,i\not=j\}$ with $\xi_N^{(s)}\equiv 0$, $\alpha$ any real number with $\alpha>1$ and $s$ the index of the selected pattern. So the couplings $f_{ij}$ take values of $\pm\alpha$, depending on the pattern. This choice is guided by the postulate that the selected configuration remains the only local minimum when these couplings are applied, while all other former local minima become saddles in at least one direction or local maxima. The conjecture is that a choice according to Eq.\ref{eq4} satisfies this postulate. 
In the appendix we shall show that for this choice of couplings the Hessian is positive definite for each configuration that is selected from the $2^{N-1}$ patterns, and not positive definite for all other $2^{N-1}-1$ configurations, which were formerly also minima for vanishing couplings. (For $\alpha<1$ it can be shown that all local minima remain stable, but the selected one becomes the deepest.) The gradient dynamics will then retrieve the local minimum from any initial condition, not necessarily close to the selected minimum. It should be noticed that the choice of couplings $f_{ij}$ for a selected pattern $\xi$ by Eq. \ref{eq4} can be mapped to the Hebbian rule in the particular case where only one pattern is memorized by the associative memory of Nishikawa {\it et al.} \cite{Nishikawa}. However, this restriction to the case of only one minimum of the energy function (ensured by the choice of $\alpha>1$) has the advantage that it leads to an error-free retrieval starting from an arbitrary initial condition, possibly far away from the final configuration. Explicitly the gradient dynamics of the oscillators then reads: \begin{equation} \dot{\Phi_i}= -\frac{\partial L}{\partial \Phi_i}= -\frac{K}{N}\sum_{j=1 , j\not=i}^{N} \sin(\Delta\Phi_{ji}) \Big( \cos(\Delta\Phi_{ji})-f_{ji} \Big). \label{eq2} \end{equation} Similarly to the Kuramoto dynamics the interaction of the oscillators depends only on phase differences $\Delta \Phi_{ij}$, and due to the choice of trigonometric functions the interaction terms are bounded as in the Kuramoto dynamics. Due to the gradient dynamics the phase differences will evolve to a fixed point which is the minimum of the energy function $L$ that is closest to the initial conditions. Note that the second order Fourier term was related to the formation of clusters by Mato \cite{Mato}. In our case, this term appears due to the very construction of the energy function. 
\subsection{The role of the pacemaker} We extend our dynamics to sequential pattern retrieval via time dependent couplings controlled by a pacemaker. The pacemaker is a phase oscillator with constant frequency $\omega_R$ and phase $\psi$, whose time derivative is given by % \begin{equation}\label{equ_clock} \dot{\psi} = \omega_{R}. \end{equation} It is the phase $\psi$ that selects and activates a pattern $r$ among the $P$ stored ones over a duration $B$. This activation period lies in the phase (time) interval between $\psi=\psi_r$ and $\psi=\psi_{r+1}$. For simplicity we have chosen $\psi_r=\frac{2\pi}{P}(r-1)$ for all $r\in\{1,...,P\}$, and $B =\frac{2\pi}{P}$. This means, when the instantaneous phase $\psi$ comes to the value $\psi_r$, couplings $f_{ij}$ are switched on that guarantee the retrieval of the pattern $^r\xi^{s(r)}$. Here the pre-superscript $r$ indicates the label of the pattern within the time sequence, the post-superscript $s(r)$ stands for the decimal label of the pattern $r$ out of the selected subset. The corresponding dynamical equations read: % \begin{equation} \dot{\Phi_i}\;=\; -\frac{K}{N}\sum_{j=1, j\not=i}^{N} \sin(\Delta\Phi_{ji}) \Big( \cos(\Delta\Phi_{ji})-f_{ji}(\psi) \Big), \label{equ_oscillators} \end{equation} % where % \begin{equation} f_{ij}(\psi) = \sum^{P}_{r=1} \alpha\left(\frac{2}{\pi}\vert ^r\xi_i^{s(r)}- \; ^r\xi_j^{s(r)}\vert\;-\;1\right)g_{r}(\psi) \label{couplings_psi} \end{equation} % and % \begin{equation} g_r(\psi) = \Theta(\psi - \psi_r) - \Theta(\psi - \psi_r - B). \label{g} \end{equation} Here $\Theta$ denotes the Heaviside function. This means that the function $g_r(\psi)$ controls the couplings to be given as $\alpha\left(\frac{2}{\pi}\vert ^r\xi_i^{s(r)}-\; ^r\xi_j^{s(r)}\vert-1\right)$ over the phase interval $B$, starting from $\psi = \psi_r$ on. 
Obviously we should ensure that $\psi_{r+1}-\psi_r\ge B\ge \psi_{trans}$, that is, the time interval between two initiations of pattern retrievals and the time of application of the constant couplings (to reach the new pattern) should be larger than the transient time $\psi_{trans}$ which the retrieval network needs to go from one pattern to the next (the speed of the internal dynamics is controlled by $K$). The information for the sequence generation is stored in the phase values $\psi_r$'s through the functions $g_r(\psi)$ which ``switch on'' the appropriate couplings. Since the stored patterns $\xi$ are required at different times, they must be stored outside the retrieval network. Therefore we say that the patterns generated in the sequence are memorized in the couplings between the pacemaker and the retrieval network. This way the architecture can modulate the couplings $f_{ij}$ between the oscillators of the retrieval network. In summary, the pacemaker transforms the static contents, memorized in the couplings, into a temporal sequence of patterns, a feature that is in common with a central pattern generator. To complete our set of equations, we have to introduce noise in the retrieval dynamics. The reason is the following. As we have seen, once a pattern is selected for retrieval, the system evolves according to the dynamics of Eq.~\ref{eq2} to the only stable fixed point of $L$ of the retrieval network. When the next pattern of the sequence is activated by the pacemaker, the system is still in the former fixed point that turns into an unstable one. To kick the system out of this fixed point and follow the required sequence, we apply Gaussian white noise of small intensity $T$. 
Eq.~\ref{eq2} is then replaced by \begin{equation} \dot{\Phi_i}\;=\; -\frac{K}{N}\sum_{j=1, j\not=i}^{N} \sin(\Delta\Phi_{ji}) \Big( \cos(\Delta\Phi_{ji})-f_{ji}(\psi) \Big) + T\eta_i(t), \label{equ_oscillators_noise} \end{equation} where $\eta_i(t)$ is a random variable describing the white noise with zero mean, $\langle\eta_i(t)\eta_j(t')\rangle = \delta_{ij} \delta(t-t')$, and $T$ is the noise intensity. Our system is then described by Eqs.~\ref{equ_clock}, \ref{equ_oscillators_noise}, \ref{couplings_psi}, and \ref{g}. \section{Numerical study}\label{sec2} In this section we study an example of this system and focus on its dynamical properties. We integrate the dynamics using a second order stochastic Runge-Kutta method~\cite{Honeycutt} with time step $\Delta t = 0.01$ and noise intensity $T=0.001$. We consider a set of $N=11$ phase oscillators in the retrieval network. This network can therefore encode $2^{10} = 1024$ patterns as cluster partitions. Out of this set, we have randomly selected $P=5$ patterns. They are $ \;^1\xi^{672}$, $\; ^2\xi^{0}$,$\; ^3\xi^{942}$, $\; ^4\xi^{477}$, $\; ^5\xi^{1023}$ in the indicated order. The time-dependent couplings are changed at phase values $\psi_r=0,\; 2\pi/5,\; 4\pi/5,\;6\pi/5$ and $8\pi/5$. The parameters are chosen as $K = 10$, $B=2\pi/P$, $\alpha =2$ and $\omega_R=1$. Figure~\ref{fig3}a shows the energy $L$ as function of time evaluated in the actual state of the retrieval network. $L$ jumps from its minimal value at $L\approx -500$ to some larger value around $L\approx-300$, where it remains as long as the system of oscillators searches the new minimum, corresponding to the new choice of external fields. When the new minimum is found, $L$ drops to the minimal value again. 
During such an interval of duration $B$, the Euclidean distance $D(t)$ in configuration space between the actual state and the closest pattern (corner of the hypercube) has a peak at an intermediate time interval where the system is moving from one to the next selected state. We see these peaks in Fig.~\ref{fig3}b. For about half of the period $B$ this distance is zero, indicating that the state of the system corresponds to the required pattern. In Fig.~\ref{fig3}c we plot the states which are closest to the instantaneous states of the system as a function of time. Obviously the closest states are just the selected ones, but this does not mean that the actual states (evolving with time) are identical with the selected ones over the whole duration of the plateau; as mentioned before, the distance to the selected states vanishes only for roughly half of the period as it is seen from Fig.~\ref{fig3}b. The width of the peaks in the distance from the closest states can be tuned by the coupling parameter $K$; large $K$ accelerates the convergence to a new pattern, once the time-dependent couplings are changed; also the strength of the couplings, parameterized by $\alpha$, determines the speed of convergence. \begin{figure}[!ht] \begin{center} \includegraphics[width=1.0\columnwidth]{ortmanns_fig3.eps} \end{center} \caption{Sequential pattern retrieval of a sequence $s_1$ = $\{ \xi^{672}$, $\xi^{0}$, $\xi^{942}$, $\xi^{477}$, $\xi^{1023} \}$. (a) Energy $L(t)$ as function of time. (b) Euclidean distance of the actual state of the system to the closest state (corresponding to patterns on the corners of the hypercube). It vanishes for roughly half of the period $B$, so that the system then has retrieved the desired state. (c) Closest states to the current evolving state as function of time. As seen from the figure, the closest states themselves vary with time. The set of closest states agrees with the set of selected states. 
The states carry their decimal labels.} \label{fig3} \end{figure} \begin{figure}[!ht] \begin{center} \includegraphics[width=1.0\columnwidth]{ortmanns_fig4.eps} \end{center} \caption{Sequential pattern retrieval of different sequences with the same states. (a) Time evolution of the sequence $s_2$ = $\{ \xi^{0}$, $\xi^{1023}$, $\xi^{942}$, $\xi^{672}$, $\xi^{477} \}$. (b) Time evolution of the sequence $s_3$ = $\{ \xi^{942}$, $\xi^{1023}$, $\xi^{0}$, $\xi^{477}$, $\xi^{672} \}$. (c) Time evolution of the sequence $s_4$ = $\{ \xi^{1023}$, $\xi^{672}$, $\xi^{942}$, $\xi^{0}$, $\xi^{477} \}$. In all cases we show only the states which are closest to the current evolving state. For roughly half a period the distance between the closest state and the actual state of the system vanishes, which is interpreted as pattern retrieval.} \label{fig4} \end{figure} In Fig.~\ref{fig4} we show three permutations of the same set of the five stored patterns. Indicated are the five plateaus in time where a certain pattern remains the closest to the current state and where this pattern agrees with the system's state over roughly half of the period (the analogous figures to Fig.~\ref{fig3}b are not displayed here). It should be noticed that a change in the pattern sequence from Fig.~\ref{fig4}a to \ref{fig4}b and \ref{fig4}c only amounts to reorder the phase shifts $\psi_r$, no other change of the system's structure is needed. \section{Conclusions}\label{sec3} We have designed a system of phase oscillators that is able to produce periodic sequences of patterns. Patterns are stored in the couplings of the system and retrieved and encoded as phase differences. Due to the task division between the pacemaker, the stored patterns and the retrieval network, the system is very flexible and robust. Different sequences of the stored patterns can be implemented without modifying the system's structure. 
The retrieval network itself operates in a robust way since it has by construction only one minimum, therefore the dynamics converges to the desired pattern independently of the initial conditions. Our device may be regarded as a very stylized version of the central pattern generator of the leeches' heartbeat. According to Hooper \cite{Hooper}, the central pattern generator of the leech heartbeat can be divided into two sets, the rhythm generator (corresponding to our pacemaker), and the pattern generator (corresponding to our retrieval network). The pattern generator there generates the actual motor pattern in response to the driving input from the rhythm generator (in our case in response to the driving input of time-dependent couplings from the pacemaker). \section{Acknowledgments} We would like to thank Alexander S. Mikhailov very much for valuable discussions. \section{Appendix} Consider the energy function for an all-to-all coupled system of $N$ phase oscillators: \begin{equation}\label{aeq1} L=-\frac{K}{4N}\sum_{ij, i\not=j}^N (\cos(\Phi_j-\Phi_i) -f_{ij})^2. \end{equation} From now on we set the coupling strength $K=1$. Let the couplings be chosen according to % \begin{equation}\label{aeq4} f_{ij}(\alpha, s)=\alpha \bigg(\frac{2}{\pi}\vert\xi_i^s-\xi_j^s\vert-1\bigg)\; \textrm{for} \;i,j \in \{ 1,...,N, i \not= j \} \end{equation} % with $\Delta\Phi_{ij}=x_i-x_j$, $\xi_N^s=x_N\equiv 0$, $\alpha$ any real number with $\alpha>1$ and $s$ the index of the selected pattern. We now prove a sufficient condition that the Hessian matrix with respect to the $N-1$ independent phase differences is positive definite for the selected pattern and not positive definite for all other $2^{N-1}-1$ patterns, provided we choose the external fields $f_{ij}$ according to Eq.\ref{eq4}. 
From the first derivative $\partial L/\partial x_{i}$ we immediately see that candidates for extrema are $x_{i}\in \{0,\pi\}$, where $x_i$ was defined as $\Delta\Phi_{iN}=\Phi_i-\Phi_N$, while for $f_{iN}>1$ the individual cos-dependent terms are different from zero. For a particular given choice of $f_{ij}, f_{iN}$ with possibly alternating signs the first derivatives can vanish also at intermediate values of $x_{i}$ which we project on $[0,2\pi[$ that can lead to further extrema. This part we treat numerically in order to exclude that these extrema compete with the selected minimum that shall be retrieved. Next let us consider the Hessian of $L$ as function of the phase differences. Apart from the normalization factor, its diagonal elements are given as: \begin{eqnarray} \frac{\partial^2 L}{\partial x_{k}^2} &=& -\sin^2(x_{k}) + \cos(x_{k})\big( \cos(x_{k}) - f_{kN} \big) \nonumber \\ &-& \sum_{j=1, j \ne k}^{N-1} \bigg( \sin^2(x_{k} - x_{j}) - \cos(x_{k} - x_{j})\\ \nonumber &\cdot& \big( \cos(x_{k} - x_{j}) - f_{jk} \big) \bigg)\;, \label{equ_full_deri_22} \end{eqnarray} its off-diagonal elements are: \begin{equation} \frac{\partial^2 L}{\partial x_{k} \partial x_{l}} = \sin^2(x_{k} - x_{l}) - \cos(x_{k} - x_{l})\big( \cos(x_{k} - x_{l}) - f_{lk} \big) . \label{equ_full_deri_21} \end{equation} For a choice of the external fields $f_{ij}$ according to the rule (\ref{eq4}), the matrix simplifies to \begin{equation} \frac{\partial^2 L}{\partial x_{k}^2} = -\Bigg\{ \mp 1 \big( \pm 1 - f_{kN} \big) + \sum_{j=1, j\not=k}^{N-1} \bigg( \mp 1 \big( \pm 1 - f_{jk} \big) \bigg)\Bigg\} \label{equ_full_deri_22_corner} \end{equation} for the diagonal elements and $k=1,...,N-1$, and to \begin{equation} \frac{\partial^2 L}{\partial x_{k} \partial x_{l}} = \mp 1 \big( \pm 1 - f_{lk} \big) \label{equ_full_deri_21_corner} \end{equation} for the off-diagonal elements, with $k,l\in\{1,...,N-1\}, k\not=l$. 
The upper (lower) sign in front of the bracket with $f_{lk}$ stands for the case that the difference of components $\vert x_{k}^s-x_{l}^s\vert$, read off from the selected configuration $\vec{\xi}^s$, is zero ($\pi$), respectively. \noindent Next we study the positive definiteness of this matrix for all $2^{N-1}$ configurations which may be selected for retrieval. Here it is convenient to classify the configurations in terms of their Hamming distance from the selected pattern, i.e. the number of mismatches of components between $\vec{\xi}$ and $\vec{\xi}^{s}$, which varies between zero and $N-1$. By a suitable permutation of the coordinate axes in configuration space we can always achieve that the $k$ mismatches occur in the first $k$ coordinates of $\xi$ so that the corresponding Hessian $H$ is chosen as representative for all patterns with $k$ mismatches.\\\\ \noindent {\bf H with no mismatches} According to our choice of $f_{ij}$, their signs are opposite to those of $\cos(\Delta\Phi_{lk}^s)$, that is $\Delta\Phi_{lk}^s=0$ or $(\pi)$, so that $\cos\Delta\Phi_{lk}^s=1$ or $(-1)$ and $f_{lk}=-\alpha$ (or $+\alpha$), $\alpha>1$, respectively. The diagonal elements then simplify to $\partial^2L/\partial x_{k}^2=(N-1)(1+\alpha)$, the off-diagonal elements to $\partial^2L/\partial x_{k}\partial x_{l}=-(1+\alpha)$. The Hessian therefore takes the form of an $(N-1)\times(N-1)$ dimensional circulant matrix, whose eigenvalues turn out to be $\lambda_1=1+\alpha$ with multiplicity 1 and $\lambda_2=N(1+\alpha)$ with multiplicity $(N-2)$. (Here we have used the following: Eigenvalues of an $n\times n$ circulant matrix, specified by the vector $(c_0,c_1,...,c_{n-1})$, are known to be given as $c_j^\prime=\sum_{k=0}^{n-1}e^{2\pi ijk/n}c_k$ with $j=0,-1,-2,...,-(n-1)$. In our case the Hessian has a particularly simple form, for which one element in each row is $(N-1)(1+\alpha)$, while all other $N-2$ elements are $-(1+\alpha)$. 
Using these values and the fact that the sum over all $n$ roots of the unit circle adds up to zero leads to our results for the eigenvalues.) Now, since for $\alpha>1$ all eigenvalues are positive, the selected configuration corresponds to a local minimum in configuration space, whatever pattern has been chosen for retrieval. (In order to have only a local minimum at the selected configuration, obviously $\alpha>-1$ would be sufficient, but at the same time, the other patterns should become saddles or local maxima, and in view of that we shall need $\alpha>1$, see below.)\\\\ \noindent {\bf H with one mismatch} Next we evaluate the Hessian for a configuration that differs from the selected pattern in a single phase difference. Without loss of generality we assume the mismatch to happen in the first coordinate, affecting the Hessian in the first column and the first row according to $H_{11}=(N-1)(1-\alpha)$, $H_{1j}=(\alpha-1)=H_{j1}$ for $j=2,...,N-1$, while the remaining $(N-2)\times(N-2)$ submatrix remains circulant. The Sylvester criterion, applied to the positive definiteness of the overall $(N-1)\times(N-1)$ matrix, is now violated due to the first element $H_{11}=(N-1)(1-\alpha)<0$ for $\alpha>1$, so that the configuration with one mismatch is no longer a local minimum of the energy function $L$. (As necessary and sufficient condition for a Hermitian matrix to be positive definite, the Sylvester criterion requires that all leading principal minors of the matrix are positive.)\\\\ \noindent{\bf H with $k>1$ mismatches} Now the configuration has $k$ mismatches with the selected configuration which we arrange to occur in the first $k$ coordinates. 
Here it should be noticed that $f_{iN}$ will have the ``wrong" sign with respect to $\Delta\Phi_{iN}$, $i=1,..,k$, but $f_{il}$ will have the ``right" sign with respect to $\Delta\Phi_{il}$ for $i,l\in\{1,...,k\}$, since two mismatches compensate in the relative phase differences (``wrong" (or ``right") refer to the feature which prevents (or ensures) the property of becoming a local minimum, respectively.) This explains why the components of the $k\times k$ submatrix $S_k(H)$ in the upper left corner of the Hessian are given by \begin{equation} S_k(ii)=(N-1) - \alpha(N-2k+1),\;\; i=1,...,k \end{equation} for the diagonal elements and \begin{equation} S_k(ij)= -(1+\alpha)\;\; i,j=1,...,k,\;,i\not=j \end{equation} for the off-diagonal elements. The submatrix $S_k(H)$ is again circulant and has eigenvalues $\lambda_1=(N-k)(1-\alpha)$ with multiplicity 1 and $\lambda_2=N-\alpha(N-2k)$ with multiplicity $k-1$, so that the determinant of this submatrix reads $\vert S_k(H)\vert=\lambda_1\lambda_2^{k-1}$. Now we have to distinguish the following cases:\\ \noindent 1. k odd. For k odd, $\lambda_2^{k-1}$ is always positive while $\lambda_1<0$ for $\alpha>1$, so that $\vert S_k(H)\vert<0$ for odd k and $\alpha>1$ and the Sylvester criterion for $H$ being positive definite is violated as it should be for any positive number of mismatches.\\ \noindent 2. k even. For k even, both eigenvalues may be negative so that $\vert S_k(H)\vert>0$. In order to see that the Sylvester criterion is still violated, we have to distinguish the following cases: \\ \noindent (i) For $\alpha>1$ and $k>N/2$ we have $\lambda_1<0$ and $\lambda_2>0$, so that the Sylvester criterion is violated. 
\\ \noindent(ii) For $\alpha>1$ and $k<N/2$, $\lambda_2<0$ for $\alpha>N/(N-2k)$, so that $\vert S_k(H)\vert<0$ only for $1<\alpha<N/(N-2k)$.\\ \noindent (iii) To finally see what happens for $\alpha>1$ and $\alpha>N/(N-2k)$ let us consider the determinant of the submatrix of size $l=k-1$ in the upper left corner of $H$. This matrix has eigenvalues $\sigma_1=(N-k+1)-\alpha(N-k-1)$ and $\sigma_2=N-\alpha(N-2k)$ with even algebraic multiplicity $(k-2)$, so that again the sign of $\sigma_1$ determines the sign of this subdeterminant. Now $\sigma_1<0$ for $1<\frac{N-k+1}{N-k-1}<\alpha$, but this is certainly satisfied, since in the considered case $k\geq 2$ and $\alpha$ was even larger than $N/(N-2k)$ by assumption. So this $(k-1)\times(k-1)$-dimensional subdeterminant violates the Sylvester criterion for $H$ to be positive definite. In particular, for the maximal number of mismatches $k=N-1$, $\lambda_1=1-\alpha<0$ for $\alpha>1$ and $\lambda_2=N+\alpha(N-2)>0$ for $N>2$, and for $N=2$, $\lambda_2^{k-1}=\lambda_2^{N-2}=1>0$, so that the corresponding pattern again ceases to be a local minimum of the energy function.
1,108,101,565,914
arxiv
\section{Introduction} In the recent survey \cite{M} on hyperbolic $4$--manifolds, Martelli asks whether one can find a cusped hyperbolic $4$--manifold with non--vanishing signature (see \cite[Section 4]{M} for other open questions). The main purpose of this note is to prove the following: \begin{thm} \label{thm:signature_spectrum} Every integer is the signature of a cusped hyperbolic $4$--man\-i\-fold. \end{thm} All manifolds in the paper are assumed connected and oriented unless otherwise stated. Hyperbolic manifolds are understood to be complete and of finite volume. Non--compact hyperbolic manifolds are called \emph{cusped}. Closed hyperbolic $4$--manifolds have vanishing signature, while for cusped manifolds this property holds virtually (in a strong sense, see Corollary \ref{cor:virtually_0}). The latter fact follows from a result of Long and Reid \cite{LR} which plays an important role in this paper. It is worth noting that for cusped manifolds the signature is not necessarily multiplicative under finite coverings. As a byproduct of our construction, we obtain some results on the ``geography problem'' for cusped hyperbolic $4$--manifolds. The latter asks about realising a given pair of integers as the Euler characteristic $\chi(M)$ and signature $\sigma(M)$ of a cusped hyperbolic $4$--manifold $M$. Indeed, Theorem \ref{thm:signature_spectrum} is a consequence of the following: \begin{thm}\label{thm:main} For every pair of positive integers $m, n$ with $m$ odd, there exists a cusped hyperbolic $4$--manifold $M$ with $\sigma(M) = \pm n$ and $\chi(M) = mn$. \end{thm} Recall that hyperbolic $4$--manifolds have positive Euler characteristic by the generalised Gau\ss--Bonnet formula. \subsection{Consequences and questions} As shown by Ratcliffe and Tschantz \cite{RT}, every positive integer is realised as $\chi(M)$ for some cusped hyperbolic $4$--manifold $M$. 
However, all the manifolds constructed in \cite{RT} (as well as their covers) happen to have vanishing signature, as well as any other cusped hyperbolic $4$--manifold that we could find in the literature. Combining this fact (or Corollary \ref{cor:virtually_0}) with Theorem \ref{thm:main}, we obtain that for every integer $n$ there exists a cusped hyperbolic $4$--manifold $M$ with $\sigma(M) = n$ and $\chi(M)$ arbitrarily big. On the other hand (Proposition \ref{prop:universal-constant}), every cusped hyperbolic $4$--manifold $M$ satisfies $$\chi(M) > 0.03493 \cdot |\sigma(M)|.$$ It thus seems reasonable trying to minimise $\chi$ for any fixed value of $|\sigma|$. For $\sigma = 0$, the minimum possible $\chi = 1$ is realised by \cite{RT}. Theorem \ref{thm:main} implies the following fact. \begin{cor} \label{cor:chi=sigma} For every positive integer $n$, there exists a cusped hyperbolic $4$--manifold with $\chi(M) = \sigma(M) = n$. \end{cor} We shall call the quantity $$\alpha(M) = \sigma(M)/\chi(M)$$ \textit{the slope} of $M$. By the above inequality, the slope of a cusped hyperbolic $4$--manifold is always bounded, and a natural ``geography'' problem is to determine which slopes can be realised. Theorem \ref{thm:main} gives a partial answer: \begin{cor}\label{cor:slopes} For every odd integer $m$, there exist infinitely many cusped hyperbolic $4$--manifolds with slope $1/m$. \end{cor} In particular, the maximum slope realised by our construction equals $1$. Note that a well--known conjecture dating back to Gromov's work on bounded cohomology \cite[\S 8.A4]{Gromov} states that every \emph{closed} aspherical $4$--manifold $M$ satisfies $|\alpha(M)| \leq 1$, which is known as Winkelnkemper's inequality \cite{JK}. In the setting of our work, the following questions arise naturally. \begin{quest} Can we describe the set of pairs $(\chi(M), \sigma(M))$ for all possible cusped hyperbolic $4$--manifolds $M$? 
In other words, what is the geography of cusped hyperbolic $4$--manifolds? \end{quest} \begin{quest} What is the maximum slope of a cusped hyperbolic $4$--manifold? \end{quest} \subsection{On the proof} Our proof of Theorem \ref{thm:main} is constructive and rather simple. We explicitly build a cusped hyperbolic $4$--manifold $M$ satisfying $\chi(M) = \sigma(M) = 1$ and such that for every $m, n \geq 1$ with $m$ odd, there exists an $mn$--sheeted covering $M_{m,n} \to M$ with $\sigma(M_{m,n}) = n$. An essential tool is an adaptation of the Atiyah--Patodi--Singer formula for cusped hyperbolic $4$--manifolds by Long and Reid (cf. Theorem \ref{thm:long-reid}), combined with the results of Ouyang \cite{O}, thanks to which the signature can be expressed only in terms of the oriented homeomorphism classes of the cusp sections. Similar to \cite{RT, RT2}, the manifold $M$ is obtained by gluing the sides of the ideal right-angled $24$--cell. In particular, $\chi(M) = 1$. The side pairing is performed in order to have one cusp with section a ``quarter twist'' flat $3$--manifold $F_4$, while the remaining cusps have $3$--torus sections. Then Long and Reid's signature formula gives $\sigma(M) = \pm1$. Moreover, there exist homomorphisms $\pi_1(M) \to \matZ$ that map to zero some generators of the parabolic subgroup of the $F_4$--cusp, in a way to get an $n$--sheeted cyclic covering $M_{n} \to M$ under which $\sigma$ is multiplicative, and an $m$--sheeted cyclic covering $M_{m,n} \to M_n$ under which $\sigma$ does not change. The latter condition is satisfied if $m$ is odd: in this case, the manifold $M_{m,n}$ has exactly $n$ cusps of type $F_4$, all coherently oriented, while the remaining ones have $3$--torus sections. This implies $\sigma(M_{m,n}) = \pm n$. As in \cite{RT2}, the starting point to find $M$ is an extensive computer search. 
Despite the fact that $M$ is produced by computer, it has a relatively simple structure and all the aforementioned properties can be verified by hand. However, finding such a starting manifold $M$ by hand is conceivably impossible within any reasonable time if one wants to search through all or a sufficiently large number of side pairings. We would like to stress the fact that the manifold $M$ used in our present construction does not appear to be special: we only concentrate on it because of its simple structure that makes our proofs verifiable by hand. Another construction of Tschantz's, the details of which are rather technical and impossible to verify without computer aid, gives on the order of $m^m$ commensurability classes of manifolds with $\chi \leq m$ for any fixed value of $\sigma$ and $m$ large enough (cf. \cite{BGLM, GL}). Presenting the details here, however, would obfuscate our main goal, that is proving Theorem \ref{thm:signature_spectrum} in a relatively simple way. \subsection{Structure of the paper} Some general facts on the geography of cusped hyperbolic $4$--manifolds are given in Section \ref{sec:preliminaries}. The proof of Theorem \ref{thm:main} follows in Section \ref{sec:proof}. \subsection*{Acknowledgements} The authors are grateful to Bruno~Martelli for suggesting a strategy that helped to simplify their previous proof of Theorem \ref{thm:signature_spectrum}. They would also like to thank John~Ratcliffe and Alan~Reid for showing interest in this work. \section{Preliminaries} \label{sec:preliminaries} Recall that all manifolds in this paper are connected and oriented, unless otherwise stated. All homology and cohomology groups are understood with integer coefficients. \subsection{Signature and geography} Let $X$ be a compact $4$--manifold with boundary, and let $[X, \partial X] \in H_4(X, \partial X) \cong \mathbb{Z}$ be its fundamental class. 
The cup product on $H^2(X, \partial X)$ defines the following symmetric bilinear form, called the \emph{intersection form} of $X$: \begin{equation*} H^2(X, \partial X) \times H^2(X, \partial X) \to \mathbb{Z}, \quad (\alpha, \beta) \mapsto \alpha \smile \beta \, ([X, \partial X]). \end{equation*} By the Poincar\'e--Lefschetz duality, the radical of the intersection form is the kernel of the natural map $H^2(X, \partial X) \to H^2(X)$. Let $q_+$ and $q_-$ be the positive and negative inertia indices of the associated quadratic form over $\matR$. Then the \emph{signature} of $X$ is defined as $$\sigma(X) = q_+ - q_{-} \in \matZ.$$ The notion of ``geography'' for $4$--manifolds appears to be classical and has been studied in different contexts by many authors (see \cite{Stipsicz} for a detailed survey). For $M$ a $4$--manifold, the \textit{geography map} can be defined as $M \mapsto (\chi(M),\, \sigma(M))$. The \textit{slope} of $M$ is $\alpha(M) = \sigma(M)/\chi(M)$. Note that in \cite{Stipsicz} the slope and geography map are defined for manifolds with complex structures and thus expressed via Chern numbers: we slightly modify the definitions in our setting. Here and below we shall be interested in the class of cusped hyperbolic $4$--manifolds, and the behaviour of the geography and slope maps on it. \subsection{Signature of hyperbolic $4$--manifolds} If $M$ is a closed hyperbolic $4$--manifold, then it follows from the Hirzebruch signature theorem that $\sigma(M) = 0$, since by a theorem of Chern the first Pontryagin class vanishes \cite[Theorem 11.3.3]{R} (as it more generally does for locally conformally flat manifolds, cf. \cite{LR}). Let now $M$ be a cusped hyperbolic $4$--manifold. Since $M$ is homeomorphic to the interior of a compact $4$--manifold $X$ with boundary, then by the Poincar\'e--Lefschetz duality we have a well-defined intersection form on $H_2(M) \cong H^2(X, \partial X)$ and signature $\sigma(M) := \sigma(X)$. 
In \cite{LR} Long and Reid provided an adaptation of the Atiyah--Patodi--Singer formula \cite{APS} for cusped hyperbolic $4$--manifolds. \begin{thm}[Long--Reid] \label{thm:long-reid} Let $M$ be a hyperbolic $4$--manifold with $m$ cusps $C_1, \ldots, C_m$, and let $S_i$ be a horospherical section of $C_i$. Then \begin{equation*} \sigma(M) = - \sum_{i=1}^m \eta(S_i). \end{equation*} \end{thm} Here $\eta$ is the so-called \emph{eta invariant} of a closed oriented Riemannian $3$--mani\-fold, see \cite{APS, LR}. Same as the signature, $\eta$ changes its sign when the orientation of the manifold is reversed, and thus vanishes on achiral manifolds. Since cusped hyperbolic manifolds have virtually torus cusps \cite[Theorem 3.1]{MRS}, we have the following corollary. \begin{cor} \label{cor:virtually_0} For every cusped hyperbolic $4$--manifold $M$ there is a finite covering $M' \to M$ such that $\sigma(M'') = 0$ for every finite covering $M'' \to M'$. \end{cor} This shows that, in particular, constructing cusped hyperbolic manifolds with given signatures likely cannot be done by considering some sort of ``generic'' or ``random'' coverings of a given particular manifold. We shall concentrate on using cyclic coverings as in this case preserving the topological type of cusps in the covering is relatively easy. There are precisely six closed orientable flat $3$--manifolds up to homeomorphism \cite{HW}. We denote them in the order given by Hantzsche and Wendt \cite{HW} as follows: the $3$--torus $F_1$, the ``half-twist'' manifold $F_2$, the ``third-twist'' manifold $F_3$, the ``quarter-twist'' manifold $F_4$, the ``sixth-twist'' manifold $F_5$, and the Hantzsche--Wendt manifold $F_6$. For $i = 1, \ldots, 5$, the manifold $F_i$ is a mapping torus over $S^1 \times S^1$ with monodromy of order $1$, $2$, $3$, $4$, and $6$, respectively. 
As shown by Ouyang \cite{O}, the $\eta$--invariant of a flat $3$--manifold does not depend on the chosen flat metric, and thus represents a topological invariant for such manifolds. More precisely \cite{O}, we have: $$\eta(F_1) = \eta(F_2) = \eta(F_6) = 0,$$ $$\eta(F_3) = \pm \frac{2}{3},\quad \eta(F_5) = \pm \frac{4}{3},\quad \eta(F_4) = \pm1.$$ In particular, a hyperbolic $4$--manifold $M$ whose unique non--torus cusp has $F_4$ section has signature $\pm 1$. Moreover, $\sigma(\widetilde{M}) = \pm n$ for any $n$--sheeted regular covering $\widetilde{M} \to M = \widetilde{M} \slash G$ with $G < \ensuremath {\mathrm{Isom}}^+(\widetilde{M})$ acting transitively on the $F_4$--cusps of $\widetilde{M}$. \subsection{The slope is bounded} The previous facts together with the volume estimates of Kellerhals \cite{K} imply the following lower bound, that we believe however to be far from sharp. \begin{prop}\label{prop:universal-constant} Every cusped hyperbolic $4$--manifold $M$ satisfies $$|\alpha(M)| < 28.62869.$$ \end{prop} \begin{proof} From Formula (4.6) in \cite[Example 2]{K} we have $\mathrm{Vol}(M) > 0.61293 \cdot k$, where $k$ is the number of cusps of $M$. By Theorem \ref{thm:long-reid} and the values of $\eta(F_i)$ listed above, we have $\frac{4}{3} \cdot k \geq |\sigma(M)|$. Then the claimed inequality follows by applying the Gau\ss--Bonnet formula $\mathrm{Vol}(M) = \frac{4 \pi^2}{3} \cdot \chi(M)$. \end{proof} \section{Proofs} \label{sec:proof} In the following subsection we prove Theorem \ref{thm:main}. The essential geometric construction used in our proof (Theorem \ref{thm:construction}) is postponed to a separate subsection. \subsection{The proof} A \emph{hyperbolic $24$--cell manifold} is a hyperbolic $4$--manifold which can be obtained by gluing isometrically in pairs the sides of an ideal right-angled $24$--cell. Such a manifold $M$ satisfies $\chi(M) = 1$. 
Recall that the fundamental group of a flat quarter-twist $3$--manifold $F_4$ is generated by two translations $t_1$ and $t_2$, and a rototranslation $a$ whose rotational part has order $4$. Note that $t_1$, $t_2$ and $t_3 = a^4$ generate the translation lattice of $F_4$. Given a hyperbolic $4$--manifold $M$ with a cusp $C$ of type $F_4$, let $\pi_1(C) = \langle t_1, t_2, a \rangle < \pi_1(M)$ denote the corresponding parabolic subgroup. In the next subsection we shall prove the following theorem, that is the cornerstone of our construction. \begin{thm} \label{thm:construction} There exist an orientable hyperbolic $24$--cell manifold $M$ with one cusp $C$ of type $F_4$ and all the other cusps of type $F_1$, and two surjective homomorphisms $h, v \colon \pi_1(M) \to \matZ$ such that $\pi_1(C) = \langle t_1, t_2, a \rangle \subset \ker(h)$, while $v(t_1) = v(t_2) = 0$ and $v(a) = 1$. \end{thm} We are ready to prove Theorem \ref{thm:main} assuming Theorem \ref{thm:construction}. Since $\eta(F_4) = \pm 1$ and $\eta(F_1) = 0$, by Theorem \ref{thm:long-reid} $\sigma(M) = 1$ up to reversing the orientation on $M$. For $n \geq 1$, let $p_n \colon \matZ \to \matZ / n \matZ$ be the reduction mod $n$, and $M_n \to M$ be the cyclic $n$--sheeted covering associated to $h_n := p_n \circ h$. Since $\pi_1(C) \subset \ker(h_n)$, the manifold $M_n$ has $n$ cusps of type $F_4$. Moreover, there is an orientation-preserving isometry of $M_n$ that is cyclically permuting said cusps. All the remaining cusps of $M_n$ have type $F_1$. Thus $\sigma(M_n) = n$ by Theorem \ref{thm:long-reid}, and $\chi(M_n) = n \cdot \chi(M) = n$. Now, fix $m, n \geq 1$ and let $M_{m,n} \to M_n$ be the cyclic $m$--sheeted covering associated to the restriction $v_{m,n}$ of $p_m \circ v$ to $\ker(h_n)$. 
Since $v_{m,n}(t_1) = v_{m,n}(t_2) = 0$ and $v_{m,n}(a) = 1$, the subgroup $\pi_1(C) \cap \ker(v_{m,n})$ of $\pi_1(M_{m,n})$ is generated by $t_1$, $t_2$ and $a^m$, and does not contain $a^k$ for any positive $k < m$. If $m$ is odd (or, equivalently, $m \equiv \pm 1 \mod 4$), the associated $n$ cusps of $M_{m,n}$ have type $F_4$. As before, the latter are coherently oriented, and all the remaining cusps are of type $F_1$. Thus $\sigma(M_{m,n}) = n$ by Theorem \ref{thm:long-reid}, and $\chi(M_{m,n}) = m \cdot \chi(M_n) = mn$. The proof of Theorem \ref{thm:main} is thus complete assuming Theorem \ref{thm:construction}. \subsection{The construction} We prove here Theorem \ref{thm:construction}. The manifold $M$ is specified by the side pairing of a regular ideal $24$--cell, with vertices given in Table \ref{tab1}, by the correspondence between the vertices of paired sides provided in Table \ref{tab2}. We refer the reader to \cite[Section 11.1]{R} for more details on how to build hyperbolic $24$--cell manifolds by using Poincar\'{e}'s fundamental polytope theorem. \begin{figure} \centering \includegraphics[scale=.35]{3-torus1.pdf} \caption{\footnotesize A fundamental domain for a horosection of one of the two $3$--torus cusps of $M$ in its universal cover, tessellated by $8$ unit cubes. A cube with label $i$ corresponds to the vertex $v_i$ of the $24$--cell. A vertex of cube $i$ has label $j$ if the corresponding edge of the $24$--cell joins $v_i$ and $v_j$. The resulting Euclidean lattice is generated by the translations along $(4,0,0)$, $(0,2,0)$ and $(0,2,-1)$.} \label{fig:3-torus1} \end{figure} \begin{figure} \centering \includegraphics[scale=.35]{3-torus2.pdf} \caption{\footnotesize A fundamental domain for a horosection of the other $3$--torus cusp of $M$ in its universal cover. 
The resulting Euclidean lattice is generated by the translations along $(4,0,0)$, $(4,0,-1)$ and $(2,2,0)$.} \label{fig:3-torus2} \end{figure} Alternatively, we can give the side pairing by specifying the matrices for each side pairing map. Since side $1$ is paired to side $2$, the side pairing map that carries side $2$ to side $1$ is the inverse of that for the map from side $1$ to side $2$, similarly for sides $3$ and $4$, etc. The matrices $g_i$ for sides $1, 3, 5, 6, 7, 8, 9, 10, 11, 12, 21$, and $22$ are given in Table \ref{tab3}. These twelve matrices generate the fundamental group of $M$ and the defining relations of this group are determined from the ridge cycles as shown in Table \ref{tab4}. There are $96$ ridges in cycles of length $4$, so there are $24$ defining relations. The manifold $M$ is orientable with homology groups \begin{equation*} H_0(M)=\mathbb{Z},\ H_1(M)=\mathbb{Z}^3,\ H_2(M)=\mathbb{Z}^5,\ H_3(M)=\mathbb{Z}^2,\ H_4(M)=0. \end{equation*} Moreover, $M$ has three cusps: two of type $F_1$, and the other of type $F_4$. In order to facilitate a manual verification, the gluing of the $24$ cubes (the vertex links of the $24$--cell) producing the respective cusp sections is provided in Figures \ref{fig:3-torus1}, \ref{fig:3-torus2} and \ref{fig:quarter-twist}. \begin{figure} \centering \includegraphics[scale=.35]{quarter-twist.pdf} \caption{\footnotesize A fundamental domain for a horosection of the $F_4$--cusp $C$ of $M$ in its universal cover. The resulting Euclidean lattice is generated by the translations $t_1$ and $t_2$ along $(1,1,0)$ and $(-1,1,0)$, and the rototranslation $a$ along $(0,0,1)$ whose rotational part has vertical axis through the center of cube $21$ (see also Figure \ref{fig:square_torus}).} \label{fig:quarter-twist} \end{figure} \begin{figure} \centering \includegraphics[scale=.5]{square_torus.pdf} \caption{\footnotesize A portion of a horizontal slice of the universal cover of a horosection of the $F_4$--cusp $C$ of $M$. 
The labels agree with the vertex indices of the $24$--cell. The shaded square is a fundamental domain for the translation group $\langle t_1, t_2 \rangle$. The ``quarter-turn'' action of $a$ in the direction orthogonal to the slice is shown by arrows in the center.} \label{fig:square_torus} \end{figure} The cusp $C$ of type $F_4$ is the link of the vertex cycle consisting of vertices $v_{17}$, $v_{18}$, $v_{19}$, $v_{20}$, $v_{21}$, $v_{22}$, $v_{23}$, and $v_{24}$. The flat $3$--manifold for the link of $C$ is realised as a gluing of $8$ cubes, giving a presentation for $\pi_1(C)$ with $24$ generators having $7$ singleton and $24$ length-four defining relations (such a presentation can certainly be simplified considerably, although we do not need to do this). The $8$ cubes glue together in a pattern corresponding to a square $2$--torus tiled by $8$ squares with sides at $\frac{\pi}{4}$ angle to the axes of the torus as in Figure \ref{fig:square_torus}, multiplied by an interval in order to produce $8$ cubes. The top and bottom tori are identified with a $\frac{\pi}{2}$ twist of the square torus, which is exactly the gluing pattern for the $F_4$ manifold. Extracting the words in the generators of $\pi_1(M)$ giving parabolic elements stabilising vertex $v_{21}$ produces the generators for $\pi_1(C)$; see the accompanying Tables \ref{tab5} and \ref{tab6}. The two homomorphisms $h$ and $v$ are defined in Table \ref{tab7}. The proof of Theorem \ref{thm:construction} is now complete. 
\begin{table}[h] $$ \begin{array}{ll} v_{1} = \left(-\frac{1}{2},-\frac{1}{2},-\frac{1}{2},-\frac{1}{2},1\right) & v_{2} = \left(-\frac{1}{2},-\frac{1}{2},-\frac{1}{2},\frac{1}{2},1\right) \\[5pt] v_{3} = \left(-\frac{1}{2},-\frac{1}{2},\frac{1}{2},-\frac{1}{2},1\right) & v_{4} = \left(-\frac{1}{2},-\frac{1}{2},\frac{1}{2},\frac{1}{2},1\right) \\[5pt] v_{5} = \left(-\frac{1}{2},\frac{1}{2},-\frac{1}{2},-\frac{1}{2},1\right) & v_{6} = \left(-\frac{1}{2},\frac{1}{2},-\frac{1}{2},\frac{1}{2},1\right) \\[5pt] v_{7} = \left(-\frac{1}{2},\frac{1}{2},\frac{1}{2},-\frac{1}{2},1\right) & v_{8} = \left(-\frac{1}{2},\frac{1}{2},\frac{1}{2},\frac{1}{2},1\right) \\[5pt] v_{9} = \left(\frac{1}{2},-\frac{1}{2},-\frac{1}{2},-\frac{1}{2},1\right) & v_{10} = \left(\frac{1}{2},-\frac{1}{2},-\frac{1}{2},\frac{1}{2},1\right) \\[5pt] v_{11} = \left(\frac{1}{2},-\frac{1}{2},\frac{1}{2},-\frac{1}{2},1\right) & v_{12} = \left(\frac{1}{2},-\frac{1}{2},\frac{1}{2},\frac{1}{2},1\right) \\[5pt] v_{13} = \left(\frac{1}{2},\frac{1}{2},-\frac{1}{2},-\frac{1}{2},1\right) & v_{14} = \left(\frac{1}{2},\frac{1}{2},-\frac{1}{2},\frac{1}{2},1\right) \\[5pt] v_{15} = \left(\frac{1}{2},\frac{1}{2},\frac{1}{2},-\frac{1}{2},1\right) & v_{16} = \left(\frac{1}{2},\frac{1}{2},\frac{1}{2},\frac{1}{2},1\right) \\[5pt] v_{17} = (1,0,0,0,1) & v_{18} = (-1,0,0,0,1) \\[5pt] v_{19} = (0,1,0,0,1) & v_{20} = (0,-1,0,0,1) \\[5pt] v_{21} = (0,0,1,0,1) & v_{22} = (0,0,-1,0,1) \\[5pt] v_{23} = (0,0,0,1,1) & v_{24} = (0,0,0,-1,1) \\[5pt] \end{array} $$ \caption{\footnotesize The vertices of an ideal regular $24$--cell in the hyperboloid model of hyperbolic $4$--space.}\label{tab1} \end{table} \begin{table}[ht] \vspace{1.75in} $$ \begin{array}{ccll} \vspace{.3cm} \text{From side} & \text{To side} & \text{Vertex map}\\ 1 & 2 & (13,14,15,16,17,19)&\mapsto\ \ (7,8,5,6,19,18) \\ 2 & 1 & (5,6,7,8,18,19)&\mapsto\ \ (15,16,13,14,19,17) \\ 3 & 4 & (9,10,11,12,17,20)&\mapsto\ \ (3,4,1,2,20,18) \\ 4 & 3 & (1,2,3,4,18,20)&\mapsto\ \ 
(11,12,9,10,20,17) \\ 5 & 14 & (11,12,15,16,17,21)&\mapsto\ \ (6,2,8,4,23,18) \\ 6 & 13 & (3,4,7,8,18,21)&\mapsto\ \ (12,16,10,14,23,17) \\ 7 & 16 & (9,10,13,14,17,22)&\mapsto\ \ (3,7,1,5,24,18) \\ 8 & 15 & (1,2,5,6,18,22)&\mapsto\ \ (13,9,15,11,24,17) \\ 9 & 18 & (7,8,15,16,19,21)&\mapsto\ \ (10,2,12,4,23,20) \\ 10 & 17 & (3,4,11,12,20,21)&\mapsto\ \ (8,16,6,14,23,19) \\ 11 & 20 & (5,6,13,14,19,22)&\mapsto\ \ (3,11,1,9,24,20) \\ 12 & 19 & (1,2,9,10,20,22)&\mapsto\ \ (13,5,15,7,24,19) \\ 13 & 6 & (10,12,14,16,17,23)&\mapsto\ \ (7,3,8,4,21,18) \\ 14 & 5 & (2,4,6,8,18,23)&\mapsto\ \ (12,16,11,15,21,17) \\ 15 & 8 & (9,11,13,15,17,24)&\mapsto\ \ (2,6,1,5,22,18) \\ 16 & 7 & (1,3,5,7,18,24)&\mapsto\ \ (13,9,14,10,22,17) \\ 17 & 10 & (6,8,14,16,19,23)&\mapsto\ \ (11,3,12,4,21,20) \\ 18 & 9 & (2,4,10,12,20,23)&\mapsto\ \ (8,16,7,15,21,19) \\ 19 & 12 & (5,7,13,15,19,24)&\mapsto\ \ (2,10,1,9,22,20) \\ 20 & 11 & (1,3,9,11,20,24)&\mapsto\ \ (13,5,14,6,22,19) \\ 21 & 23 & (4,8,12,16,21,23)&\mapsto\ \ (11,3,15,7,21,24) \\ 22 & 24 & (2,6,10,14,22,23)&\mapsto\ \ (5,13,1,9,22,24) \\ 23 & 21 & (3,7,11,15,21,24)&\mapsto\ \ (8,16,4,12,21,23) \\ 24 & 22 & (1,5,9,13,22,24)&\mapsto\ \ (10,2,14,6,22,23) \\[5pt] \end{array} $$ \caption{\footnotesize Side pairings defining $M$.}\label{tab2} \end{table} \begin{table} $$ \begin{array}{ll} g_1 = \left( \begin{array}{ccccc} 2 & 1 & 0 & 0 & -2 \\ -1 & -2 & 0 & 0 & 2 \\ 0 & 0 & -1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 \\ -2 & -2 & 0 & 0 & 3 \\ \end{array} \right) & g_3 = \left( \begin{array}{ccccc} 2 & -1 & 0 & 0 & -2 \\ 1 & -2 & 0 & 0 & -2 \\ 0 & 0 & -1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 \\ -2 & 2 & 0 & 0 & 3 \\ \end{array} \right) \\[30pt] g_5 = \left( \begin{array}{ccccc} 2 & 0 & 1 & 0 & -2 \\ 0 & 0 & 0 & -1 & 0 \\ 0 & 1 & 0 & 0 & 0 \\ -1 & 0 & -2 & 0 & 2 \\ -2 & 0 & -2 & 0 & 3 \\ \end{array} \right) & g_6 = \left( \begin{array}{ccccc} 2 & 0 & -1 & 0 & 2 \\ 0 & 0 & 0 & 1 & 0 \\ 0 & -1 & 0 & 0 & 0 \\ 1 & 0 & -2 & 0 & 2 \\ 2 & 0 & -2 & 0 & 3 \\ \end{array} 
\right) \\[30pt] g_7 = \left( \begin{array}{ccccc} 2 & 0 & -1 & 0 & -2 \\ 0 & 0 & 0 & 1 & 0 \\ 0 & -1 & 0 & 0 & 0 \\ 1 & 0 & -2 & 0 & -2 \\ -2 & 0 & 2 & 0 & 3 \\ \end{array} \right) & g_8 = \left( \begin{array}{ccccc} 2 & 0 & 1 & 0 & 2 \\ 0 & 0 & 0 & -1 & 0 \\ 0 & 1 & 0 & 0 & 0 \\ -1 & 0 & -2 & 0 & -2 \\ 2 & 0 & 2 & 0 & 3 \\ \end{array} \right) \\[30pt] g_9 = \left( \begin{array}{ccccc} 0 & 0 & 0 & -1 & 0 \\ 0 & 2 & 1 & 0 & -2 \\ 1 & 0 & 0 & 0 & 0 \\ 0 & -1 & -2 & 0 & 2 \\ 0 & -2 & -2 & 0 & 3 \\ \end{array} \right) & g_{10} = \left( \begin{array}{ccccc} 0 & 0 & 0 & 1 & 0 \\ 0 & 2 & -1 & 0 & 2 \\ -1 & 0 & 0 & 0 & 0 \\ 0 & 1 & -2 & 0 & 2 \\ 0 & 2 & -2 & 0 & 3 \\ \end{array} \right) \\[30pt] g_{11} = \left( \begin{array}{ccccc} 0 & 0 & 0 & 1 & 0 \\ 0 & 2 & -1 & 0 & -2 \\ -1 & 0 & 0 & 0 & 0 \\ 0 & 1 & -2 & 0 & -2 \\ 0 & -2 & 2 & 0 & 3 \\ \end{array} \right) & g_{12} = \left( \begin{array}{ccccc} 0 & 0 & 0 & -1 & 0 \\ 0 & 2 & 1 & 0 & 2 \\ 1 & 0 & 0 & 0 & 0 \\ 0 & -1 & -2 & 0 & -2 \\ 0 & 2 & 2 & 0 & 3 \\ \end{array} \right) \\[30pt] g_{21} = \left( \begin{array}{ccccc} 0 & -1 & 0 & 0 & 0 \\ 1 & 0 & 0 & 0 & 0 \\ 0 & 0 & -1 & -2 & 2 \\ 0 & 0 & 2 & 1 & -2 \\ 0 & 0 & -2 & -2 & 3 \\ \end{array} \right) & g_{22} = \left( \begin{array}{ccccc} 0 & 1 & 0 & 0 & 0 \\ -1 & 0 & 0 & 0 & 0 \\ 0 & 0 & -1 & 2 & -2 \\ 0 & 0 & -2 & 1 & -2 \\ 0 & 0 & 2 & -2 & 3 \\ \end{array} \right) \\[30pt] \end{array} $$ \caption{\footnotesize Generators of $\pi_1(M)$ in $\ensuremath {\mathrm{SO}}(4,1)$.}\label{tab3} \end{table} \begin{table} $$ \begin{array}{llll} g_3g_{10}^{-1}g_{22}^{-1}g_8 & g_3g_5^{-1}g_{22}^{-1}g_{12} & g_7g_8g_{12}^{-1}g_{11}^{-1} & g_3g_{11}g_{22}^{-1}g_7^{-1} \\[5pt] g_3g_8g_{22}^{-1}g_{11}^{-1} & g_7g_{11}^{-1}g_{12}^{-1}g_8 & g_1g_8g_{22}g_{12}^{-1} & g_1g_7^{-1}g_{22}g_9 \\[5pt] g_1g_{12}g_{22}g_7^{-1} & g_1g_{11}^{-1}g_{22}g_6 & g_3g_9g_{21}g_5^{-1} & g_3g_6g_{21}g_9^{-1} \\[5pt] g_5g_6g_{10}^{-1}g_9^{-1} & g_5g_{10}^{-1}g_{11}^{-1}g_8 & g_6g_9^{-1}g_{12}^{-1}g_7 & 
g_3g_{12}^{-1}g_{21}g_6 \\[5pt] g_3g_7^{-1}g_{21}g_{10} & g_6g_7g_{12}^{-1}g_9^{-1} & g_5g_8g_{11}^{-1}g_{10}^{-1} & g_1g_6g_{21}^{-1}g_{10}^{-1} \\[5pt] g_1g_5^{-1}g_{21}^{-1}g_{11} & g_5g_9^{-1}g_{10}^{-1}g_6 & g_1g_{10}g_{21}^{-1}g_5^{-1} & g_1g_9^{-1}g_{21}^{-1}g_8 \\[5pt] \end{array} $$ \caption{\footnotesize Defining relations for $\pi_1(M)$.}\label{tab4} \end{table} \begin{table} $$ \begin{array}{ll} t_1 = g_5^{-1}g_8^{-1}g_{12}g_9= & \left( \begin{array}{ccccc} 1 & 0 & -4 & 0 & 4 \\ 0 & 1 & 4 & 0 & -4 \\ 4 & -4 & -15 & 0 & 16 \\ 0 & 0 & 0 & 1 & 0 \\ 4 & -4 & -16 & 0 & 17 \\ \end{array} \right) \\ t_2 = g_9^{-1}g_{10}^{-1}g_5g_6= & \left( \begin{array}{ccccc} 1 & 0 & -4 & 0 & 4 \\ 0 & 1 & -4 & 0 & 4 \\ 4 & 4 & -15 & 0 & 16 \\ 0 & 0 & 0 & 1 & 0 \\ 4 & 4 & -16 & 0 & 17 \\ \end{array} \right) \\ t_3 = g_{21}^4= & \left( \begin{array}{ccccc} 1 & 0 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 & 0 \\ 0 & 0 & -31 & -8 & 32 \\ 0 & 0 & 8 & 1 & -8 \\ 0 & 0 & -32 & -8 & 33 \\ \end{array} \right) \\ a = g_{21}= & \left( \begin{array}{ccccc} 0 & -1 & 0 & 0 & 0 \\ 1 & 0 & 0 & 0 & 0 \\ 0 & 0 & -1 & -2 & 2 \\ 0 & 0 & 2 & 1 & -2 \\ 0 & 0 & -2 & -2 & 3 \\ \end{array} \right) \\[5pt] \end{array} $$ \caption{\footnotesize Generators of $\pi_1(C)$ in $\ensuremath {\mathrm{SO}}(4,1)$.}\label{tab5} \end{table} \vspace{4cm} \begin{table} $$ \begin{array}{llll} a^4t_3^{-1} & at_1a^{-1}t_2^{-1} & at_2a^{-1}t_1 & at_3a^{-1}t_3^{-1} \\[5pt] t_1t_2t_1^{-1}t_2^{-1} & t_1t_3t_1^{-1}t_3^{-1} & t_2t_3t_2^{-1}t_3^{-1} &\\[5pt] \end{array} $$ \caption{\footnotesize Defining relations for $\pi_1(C)$.}\label{tab6} \end{table} \vspace{4cm} \begin{table} $$ \begin{array}{lllllllll} h(g_1) = 1\ & h(g_3) = 1\ & h(g_5) = 2\ & h(g_6) = 0 \\[5pt] h(g_7) = 2\ & h(g_8) = 0\ & h(g_9) = 1\ & h(g_{10}) = 1 \\[5pt] h(g_{11}) = 1\ & h(g_{12}) = 1\ & h(g_{21}) = 0\ & h(g_{22}) = 0 \\[10pt] v(g_1) = 2\ & v(g_3) = 0\ & v(g_5) = 2\ & v(g_6) = 0 \\[5pt] v(g_7) = 2\ & v(g_8) = 0\ & v(g_9) = 1\ & v(g_{10}) = 1 \\[5pt] v(g_{11}) = 
1\ & v(g_{12}) = 1\ & v(g_{21}) = 1\ & v(g_{22}) = -1 \\[5pt] \end{array} $$ \caption{\footnotesize The two homomorphisms $h, v \colon \pi_1(M) \to \matZ$.}\label{tab7} \end{table} \FloatBarrier